From a7e48212b9dd3451a79ea2c203abc95098c63064 Mon Sep 17 00:00:00 2001 From: Igor Pecovnik Date: Sat, 9 Sep 2017 00:32:28 +0200 Subject: [PATCH] Upstream patches --- .../mvebu-default/03-patch-4.4.84-85.patch | 1511 +++++++++++++++++ .../mvebu-default/03-patch-4.4.85-86.patch | 393 +++++ .../mvebu-default/03-patch-4.4.86-87.patch | 408 +++++ .../mvebu64-default/03-patch-4.4.84-85.patch | 1511 +++++++++++++++++ .../mvebu64-default/03-patch-4.4.85-86.patch | 393 +++++ .../mvebu64-default/03-patch-4.4.86-87.patch | 408 +++++ .../rockchip-default/03-patch-4.4.84-85.patch | 1498 ++++++++++++++++ .../rockchip-default/03-patch-4.4.85-86.patch | 393 +++++ .../rockchip-default/03-patch-4.4.86-87.patch | 408 +++++ .../kernel/udoo-next/03-patch-4.4.86-87.patch | 408 +++++ 10 files changed, 7331 insertions(+) create mode 100644 patch/kernel/mvebu-default/03-patch-4.4.84-85.patch create mode 100644 patch/kernel/mvebu-default/03-patch-4.4.85-86.patch create mode 100644 patch/kernel/mvebu-default/03-patch-4.4.86-87.patch create mode 100644 patch/kernel/mvebu64-default/03-patch-4.4.84-85.patch create mode 100644 patch/kernel/mvebu64-default/03-patch-4.4.85-86.patch create mode 100644 patch/kernel/mvebu64-default/03-patch-4.4.86-87.patch create mode 100644 patch/kernel/rockchip-default/03-patch-4.4.84-85.patch create mode 100644 patch/kernel/rockchip-default/03-patch-4.4.85-86.patch create mode 100644 patch/kernel/rockchip-default/03-patch-4.4.86-87.patch create mode 100644 patch/kernel/udoo-next/03-patch-4.4.86-87.patch diff --git a/patch/kernel/mvebu-default/03-patch-4.4.84-85.patch b/patch/kernel/mvebu-default/03-patch-4.4.84-85.patch new file mode 100644 index 000000000..27aa21d04 --- /dev/null +++ b/patch/kernel/mvebu-default/03-patch-4.4.84-85.patch @@ -0,0 +1,1511 @@ +diff --git a/Makefile b/Makefile +index 9d77ac063ec0..0f3d843f42a7 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 84 ++SUBLEVEL = 85 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h +index 210ef3e72332..0ddd7144c492 100644 +--- a/arch/arc/include/asm/cache.h ++++ b/arch/arc/include/asm/cache.h +@@ -88,7 +88,9 @@ extern int ioc_exists; + #define ARC_REG_SLC_FLUSH 0x904 + #define ARC_REG_SLC_INVALIDATE 0x905 + #define ARC_REG_SLC_RGN_START 0x914 ++#define ARC_REG_SLC_RGN_START1 0x915 + #define ARC_REG_SLC_RGN_END 0x916 ++#define ARC_REG_SLC_RGN_END1 0x917 + + /* Bit val in SLC_CONTROL */ + #define SLC_CTRL_IM 0x040 +diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c +index d81b6d7e11e7..9a84cbdd44b0 100644 +--- a/arch/arc/mm/cache.c ++++ b/arch/arc/mm/cache.c +@@ -543,6 +543,7 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op) + static DEFINE_SPINLOCK(lock); + unsigned long flags; + unsigned int ctrl; ++ phys_addr_t end; + + spin_lock_irqsave(&lock, flags); + +@@ -572,8 +573,16 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op) + * END needs to be setup before START (latter triggers the operation) + * END can't be same as START, so add (l2_line_sz - 1) to sz + */ +- write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1)); +- write_aux_reg(ARC_REG_SLC_RGN_START, paddr); ++ end = paddr + sz + l2_line_sz - 1; ++ if (is_pae40_enabled()) ++ write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end)); ++ ++ write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end)); ++ ++ if (is_pae40_enabled()) ++ write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr)); ++ ++ 
write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr)); + + while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY); + +diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c +index eac4f3b02df9..bb81cd05f0bc 100644 +--- a/drivers/acpi/apei/ghes.c ++++ b/drivers/acpi/apei/ghes.c +@@ -1067,6 +1067,7 @@ static int ghes_remove(struct platform_device *ghes_dev) + if (list_empty(&ghes_sci)) + unregister_acpi_hed_notifier(&ghes_notifier_sci); + mutex_unlock(&ghes_list_mutex); ++ synchronize_rcu(); + break; + case ACPI_HEST_NOTIFY_NMI: + ghes_nmi_remove(ghes); +diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c +index ccdc8db16bb8..fa2cf2dc4e33 100644 +--- a/drivers/acpi/ioapic.c ++++ b/drivers/acpi/ioapic.c +@@ -45,6 +45,12 @@ static acpi_status setup_res(struct acpi_resource *acpi_res, void *data) + struct resource *res = data; + struct resource_win win; + ++ /* ++ * We might assign this to 'res' later, make sure all pointers are ++ * cleared before the resource is added to the global list ++ */ ++ memset(&win, 0, sizeof(win)); ++ + res->flags = 0; + if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM) == 0) + return AE_OK; +diff --git a/drivers/android/binder.c b/drivers/android/binder.c +index 47ddfefe2443..5531f020e561 100644 +--- a/drivers/android/binder.c ++++ b/drivers/android/binder.c +@@ -1718,8 +1718,12 @@ static void binder_transaction(struct binder_proc *proc, + list_add_tail(&t->work.entry, target_list); + tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; + list_add_tail(&tcomplete->entry, &thread->todo); +- if (target_wait) +- wake_up_interruptible(target_wait); ++ if (target_wait) { ++ if (reply || !(t->flags & TF_ONE_WAY)) ++ wake_up_interruptible_sync(target_wait); ++ else ++ wake_up_interruptible(target_wait); ++ } + return; + + err_get_unused_fd_failed: +@@ -2865,7 +2869,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) + const char *failure_string; + struct binder_buffer *buffer; + +- if (proc->tsk != current) ++ if (proc->tsk != current->group_leader) + return -EINVAL; + + if ((vma->vm_end - vma->vm_start) > SZ_4M) +@@ -2966,8 +2970,8 @@ static int binder_open(struct inode *nodp, struct file *filp) + proc = kzalloc(sizeof(*proc), GFP_KERNEL); + if (proc == NULL) + return -ENOMEM; +- get_task_struct(current); +- proc->tsk = current; ++ get_task_struct(current->group_leader); ++ proc->tsk = current->group_leader; + INIT_LIST_HEAD(&proc->todo); + init_waitqueue_head(&proc->wait); + proc->default_priority = task_nice(current); +diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c +index 6253775b8d9c..50d74e5ce41b 100644 +--- a/drivers/gpu/drm/drm_atomic.c ++++ b/drivers/gpu/drm/drm_atomic.c +@@ -1247,6 +1247,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state) + if (config->funcs->atomic_check) + ret = config->funcs->atomic_check(state->dev, state); + ++ if (ret) ++ return ret; ++ + if (!state->allow_modeset) { + for_each_crtc_in_state(state, crtc, crtc_state, i) { + if (drm_atomic_crtc_needs_modeset(crtc_state)) { +@@ -1257,7 +1260,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state) + } + } + +- return ret; ++ return 0; + } + EXPORT_SYMBOL(drm_atomic_check_only); + +diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c +index b205224f1a44..9147113139be 100644 +--- a/drivers/gpu/drm/drm_gem.c ++++ b/drivers/gpu/drm/drm_gem.c +@@ -715,13 +715,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) + struct drm_gem_object *obj = ptr; + struct drm_device *dev = 
obj->dev; + ++ if (dev->driver->gem_close_object) ++ dev->driver->gem_close_object(obj, file_priv); ++ + if (drm_core_check_feature(dev, DRIVER_PRIME)) + drm_gem_remove_prime_handles(obj, file_priv); + drm_vma_node_revoke(&obj->vma_node, file_priv->filp); + +- if (dev->driver->gem_close_object) +- dev->driver->gem_close_object(obj, file_priv); +- + drm_gem_object_handle_unreference_unlocked(obj); + + return 0; +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +index 9255b9c096b6..9befd624a5f0 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +@@ -148,8 +148,8 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) + rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0); + + /* Signal polarities */ +- value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL) +- | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL) ++ value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0) ++ | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0) + | DSMR_DIPM_DE | DSMR_CSPM; + rcar_du_crtc_write(rcrtc, DSMR, value); + +@@ -171,7 +171,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) + mode->crtc_vsync_start - 1); + rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1); + +- rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start); ++ rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start - 1); + rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay); + } + +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c +index 46429c4be8e5..2b75a4891dec 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c +@@ -642,13 +642,13 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu, + } + + ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector); +- of_node_put(encoder); +- of_node_put(connector); +- + if (ret && ret != -EPROBE_DEFER) + dev_warn(rcdu->dev, +- "failed to initialize encoder %s (%d), skipping\n", +- encoder->full_name, ret); ++ "failed to initialize encoder %s on output %u (%d), skipping\n", ++ of_node_full_name(encoder), output, ret); ++ ++ of_node_put(encoder); ++ of_node_put(connector); + + return ret; + } +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c +index 85043c5bad03..873e04aa9352 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c +@@ -56,11 +56,11 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds, + return ret; + + /* PLL clock configuration */ +- if (freq <= 38000) ++ if (freq < 39000) + pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M; +- else if (freq <= 60000) ++ else if (freq < 61000) + pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M; +- else if (freq <= 121000) ++ else if (freq < 121000) + pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M; + else + pllcr = LVDPLLCR_PLLDLYCNT_150M; +@@ -102,7 +102,7 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds, + /* Turn the PLL on, wait for the startup delay, and turn the output + * on. 
+ */ +- lvdcr0 |= LVDCR0_PLLEN; ++ lvdcr0 |= LVDCR0_PLLON; + rcar_lvds_write(lvds, LVDCR0, lvdcr0); + + usleep_range(100, 150); +diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h +index 77cf9289ab65..b1eafd097a79 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h ++++ b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h +@@ -18,7 +18,7 @@ + #define LVDCR0_DMD (1 << 12) + #define LVDCR0_LVMD_MASK (0xf << 8) + #define LVDCR0_LVMD_SHIFT 8 +-#define LVDCR0_PLLEN (1 << 4) ++#define LVDCR0_PLLON (1 << 4) + #define LVDCR0_BEN (1 << 2) + #define LVDCR0_LVEN (1 << 1) + #define LVDCR0_LVRES (1 << 0) +diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c +index 6b00061c3746..a2ae2213ef3e 100644 +--- a/drivers/i2c/busses/i2c-designware-platdrv.c ++++ b/drivers/i2c/busses/i2c-designware-platdrv.c +@@ -294,7 +294,7 @@ static void dw_i2c_plat_complete(struct device *dev) + #endif + + #ifdef CONFIG_PM +-static int dw_i2c_plat_suspend(struct device *dev) ++static int dw_i2c_plat_runtime_suspend(struct device *dev) + { + struct platform_device *pdev = to_platform_device(dev); + struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev); +@@ -318,11 +318,21 @@ static int dw_i2c_plat_resume(struct device *dev) + return 0; + } + ++#ifdef CONFIG_PM_SLEEP ++static int dw_i2c_plat_suspend(struct device *dev) ++{ ++ pm_runtime_resume(dev); ++ return dw_i2c_plat_runtime_suspend(dev); ++} ++#endif ++ + static const struct dev_pm_ops dw_i2c_dev_pm_ops = { + .prepare = dw_i2c_plat_prepare, + .complete = dw_i2c_plat_complete, + SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume) +- SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL) ++ SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, ++ dw_i2c_plat_resume, ++ NULL) + }; + + #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops) +diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +index 0a86ef43e781..a8db38db622e 100644 +--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c ++++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +@@ -36,8 +36,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) + s32 poll_value = 0; + + if (state) { +- if (!atomic_read(&st->user_requested_state)) +- return 0; + if (sensor_hub_device_open(st->hsdev)) + return -EIO; + +@@ -84,6 +82,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) + &report_val); + } + ++ pr_debug("HID_SENSOR %s set power_state %d report_state %d\n", ++ st->pdev->name, state_val, report_val); ++ + sensor_hub_get_feature(st->hsdev, st->power_state.report_id, + st->power_state.index, + sizeof(state_val), &state_val); +@@ -107,6 +108,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state) + ret = pm_runtime_get_sync(&st->pdev->dev); + else { + pm_runtime_mark_last_busy(&st->pdev->dev); ++ pm_runtime_use_autosuspend(&st->pdev->dev); + ret = pm_runtime_put_autosuspend(&st->pdev->dev); + } + if (ret < 0) { +@@ -175,8 +177,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name, + /* Default to 3 seconds, but can be changed from sysfs */ + pm_runtime_set_autosuspend_delay(&attrb->pdev->dev, + 3000); +- pm_runtime_use_autosuspend(&attrb->pdev->dev); +- + return ret; + error_unreg_trigger: + iio_trigger_unregister(trig); +diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c +index 2485b88ee1b6..1880105cc8c4 100644 +--- 
a/drivers/iio/imu/adis16480.c ++++ b/drivers/iio/imu/adis16480.c +@@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { + .gyro_max_val = IIO_RAD_TO_DEGREE(22500), + .gyro_max_scale = 450, + .accel_max_val = IIO_M_S_2_TO_G(12500), +- .accel_max_scale = 5, ++ .accel_max_scale = 10, + }, + [ADIS16485] = { + .channels = adis16485_channels, +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c +index 98d4e515587a..681dce15fbc8 100644 +--- a/drivers/input/mouse/elan_i2c_core.c ++++ b/drivers/input/mouse/elan_i2c_core.c +@@ -1234,6 +1234,7 @@ static const struct acpi_device_id elan_acpi_id[] = { + { "ELAN0000", 0 }, + { "ELAN0100", 0 }, + { "ELAN0600", 0 }, ++ { "ELAN0602", 0 }, + { "ELAN0605", 0 }, + { "ELAN0608", 0 }, + { "ELAN0605", 0 }, +diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c +index 354d47ecd66a..ce6ff9b301bb 100644 +--- a/drivers/input/mouse/trackpoint.c ++++ b/drivers/input/mouse/trackpoint.c +@@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir + if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID))) + return -1; + +- if (param[0] != TP_MAGIC_IDENT) ++ /* add new TP ID. */ ++ if (!(param[0] & TP_MAGIC_IDENT)) + return -1; + + if (firmware_id) +diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h +index 5617ed3a7d7a..88055755f82e 100644 +--- a/drivers/input/mouse/trackpoint.h ++++ b/drivers/input/mouse/trackpoint.h +@@ -21,8 +21,9 @@ + #define TP_COMMAND 0xE2 /* Commands start with this */ + + #define TP_READ_ID 0xE1 /* Sent for device identification */ +-#define TP_MAGIC_IDENT 0x01 /* Sent after a TP_READ_ID followed */ ++#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */ + /* by the firmware ID */ ++ /* Firmware ID includes 0x1, 0x2, 0x3 */ + + + /* +diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h +index a2661381ddfc..d2774197fe58 100644 +--- a/drivers/misc/mei/hw-me-regs.h ++++ b/drivers/misc/mei/hw-me-regs.h +@@ -125,6 +125,11 @@ + #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ + #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ + ++#define MEI_DEV_ID_LBG 0xA1BA /* Lewisburg (SPT) */ ++ ++#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ ++#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ ++ + /* + * MEI HW Section + */ +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c +index 01e20384ac44..adab5bbb642a 100644 +--- a/drivers/misc/mei/pci-me.c ++++ b/drivers/misc/mei/pci-me.c +@@ -86,10 +86,14 @@ static const struct pci_device_id mei_me_pci_tbl[] = { + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)}, ++ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, mei_me_pch8_cfg)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, mei_me_pch8_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, mei_me_pch8_cfg)}, + ++ {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)}, ++ {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)}, ++ + /* required last entry */ + {0, } + }; +diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c +index ecc6fb9ca92f..3bbdf60f8908 100644 +--- a/drivers/ntb/ntb_transport.c ++++ b/drivers/ntb/ntb_transport.c +@@ -599,7 +599,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, + if (!mw->virt_addr) + return -ENOMEM; + +- if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) ++ if (mw_num < 
qp_count % mw_count) + num_qps_mw = qp_count / mw_count + 1; + else + num_qps_mw = qp_count / mw_count; +@@ -947,7 +947,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, + qp->event_handler = NULL; + ntb_qp_link_down_reset(qp); + +- if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) ++ if (mw_num < qp_count % mw_count) + num_qps_mw = qp_count / mw_count + 1; + else + num_qps_mw = qp_count / mw_count; +@@ -1065,8 +1065,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) + qp_count = ilog2(qp_bitmap); + if (max_num_clients && max_num_clients < qp_count) + qp_count = max_num_clients; +- else if (mw_count < qp_count) +- qp_count = mw_count; ++ else if (nt->mw_count < qp_count) ++ qp_count = nt->mw_count; + + qp_bitmap &= BIT_ULL(qp_count) - 1; + +diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c +index 02c3feef4e36..c2d2c17550a7 100644 +--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c ++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c +@@ -49,6 +49,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = { + {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ + {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ + {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ ++ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ + {} /* Terminating entry */ + }; + +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c +index 26a3b389a265..fa8df3fef6fc 100644 +--- a/fs/cifs/dir.c ++++ b/fs/cifs/dir.c +@@ -183,15 +183,20 @@ cifs_bp_rename_retry: + } + + /* ++ * Don't allow path components longer than the server max. + * Don't allow the separator character in a path component. + * The VFS will not allow "/", but "\" is allowed by posix. 
+ */ + static int +-check_name(struct dentry *direntry) ++check_name(struct dentry *direntry, struct cifs_tcon *tcon) + { + struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); + int i; + ++ if (unlikely(direntry->d_name.len > ++ tcon->fsAttrInfo.MaxPathNameComponentLength)) ++ return -ENAMETOOLONG; ++ + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { + for (i = 0; i < direntry->d_name.len; i++) { + if (direntry->d_name.name[i] == '\\') { +@@ -489,10 +494,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, + return finish_no_open(file, res); + } + +- rc = check_name(direntry); +- if (rc) +- return rc; +- + xid = get_xid(); + + cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n", +@@ -505,6 +506,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, + } + + tcon = tlink_tcon(tlink); ++ ++ rc = check_name(direntry, tcon); ++ if (rc) ++ goto out_free_xid; ++ + server = tcon->ses->server; + + if (server->ops->new_lease_key) +@@ -765,7 +771,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, + } + pTcon = tlink_tcon(tlink); + +- rc = check_name(direntry); ++ rc = check_name(direntry, pTcon); + if (rc) + goto lookup_out; + +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index f4afa3b1cc56..6c484ddf26a9 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -2768,8 +2768,8 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf, + kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) * + le32_to_cpu(pfs_inf->SectorsPerAllocationUnit); + kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits); +- kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits); +- kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); ++ kst->f_bfree = kst->f_bavail = ++ le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); + return; + } + +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c +index 3f68a25f2169..544672b440de 100644 +--- a/fs/nfsd/nfs4xdr.c ++++ b/fs/nfsd/nfs4xdr.c +@@ -129,7 +129,7 @@ static void next_decode_page(struct nfsd4_compoundargs *argp) + argp->p = page_address(argp->pagelist[0]); + argp->pagelist++; + if (argp->pagelen < PAGE_SIZE) { +- argp->end = argp->p + (argp->pagelen>>2); ++ argp->end = argp->p + XDR_QUADLEN(argp->pagelen); + argp->pagelen = 0; + } else { + argp->end = argp->p + (PAGE_SIZE>>2); +@@ -1246,9 +1246,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write) + argp->pagelen -= pages * PAGE_SIZE; + len -= pages * PAGE_SIZE; + +- argp->p = (__be32 *)page_address(argp->pagelist[0]); +- argp->pagelist++; +- argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE); ++ next_decode_page(argp); + } + argp->p += XDR_QUADLEN(len); + +diff --git a/include/net/ip.h b/include/net/ip.h +index b450d8653b30..7476bb10ff37 100644 +--- a/include/net/ip.h ++++ b/include/net/ip.h +@@ -314,7 +314,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, + !forwarding) + return dst_mtu(dst); + +- return min(dst->dev->mtu, IP_MAX_MTU); ++ return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU); + } + + static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) +@@ -327,7 +327,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) + return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding); + } + +- return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU); ++ return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU); + } + + u32 ip_idents_reserve(u32 hash, int segs); +diff --git a/include/net/sch_generic.h 
b/include/net/sch_generic.h +index e5bba897d206..7a5d6a073165 100644 +--- a/include/net/sch_generic.h ++++ b/include/net/sch_generic.h +@@ -717,8 +717,11 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, + old = *pold; + *pold = new; + if (old != NULL) { +- qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog); ++ unsigned int qlen = old->q.qlen; ++ unsigned int backlog = old->qstats.backlog; ++ + qdisc_reset(old); ++ qdisc_tree_reduce_backlog(old, qlen, backlog); + } + sch_tree_unlock(sch); + +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 784ab8fe8714..3697063dd09a 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -8473,28 +8473,27 @@ SYSCALL_DEFINE5(perf_event_open, + goto err_context; + + /* +- * Do not allow to attach to a group in a different +- * task or CPU context: ++ * Make sure we're both events for the same CPU; ++ * grouping events for different CPUs is broken; since ++ * you can never concurrently schedule them anyhow. + */ +- if (move_group) { +- /* +- * Make sure we're both on the same task, or both +- * per-cpu events. +- */ +- if (group_leader->ctx->task != ctx->task) +- goto err_context; ++ if (group_leader->cpu != event->cpu) ++ goto err_context; + +- /* +- * Make sure we're both events for the same CPU; +- * grouping events for different CPUs is broken; since +- * you can never concurrently schedule them anyhow. +- */ +- if (group_leader->cpu != event->cpu) +- goto err_context; +- } else { +- if (group_leader->ctx != ctx) +- goto err_context; +- } ++ /* ++ * Make sure we're both on the same task, or both ++ * per-CPU events. ++ */ ++ if (group_leader->ctx->task != ctx->task) ++ goto err_context; ++ ++ /* ++ * Do not allow to attach to a group in a different task ++ * or CPU context. If we're moving SW events, we'll fix ++ * this up later, so allow that. 
++ */ ++ if (!move_group && group_leader->ctx != ctx) ++ goto err_context; + + /* + * Only a group leader can be exclusive or pinned +diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c +index 6816302542b2..f0e5408499b6 100644 +--- a/kernel/trace/trace_events_filter.c ++++ b/kernel/trace/trace_events_filter.c +@@ -1979,6 +1979,10 @@ static int create_filter(struct trace_event_call *call, + if (err && set_str) + append_filter_err(ps, filter); + } ++ if (err && !set_str) { ++ free_event_filter(filter); ++ filter = NULL; ++ } + create_filter_finish(ps); + + *filterp = filter; +diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c +index 1641367e54ca..69f56073b337 100644 +--- a/net/bluetooth/bnep/core.c ++++ b/net/bluetooth/bnep/core.c +@@ -484,16 +484,16 @@ static int bnep_session(void *arg) + struct net_device *dev = s->dev; + struct sock *sk = s->sock->sk; + struct sk_buff *skb; +- wait_queue_t wait; ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); + + BT_DBG(""); + + set_user_nice(current, -15); + +- init_waitqueue_entry(&wait, current); + add_wait_queue(sk_sleep(sk), &wait); + while (1) { +- set_current_state(TASK_INTERRUPTIBLE); ++ /* Ensure session->terminate is updated */ ++ smp_mb__before_atomic(); + + if (atomic_read(&s->terminate)) + break; +@@ -515,9 +515,8 @@ static int bnep_session(void *arg) + break; + netif_wake_queue(dev); + +- schedule(); ++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } +- __set_current_state(TASK_RUNNING); + remove_wait_queue(sk_sleep(sk), &wait); + + /* Cleanup session */ +@@ -663,7 +662,7 @@ int bnep_del_connection(struct bnep_conndel_req *req) + s = __bnep_get_session(req->dst); + if (s) { + atomic_inc(&s->terminate); +- wake_up_process(s->task); ++ wake_up_interruptible(sk_sleep(s->sock->sk)); + } else + err = -ENOENT; + +diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c +index 298ed37010e6..3a39fd523e40 100644 +--- a/net/bluetooth/cmtp/core.c ++++ b/net/bluetooth/cmtp/core.c +@@ -281,16 +281,16 @@ static int cmtp_session(void *arg) + struct cmtp_session *session = arg; + struct sock *sk = session->sock->sk; + struct sk_buff *skb; +- wait_queue_t wait; ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); + + BT_DBG("session %p", session); + + set_user_nice(current, -15); + +- init_waitqueue_entry(&wait, current); + add_wait_queue(sk_sleep(sk), &wait); + while (1) { +- set_current_state(TASK_INTERRUPTIBLE); ++ /* Ensure session->terminate is updated */ ++ smp_mb__before_atomic(); + + if (atomic_read(&session->terminate)) + break; +@@ -307,9 +307,8 @@ static int cmtp_session(void *arg) + + cmtp_process_transmit(session); + +- schedule(); ++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } +- __set_current_state(TASK_RUNNING); + remove_wait_queue(sk_sleep(sk), &wait); + + down_write(&cmtp_session_sem); +@@ -394,7 +393,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) + err = cmtp_attach_device(session); + if (err < 0) { + atomic_inc(&session->terminate); +- wake_up_process(session->task); ++ wake_up_interruptible(sk_sleep(session->sock->sk)); + up_write(&cmtp_session_sem); + return err; + } +@@ -432,7 +431,11 @@ int cmtp_del_connection(struct cmtp_conndel_req *req) + + /* Stop session thread */ + atomic_inc(&session->terminate); +- wake_up_process(session->task); ++ ++ /* Ensure session->terminate is updated */ ++ smp_mb__after_atomic(); ++ ++ wake_up_interruptible(sk_sleep(session->sock->sk)); + } else + err = -ENOENT; + +diff --git 
a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c +index 0bec4588c3c8..1fc076420d1e 100644 +--- a/net/bluetooth/hidp/core.c ++++ b/net/bluetooth/hidp/core.c +@@ -36,6 +36,7 @@ + #define VERSION "1.2" + + static DECLARE_RWSEM(hidp_session_sem); ++static DECLARE_WAIT_QUEUE_HEAD(hidp_session_wq); + static LIST_HEAD(hidp_session_list); + + static unsigned char hidp_keycode[256] = { +@@ -1068,12 +1069,12 @@ static int hidp_session_start_sync(struct hidp_session *session) + * Wake up session thread and notify it to stop. This is asynchronous and + * returns immediately. Call this whenever a runtime error occurs and you want + * the session to stop. +- * Note: wake_up_process() performs any necessary memory-barriers for us. ++ * Note: wake_up_interruptible() performs any necessary memory-barriers for us. + */ + static void hidp_session_terminate(struct hidp_session *session) + { + atomic_inc(&session->terminate); +- wake_up_process(session->task); ++ wake_up_interruptible(&hidp_session_wq); + } + + /* +@@ -1180,7 +1181,9 @@ static void hidp_session_run(struct hidp_session *session) + struct sock *ctrl_sk = session->ctrl_sock->sk; + struct sock *intr_sk = session->intr_sock->sk; + struct sk_buff *skb; ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); + ++ add_wait_queue(&hidp_session_wq, &wait); + for (;;) { + /* + * This thread can be woken up two ways: +@@ -1188,12 +1191,10 @@ static void hidp_session_run(struct hidp_session *session) + * session->terminate flag and wakes this thread up. + * - Via modifying the socket state of ctrl/intr_sock. This + * thread is woken up by ->sk_state_changed(). +- * +- * Note: set_current_state() performs any necessary +- * memory-barriers for us. + */ +- set_current_state(TASK_INTERRUPTIBLE); + ++ /* Ensure session->terminate is updated */ ++ smp_mb__before_atomic(); + if (atomic_read(&session->terminate)) + break; + +@@ -1227,11 +1228,22 @@ static void hidp_session_run(struct hidp_session *session) + hidp_process_transmit(session, &session->ctrl_transmit, + session->ctrl_sock); + +- schedule(); ++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } ++ remove_wait_queue(&hidp_session_wq, &wait); + + atomic_inc(&session->terminate); +- set_current_state(TASK_RUNNING); ++ ++ /* Ensure session->terminate is updated */ ++ smp_mb__after_atomic(); ++} ++ ++static int hidp_session_wake_function(wait_queue_t *wait, ++ unsigned int mode, ++ int sync, void *key) ++{ ++ wake_up_interruptible(&hidp_session_wq); ++ return false; + } + + /* +@@ -1244,7 +1256,8 @@ static void hidp_session_run(struct hidp_session *session) + static int hidp_session_thread(void *arg) + { + struct hidp_session *session = arg; +- wait_queue_t ctrl_wait, intr_wait; ++ DEFINE_WAIT_FUNC(ctrl_wait, hidp_session_wake_function); ++ DEFINE_WAIT_FUNC(intr_wait, hidp_session_wake_function); + + BT_DBG("session %p", session); + +@@ -1254,8 +1267,6 @@ static int hidp_session_thread(void *arg) + set_user_nice(current, -15); + hidp_set_timer(session); + +- init_waitqueue_entry(&ctrl_wait, current); +- init_waitqueue_entry(&intr_wait, current); + add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait); + add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait); + /* This memory barrier is paired with wq_has_sleeper(). 
See +diff --git a/net/dccp/proto.c b/net/dccp/proto.c +index 9fe25bf63296..b68168fcc06a 100644 +--- a/net/dccp/proto.c ++++ b/net/dccp/proto.c +@@ -24,6 +24,7 @@ + #include + + #include ++#include + #include + #include + +@@ -170,6 +171,15 @@ const char *dccp_packet_name(const int type) + + EXPORT_SYMBOL_GPL(dccp_packet_name); + ++static void dccp_sk_destruct(struct sock *sk) ++{ ++ struct dccp_sock *dp = dccp_sk(sk); ++ ++ ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); ++ dp->dccps_hc_tx_ccid = NULL; ++ inet_sock_destruct(sk); ++} ++ + int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) + { + struct dccp_sock *dp = dccp_sk(sk); +@@ -179,6 +189,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) + icsk->icsk_syn_retries = sysctl_dccp_request_retries; + sk->sk_state = DCCP_CLOSED; + sk->sk_write_space = dccp_write_space; ++ sk->sk_destruct = dccp_sk_destruct; + icsk->icsk_sync_mss = dccp_sync_mss; + dp->dccps_mss_cache = 536; + dp->dccps_rate_last = jiffies; +@@ -201,10 +212,7 @@ void dccp_destroy_sock(struct sock *sk) + { + struct dccp_sock *dp = dccp_sk(sk); + +- /* +- * DCCP doesn't use sk_write_queue, just sk_send_head +- * for retransmissions +- */ ++ __skb_queue_purge(&sk->sk_write_queue); + if (sk->sk_send_head != NULL) { + kfree_skb(sk->sk_send_head); + sk->sk_send_head = NULL; +@@ -222,8 +230,7 @@ void dccp_destroy_sock(struct sock *sk) + dp->dccps_hc_rx_ackvec = NULL; + } + ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk); +- ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); +- dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL; ++ dp->dccps_hc_rx_ccid = NULL; + + /* clean up feature negotiation state */ + dccp_feat_list_purge(&dp->dccps_featneg); +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c +index b2504712259f..313e3c11a15a 100644 +--- a/net/ipv4/fib_semantics.c ++++ b/net/ipv4/fib_semantics.c +@@ -1044,15 +1044,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg) + fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); + if (!fi) + goto failure; +- fib_info_cnt++; + if (cfg->fc_mx) { + fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL); +- if (!fi->fib_metrics) +- goto failure; ++ if (unlikely(!fi->fib_metrics)) { ++ kfree(fi); ++ return ERR_PTR(err); ++ } + atomic_set(&fi->fib_metrics->refcnt, 1); +- } else ++ } else { + fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics; +- ++ } ++ fib_info_cnt++; + fi->fib_net = net; + fi->fib_protocol = cfg->fc_protocol; + fi->fib_scope = cfg->fc_scope; +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index c295d882c6e0..0294f7c99c85 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -1241,7 +1241,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst) + if (mtu) + return mtu; + +- mtu = dst->dev->mtu; ++ mtu = READ_ONCE(dst->dev->mtu); + + if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { + if (rt->rt_uses_gateway && mtu > 576) +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index f0dabd125c43..c4bbf704ff9c 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -3028,8 +3028,7 @@ void tcp_rearm_rto(struct sock *sk) + /* delta may not be positive if the socket is locked + * when the retrans timer fires and is rescheduled. 
+ */ +- if (delta > 0) +- rto = delta; ++ rto = max(delta, 1); + } + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, + TCP_RTO_MAX); +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c +index f60e8caea767..aad8cdf15472 100644 +--- a/net/ipv6/ip6_fib.c ++++ b/net/ipv6/ip6_fib.c +@@ -892,6 +892,8 @@ add: + } + nsiblings = iter->rt6i_nsiblings; + fib6_purge_rt(iter, fn, info->nl_net); ++ if (fn->rr_ptr == iter) ++ fn->rr_ptr = NULL; + rt6_release(iter); + + if (nsiblings) { +@@ -904,6 +906,8 @@ add: + if (rt6_qualify_for_ecmp(iter)) { + *ins = iter->dst.rt6_next; + fib6_purge_rt(iter, fn, info->nl_net); ++ if (fn->rr_ptr == iter) ++ fn->rr_ptr = NULL; + rt6_release(iter); + nsiblings--; + } else { +@@ -992,7 +996,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, + /* Create subtree root node */ + sfn = node_alloc(); + if (!sfn) +- goto st_failure; ++ goto failure; + + sfn->leaf = info->nl_net->ipv6.ip6_null_entry; + atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref); +@@ -1008,12 +1012,12 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, + + if (IS_ERR(sn)) { + /* If it is failed, discard just allocated +- root, and then (in st_failure) stale node ++ root, and then (in failure) stale node + in main tree. + */ + node_free(sfn); + err = PTR_ERR(sn); +- goto st_failure; ++ goto failure; + } + + /* Now link new subtree to main tree */ +@@ -1027,7 +1031,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, + + if (IS_ERR(sn)) { + err = PTR_ERR(sn); +- goto st_failure; ++ goto failure; + } + } + +@@ -1069,22 +1073,22 @@ out: + atomic_inc(&pn->leaf->rt6i_ref); + } + #endif +- if (!(rt->dst.flags & DST_NOCACHE)) +- dst_free(&rt->dst); ++ goto failure; + } + return err; + +-#ifdef CONFIG_IPV6_SUBTREES +- /* Subtree creation failed, probably main tree node +- is orphan. If it is, shoot it. ++failure: ++ /* fn->leaf could be NULL if fn is an intermediate node and we ++ * failed to add the new route to it in both subtree creation ++ * failure and fib6_add_rt2node() failure case. ++ * In both cases, fib6_repair_tree() should be called to fix ++ * fn->leaf. 
+ */ +-st_failure: + if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) + fib6_repair_tree(info->nl_net, fn); + if (!(rt->dst.flags & DST_NOCACHE)) + dst_free(&rt->dst); + return err; +-#endif + } + + /* +diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c +index 8d2f7c9b491d..4a116d766c15 100644 +--- a/net/irda/af_irda.c ++++ b/net/irda/af_irda.c +@@ -2227,7 +2227,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname, + { + struct sock *sk = sock->sk; + struct irda_sock *self = irda_sk(sk); +- struct irda_device_list list; ++ struct irda_device_list list = { 0 }; + struct irda_device_info *discoveries; + struct irda_ias_set * ias_opt; /* IAS get/query params */ + struct ias_object * ias_obj; /* Object in IAS */ +diff --git a/net/key/af_key.c b/net/key/af_key.c +index 2e1050ec2cf0..94bf810ad242 100644 +--- a/net/key/af_key.c ++++ b/net/key/af_key.c +@@ -228,7 +228,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, + #define BROADCAST_ONE 1 + #define BROADCAST_REGISTERED 2 + #define BROADCAST_PROMISC_ONLY 4 +-static int pfkey_broadcast(struct sk_buff *skb, ++static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, + int broadcast_flags, struct sock *one_sk, + struct net *net) + { +@@ -278,7 +278,7 @@ static int pfkey_broadcast(struct sk_buff *skb, + rcu_read_unlock(); + + if (one_sk != NULL) +- err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk); ++ err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); + + kfree_skb(skb2); + kfree_skb(skb); +@@ -311,7 +311,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk) + hdr = (struct sadb_msg *) pfk->dump.skb->data; + hdr->sadb_msg_seq = 0; + hdr->sadb_msg_errno = rc; +- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, ++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, + &pfk->sk, sock_net(&pfk->sk)); + pfk->dump.skb = NULL; + } +@@ -355,7 +355,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk) + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / + sizeof(uint64_t)); + +- pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk)); ++ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk)); + + return 0; + } +@@ -1396,7 +1396,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_ + + xfrm_state_put(x); + +- pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net); ++ pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net); + + return 0; + } +@@ -1483,7 +1483,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c) + hdr->sadb_msg_seq = c->seq; + hdr->sadb_msg_pid = c->portid; + +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x)); ++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x)); + + return 0; + } +@@ -1596,7 +1596,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg + out_hdr->sadb_msg_reserved = 0; + out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; + out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; +- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk)); ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk)); + + return 0; + } +@@ -1701,8 +1701,8 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad + return -ENOBUFS; + } + +- pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk)); +- ++ pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk, ++ sock_net(sk)); + return 0; + } + +@@ -1720,7 +1720,8 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr) 
+ hdr->sadb_msg_errno = (uint8_t) 0; + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); + +- return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk)); ++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, ++ sock_net(sk)); + } + + static int key_notify_sa_flush(const struct km_event *c) +@@ -1741,7 +1742,7 @@ static int key_notify_sa_flush(const struct km_event *c) + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); + hdr->sadb_msg_reserved = 0; + +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net); ++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); + + return 0; + } +@@ -1798,7 +1799,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr) + out_hdr->sadb_msg_pid = pfk->dump.msg_portid; + + if (pfk->dump.skb) +- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, ++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, + &pfk->sk, sock_net(&pfk->sk)); + pfk->dump.skb = out_skb; + +@@ -1886,7 +1887,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb + new_hdr->sadb_msg_errno = 0; + } + +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk)); ++ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk)); + return 0; + } + +@@ -2219,7 +2220,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev + out_hdr->sadb_msg_errno = 0; + out_hdr->sadb_msg_seq = c->seq; + out_hdr->sadb_msg_pid = c->portid; +- pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp)); ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp)); + return 0; + + } +@@ -2439,7 +2440,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc + out_hdr->sadb_msg_errno = 0; + out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; + out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; +- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp)); ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp)); + err = 0; + + out: +@@ -2695,7 +2696,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr) + out_hdr->sadb_msg_pid = pfk->dump.msg_portid; + + if (pfk->dump.skb) +- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, ++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, + &pfk->sk, sock_net(&pfk->sk)); + pfk->dump.skb = out_skb; + +@@ -2752,7 +2753,7 @@ static int key_notify_policy_flush(const struct km_event *c) + hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); + hdr->sadb_msg_reserved = 0; +- pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net); ++ pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); + return 0; + + } +@@ -2814,7 +2815,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb + void *ext_hdrs[SADB_EXT_MAX]; + int err; + +- pfkey_broadcast(skb_clone(skb, GFP_KERNEL), ++ pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL, + BROADCAST_PROMISC_ONLY, NULL, sock_net(sk)); + + memset(ext_hdrs, 0, sizeof(ext_hdrs)); +@@ -3036,7 +3037,8 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c) + out_hdr->sadb_msg_seq = 0; + out_hdr->sadb_msg_pid = 0; + +- pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x)); ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, ++ xs_net(x)); + return 0; + } + +@@ -3226,7 +3228,8 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct + xfrm_ctx->ctx_len); + } + +- return pfkey_broadcast(skb, 
BROADCAST_REGISTERED, NULL, xs_net(x)); ++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, ++ xs_net(x)); + } + + static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt, +@@ -3424,7 +3427,8 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, + n_port->sadb_x_nat_t_port_port = sport; + n_port->sadb_x_nat_t_port_reserved = 0; + +- return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x)); ++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, ++ xs_net(x)); + } + + #ifdef CONFIG_NET_KEY_MIGRATE +@@ -3616,7 +3620,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, + } + + /* broadcast migrate message to sockets */ +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net); ++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net); + + return 0; + +diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c +index 0915d448ba23..075b0d22f213 100644 +--- a/net/sched/act_ipt.c ++++ b/net/sched/act_ipt.c +@@ -34,6 +34,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int + { + struct xt_tgchk_param par; + struct xt_target *target; ++ struct ipt_entry e = {}; + int ret = 0; + + target = xt_request_find_target(AF_INET, t->u.user.name, +@@ -44,6 +45,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int + t->u.kernel.target = target; + memset(&par, 0, sizeof(par)); + par.table = table; ++ par.entryinfo = &e; + par.target = target; + par.targinfo = t->data; + par.hook_mask = hook; +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c +index 4431e2833e45..3f2c3eed04da 100644 +--- a/net/sched/sch_sfq.c ++++ b/net/sched/sch_sfq.c +@@ -434,6 +434,7 @@ congestion_drop: + qdisc_drop(head, sch); + + slot_queue_add(slot, skb); ++ qdisc_tree_reduce_backlog(sch, 0, delta); + return NET_XMIT_CN; + } + +@@ -465,8 +466,10 @@ enqueue: + /* Return Congestion Notification only if we dropped a packet + * from this flow. + */ +- if (qlen != slot->qlen) ++ if (qlen != slot->qlen) { ++ qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb)); + return NET_XMIT_CN; ++ } + + /* As we dropped a packet, better let upper stack know this */ + qdisc_tree_reduce_backlog(sch, 1, dropped); +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c +index 7527c168e471..e33e9bd4ed5a 100644 +--- a/net/sctp/ipv6.c ++++ b/net/sctp/ipv6.c +@@ -510,7 +510,9 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr, + { + addr->sa.sa_family = AF_INET6; + addr->v6.sin6_port = port; ++ addr->v6.sin6_flowinfo = 0; + addr->v6.sin6_addr = *saddr; ++ addr->v6.sin6_scope_id = 0; + } + + /* Compare addresses exactly. 
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c +index a0c90572d0e5..f86c6555a539 100644 +--- a/net/tipc/netlink_compat.c ++++ b/net/tipc/netlink_compat.c +@@ -258,13 +258,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, + arg = nlmsg_new(0, GFP_KERNEL); + if (!arg) { + kfree_skb(msg->rep); ++ msg->rep = NULL; + return -ENOMEM; + } + + err = __tipc_nl_compat_dumpit(cmd, msg, arg); +- if (err) ++ if (err) { + kfree_skb(msg->rep); +- ++ msg->rep = NULL; ++ } + kfree_skb(arg); + + return err; +diff --git a/sound/core/control.c b/sound/core/control.c +index b4fe9b002512..bd01d492f46a 100644 +--- a/sound/core/control.c ++++ b/sound/core/control.c +@@ -1126,7 +1126,7 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol, + mutex_lock(&ue->card->user_ctl_lock); + change = ue->tlv_data_size != size; + if (!change) +- change = memcmp(ue->tlv_data, new_data, size); ++ change = memcmp(ue->tlv_data, new_data, size) != 0; + kfree(ue->tlv_data); + ue->tlv_data = new_data; + ue->tlv_data_size = size; +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c +index 46f7b023f69c..ac5de4365e15 100644 +--- a/sound/pci/hda/patch_conexant.c ++++ b/sound/pci/hda/patch_conexant.c +@@ -854,6 +854,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { + SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC), ++ SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI), + SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004), +diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c +index 54c33204541f..ff6fcd9f92f7 100644 +--- a/sound/soc/generic/simple-card.c ++++ b/sound/soc/generic/simple-card.c +@@ -100,7 +100,7 @@ static int asoc_simple_card_hw_params(struct snd_pcm_substream *substream, + if (ret && ret != -ENOTSUPP) + goto err; + } +- ++ return 0; + err: + return ret; + } +diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c +index 2a5b3a293cd2..b123734f9fbd 100644 +--- a/sound/soc/sh/rcar/adg.c ++++ b/sound/soc/sh/rcar/adg.c +@@ -437,7 +437,7 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv, + struct device *dev = rsnd_priv_to_dev(priv); + struct device_node *np = dev->of_node; + u32 ckr, rbgx, rbga, rbgb; +- u32 rate, req_rate, div; ++ u32 rate, req_rate = 0, div; + uint32_t count = 0; + unsigned long req_48kHz_rate, req_441kHz_rate; + int i; +diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c +index deed48ef28b8..362446c36c9e 100644 +--- a/sound/soc/sh/rcar/core.c ++++ b/sound/soc/sh/rcar/core.c +@@ -192,19 +192,16 @@ void rsnd_mod_interrupt(struct rsnd_mod *mod, + struct rsnd_priv *priv = rsnd_mod_to_priv(mod); + struct rsnd_dai_stream *io; + struct rsnd_dai *rdai; +- int i, j; +- +- for_each_rsnd_dai(rdai, priv, j) { ++ int i; + +- for (i = 0; i < RSND_MOD_MAX; i++) { +- io = &rdai->playback; +- if (mod == io->mod[i]) +- callback(mod, io); ++ for_each_rsnd_dai(rdai, priv, i) { ++ io = &rdai->playback; ++ if (mod == io->mod[mod->type]) ++ callback(mod, io); + +- io = &rdai->capture; +- if (mod == io->mod[i]) +- callback(mod, io); +- } ++ io = &rdai->capture; ++ if (mod == io->mod[mod->type]) ++ callback(mod, io); + } + } + +@@ -1019,7 
+1016,7 @@ static int rsnd_kctrl_put(struct snd_kcontrol *kctrl, + } + } + +- if (change) ++ if (change && cfg->update) + cfg->update(cfg->io, mod); + + return change; +diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c +index 68b439ed22d7..460d29cbaaa5 100644 +--- a/sound/soc/sh/rcar/src.c ++++ b/sound/soc/sh/rcar/src.c +@@ -691,13 +691,27 @@ static int _rsnd_src_stop_gen2(struct rsnd_mod *mod) + { + rsnd_src_irq_disable_gen2(mod); + +- rsnd_mod_write(mod, SRC_CTRL, 0); ++ /* ++ * stop SRC output only ++ * see rsnd_src_quit_gen2 ++ */ ++ rsnd_mod_write(mod, SRC_CTRL, 0x01); + + rsnd_src_error_record_gen2(mod); + + return rsnd_src_stop(mod); + } + ++static int rsnd_src_quit_gen2(struct rsnd_mod *mod, ++ struct rsnd_dai_stream *io, ++ struct rsnd_priv *priv) ++{ ++ /* stop both out/in */ ++ rsnd_mod_write(mod, SRC_CTRL, 0); ++ ++ return 0; ++} ++ + static void __rsnd_src_interrupt_gen2(struct rsnd_mod *mod, + struct rsnd_dai_stream *io) + { +@@ -971,7 +985,7 @@ static struct rsnd_mod_ops rsnd_src_gen2_ops = { + .probe = rsnd_src_probe_gen2, + .remove = rsnd_src_remove_gen2, + .init = rsnd_src_init_gen2, +- .quit = rsnd_src_quit, ++ .quit = rsnd_src_quit_gen2, + .start = rsnd_src_start_gen2, + .stop = rsnd_src_stop_gen2, + .hw_params = rsnd_src_hw_params, +diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c +index 1427ec21bd7e..c62a2947ac14 100644 +--- a/sound/soc/sh/rcar/ssi.c ++++ b/sound/soc/sh/rcar/ssi.c +@@ -39,6 +39,7 @@ + #define SCKP (1 << 13) /* Serial Bit Clock Polarity */ + #define SWSP (1 << 12) /* Serial WS Polarity */ + #define SDTA (1 << 10) /* Serial Data Alignment */ ++#define PDTA (1 << 9) /* Parallel Data Alignment */ + #define DEL (1 << 8) /* Serial Data Delay */ + #define CKDV(v) (v << 4) /* Serial Clock Division Ratio */ + #define TRMD (1 << 1) /* Transmit/Receive Mode Select */ +@@ -286,7 +287,7 @@ static int rsnd_ssi_init(struct rsnd_mod *mod, + struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); + u32 cr; + +- cr = FORCE; ++ cr = FORCE | PDTA; + + /* + * always use 32bit system word for easy clock calculation. diff --git a/patch/kernel/mvebu-default/03-patch-4.4.85-86.patch b/patch/kernel/mvebu-default/03-patch-4.4.85-86.patch new file mode 100644 index 000000000..a20d51922 --- /dev/null +++ b/patch/kernel/mvebu-default/03-patch-4.4.85-86.patch @@ -0,0 +1,393 @@ +diff --git a/Makefile b/Makefile +index 0f3d843f42a7..1207bf6a0e7a 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 85 ++SUBLEVEL = 86 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c +index 4c46c54a3ad7..6638903f0cb9 100644 +--- a/arch/arm64/kernel/fpsimd.c ++++ b/arch/arm64/kernel/fpsimd.c +@@ -157,9 +157,11 @@ void fpsimd_thread_switch(struct task_struct *next) + + void fpsimd_flush_thread(void) + { ++ preempt_disable(); + memset(¤t->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); + fpsimd_flush_task_state(current); + set_thread_flag(TIF_FOREIGN_FPSTATE); ++ preempt_enable(); + } + + /* +diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c +index a4b466424a32..7fabf49f2aeb 100644 +--- a/arch/arm64/mm/fault.c ++++ b/arch/arm64/mm/fault.c +@@ -313,8 +313,11 @@ retry: + * signal first. We do not need to release the mmap_sem because it + * would already be released in __lock_page_or_retry in mm/filemap.c. 
+ */ +- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) ++ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) { ++ if (!user_mode(regs)) ++ goto no_context; + return 0; ++ } + + /* + * Major/minor page fault accounting is only done on the initial +diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h +index de25aad07853..9016b4b70375 100644 +--- a/arch/x86/include/asm/io.h ++++ b/arch/x86/include/asm/io.h +@@ -304,13 +304,13 @@ static inline unsigned type in##bwl##_p(int port) \ + static inline void outs##bwl(int port, const void *addr, unsigned long count) \ + { \ + asm volatile("rep; outs" #bwl \ +- : "+S"(addr), "+c"(count) : "d"(port)); \ ++ : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \ + } \ + \ + static inline void ins##bwl(int port, void *addr, unsigned long count) \ + { \ + asm volatile("rep; ins" #bwl \ +- : "+D"(addr), "+c"(count) : "d"(port)); \ ++ : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \ + } + + BUILDIO(b, b, char) +diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c +index cc91ae832ffb..6fd7b50c5747 100644 +--- a/drivers/gpu/drm/i915/intel_uncore.c ++++ b/drivers/gpu/drm/i915/intel_uncore.c +@@ -635,7 +635,8 @@ hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv) + "enabling oneshot unclaimed register reporting. " + "Please use i915.mmio_debug=N for more information.\n"); + __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); +- i915.mmio_debug = mmio_debug_once--; ++ i915.mmio_debug = mmio_debug_once; ++ mmio_debug_once = false; + } + } + +diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c +index f325663c27c5..4b58e8aaf5c5 100644 +--- a/drivers/i2c/busses/i2c-jz4780.c ++++ b/drivers/i2c/busses/i2c-jz4780.c +@@ -786,10 +786,6 @@ static int jz4780_i2c_probe(struct platform_device *pdev) + + jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0); + +- i2c->cmd = 0; +- memset(i2c->cmd_buf, 0, BUFSIZE); +- memset(i2c->data_buf, 0, BUFSIZE); +- + i2c->irq = platform_get_irq(pdev, 0); + ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0, + dev_name(&pdev->dev), i2c); +diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c +index 257a9eadd595..4ac6764f4897 100644 +--- a/drivers/net/wireless/p54/fwio.c ++++ b/drivers/net/wireless/p54/fwio.c +@@ -488,7 +488,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell) + + entry += sizeof(__le16); + chan->pa_points_per_curve = 8; +- memset(chan->curve_data, 0, sizeof(*chan->curve_data)); ++ memset(chan->curve_data, 0, sizeof(chan->curve_data)); + memcpy(chan->curve_data, entry, + sizeof(struct p54_pa_curve_data_sample) * + min((u8)8, curve_data->points_per_channel)); +diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c +index 1910100638a2..00602abec0ea 100644 +--- a/drivers/scsi/isci/remote_node_context.c ++++ b/drivers/scsi/isci/remote_node_context.c +@@ -66,6 +66,9 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state) + { + static const char * const strings[] = RNC_STATES; + ++ if (state >= ARRAY_SIZE(strings)) ++ return "UNKNOWN"; ++ + return strings[state]; + } + #undef C +diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c +index 0e6aaef9a038..c74f74ab981c 100644 +--- a/drivers/scsi/lpfc/lpfc_els.c ++++ b/drivers/scsi/lpfc/lpfc_els.c +@@ -1054,7 +1054,10 @@ stop_rr_fcf_flogi: + lpfc_sli4_unreg_all_rpis(vport); + } + } +- lpfc_issue_reg_vfi(vport); ++ ++ /* Do not register VFI if the 
driver aborted FLOGI */ ++ if (!lpfc_error_lost_link(irsp)) ++ lpfc_issue_reg_vfi(vport); + lpfc_nlp_put(ndlp); + goto out; + } +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index 6514636431ab..8a9e139e2853 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -153,6 +153,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */ + struct sg_device *parentdp; /* owning device */ + wait_queue_head_t read_wait; /* queue read until command done */ + rwlock_t rq_list_lock; /* protect access to list in req_arr */ ++ struct mutex f_mutex; /* protect against changes in this fd */ + int timeout; /* defaults to SG_DEFAULT_TIMEOUT */ + int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */ + Sg_scatter_hold reserve; /* buffer held for this file descriptor */ +@@ -166,6 +167,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */ + unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */ + char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ + char mmap_called; /* 0 -> mmap() never called on this fd */ ++ char res_in_use; /* 1 -> 'reserve' array in use */ + struct kref f_ref; + struct execute_work ew; + } Sg_fd; +@@ -209,7 +211,6 @@ static void sg_remove_sfp(struct kref *); + static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id); + static Sg_request *sg_add_request(Sg_fd * sfp); + static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); +-static int sg_res_in_use(Sg_fd * sfp); + static Sg_device *sg_get_dev(int dev); + static void sg_device_destroy(struct kref *kref); + +@@ -625,6 +626,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) + } + buf += SZ_SG_HEADER; + __get_user(opcode, buf); ++ mutex_lock(&sfp->f_mutex); + if (sfp->next_cmd_len > 0) { + cmd_size = sfp->next_cmd_len; + sfp->next_cmd_len = 0; /* reset so only this write() effected */ +@@ -633,6 +635,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) + if ((opcode >= 0xc0) && old_hdr.twelve_byte) + cmd_size = 12; + } ++ mutex_unlock(&sfp->f_mutex); + SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, + "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size)); + /* Determine buffer size. 
*/ +@@ -732,7 +735,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, + sg_remove_request(sfp, srp); + return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */ + } +- if (sg_res_in_use(sfp)) { ++ if (sfp->res_in_use) { + sg_remove_request(sfp, srp); + return -EBUSY; /* reserve buffer already being used */ + } +@@ -902,7 +905,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) + return result; + if (val) { + sfp->low_dma = 1; +- if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) { ++ if ((0 == sfp->low_dma) && !sfp->res_in_use) { + val = (int) sfp->reserve.bufflen; + sg_remove_scat(sfp, &sfp->reserve); + sg_build_reserve(sfp, val); +@@ -977,12 +980,18 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) + return -EINVAL; + val = min_t(int, val, + max_sectors_bytes(sdp->device->request_queue)); ++ mutex_lock(&sfp->f_mutex); + if (val != sfp->reserve.bufflen) { +- if (sg_res_in_use(sfp) || sfp->mmap_called) ++ if (sfp->mmap_called || ++ sfp->res_in_use) { ++ mutex_unlock(&sfp->f_mutex); + return -EBUSY; ++ } ++ + sg_remove_scat(sfp, &sfp->reserve); + sg_build_reserve(sfp, val); + } ++ mutex_unlock(&sfp->f_mutex); + return 0; + case SG_GET_RESERVED_SIZE: + val = min_t(int, sfp->reserve.bufflen, +@@ -1737,13 +1746,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd) + md = &map_data; + + if (md) { +- if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen) ++ mutex_lock(&sfp->f_mutex); ++ if (dxfer_len <= rsv_schp->bufflen && ++ !sfp->res_in_use) { ++ sfp->res_in_use = 1; + sg_link_reserve(sfp, srp, dxfer_len); +- else { ++ } else if ((hp->flags & SG_FLAG_MMAP_IO) && sfp->res_in_use) { ++ mutex_unlock(&sfp->f_mutex); ++ return -EBUSY; ++ } else { + res = sg_build_indirect(req_schp, sfp, dxfer_len); +- if (res) ++ if (res) { ++ mutex_unlock(&sfp->f_mutex); + return res; ++ } + } ++ mutex_unlock(&sfp->f_mutex); + + md->pages = req_schp->pages; + md->page_order = req_schp->page_order; +@@ -2034,6 +2052,8 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) + req_schp->sglist_len = 0; + sfp->save_scat_len = 0; + srp->res_used = 0; ++ /* Called without mutex lock to avoid deadlock */ ++ sfp->res_in_use = 0; + } + + static Sg_request * +@@ -2145,6 +2165,7 @@ sg_add_sfp(Sg_device * sdp) + rwlock_init(&sfp->rq_list_lock); + + kref_init(&sfp->f_ref); ++ mutex_init(&sfp->f_mutex); + sfp->timeout = SG_DEFAULT_TIMEOUT; + sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER; + sfp->force_packid = SG_DEF_FORCE_PACK_ID; +@@ -2220,20 +2241,6 @@ sg_remove_sfp(struct kref *kref) + schedule_work(&sfp->ew.work); + } + +-static int +-sg_res_in_use(Sg_fd * sfp) +-{ +- const Sg_request *srp; +- unsigned long iflags; +- +- read_lock_irqsave(&sfp->rq_list_lock, iflags); +- for (srp = sfp->headrp; srp; srp = srp->nextrp) +- if (srp->res_used) +- break; +- read_unlock_irqrestore(&sfp->rq_list_lock, iflags); +- return srp ? 
1 : 0; +-} +- + #ifdef CONFIG_SCSI_PROC_FS + static int + sg_idr_max_id(int id, void *p, void *data) +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index 9c62a6f9757a..600c67ef8a03 100644 +--- a/fs/btrfs/volumes.c ++++ b/fs/btrfs/volumes.c +@@ -108,7 +108,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { + }, + }; + +-const u64 const btrfs_raid_group[BTRFS_NR_RAID_TYPES] = { ++const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = { + [BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10, + [BTRFS_RAID_RAID1] = BTRFS_BLOCK_GROUP_RAID1, + [BTRFS_RAID_DUP] = BTRFS_BLOCK_GROUP_DUP, +diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h +index 782d4e814e21..4bc4b1b13193 100644 +--- a/include/linux/lightnvm.h ++++ b/include/linux/lightnvm.h +@@ -310,6 +310,7 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, + { + struct ppa_addr l; + ++ l.ppa = 0; + /* + * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc. + */ +diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c +index 7080ae1eb6c1..f850e906564b 100644 +--- a/kernel/gcov/base.c ++++ b/kernel/gcov/base.c +@@ -98,6 +98,12 @@ void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters) + } + EXPORT_SYMBOL(__gcov_merge_icall_topn); + ++void __gcov_exit(void) ++{ ++ /* Unused. */ ++} ++EXPORT_SYMBOL(__gcov_exit); ++ + /** + * gcov_enable_events - enable event reporting through gcov_event() + * +diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c +index e25e92fb44fa..46a18e72bce6 100644 +--- a/kernel/gcov/gcc_4_7.c ++++ b/kernel/gcov/gcc_4_7.c +@@ -18,7 +18,9 @@ + #include + #include "gcov.h" + +-#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1 ++#if (__GNUC__ >= 7) ++#define GCOV_COUNTERS 9 ++#elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) + #define GCOV_COUNTERS 10 + #elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9 + #define GCOV_COUNTERS 9 +diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c +index 74177189063c..d3125c169684 100644 +--- a/sound/pci/au88x0/au88x0_core.c ++++ b/sound/pci/au88x0/au88x0_core.c +@@ -2150,8 +2150,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir, + stream->resources, en, + VORTEX_RESOURCE_SRC)) < 0) { + memset(stream->resources, 0, +- sizeof(unsigned char) * +- VORTEX_RESOURCE_LAST); ++ sizeof(stream->resources)); + return -EBUSY; + } + if (stream->type != VORTEX_PCM_A3D) { +@@ -2161,7 +2160,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir, + VORTEX_RESOURCE_MIXIN)) < 0) { + memset(stream->resources, + 0, +- sizeof(unsigned char) * VORTEX_RESOURCE_LAST); ++ sizeof(stream->resources)); + return -EBUSY; + } + } +@@ -2174,8 +2173,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir, + stream->resources, en, + VORTEX_RESOURCE_A3D)) < 0) { + memset(stream->resources, 0, +- sizeof(unsigned char) * +- VORTEX_RESOURCE_LAST); ++ sizeof(stream->resources)); + dev_err(vortex->card->dev, + "out of A3D sources. 
Sorry\n"); + return -EBUSY; +@@ -2289,8 +2287,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir, + VORTEX_RESOURCE_MIXOUT)) + < 0) { + memset(stream->resources, 0, +- sizeof(unsigned char) * +- VORTEX_RESOURCE_LAST); ++ sizeof(stream->resources)); + return -EBUSY; + } + if ((src[i] = +@@ -2298,8 +2295,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir, + stream->resources, en, + VORTEX_RESOURCE_SRC)) < 0) { + memset(stream->resources, 0, +- sizeof(unsigned char) * +- VORTEX_RESOURCE_LAST); ++ sizeof(stream->resources)); + return -EBUSY; + } + } diff --git a/patch/kernel/mvebu-default/03-patch-4.4.86-87.patch b/patch/kernel/mvebu-default/03-patch-4.4.86-87.patch new file mode 100644 index 000000000..5292853d3 --- /dev/null +++ b/patch/kernel/mvebu-default/03-patch-4.4.86-87.patch @@ -0,0 +1,408 @@ +diff --git a/Makefile b/Makefile +index 1207bf6a0e7a..f6838187b568 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 86 ++SUBLEVEL = 87 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h +index 4cb4b6d3452c..0bc66e1d3a7e 100644 +--- a/arch/alpha/include/asm/types.h ++++ b/arch/alpha/include/asm/types.h +@@ -1,6 +1,6 @@ + #ifndef _ALPHA_TYPES_H + #define _ALPHA_TYPES_H + +-#include ++#include + + #endif /* _ALPHA_TYPES_H */ +diff --git a/arch/alpha/include/uapi/asm/types.h b/arch/alpha/include/uapi/asm/types.h +index 9fd3cd459777..8d1024d7be05 100644 +--- a/arch/alpha/include/uapi/asm/types.h ++++ b/arch/alpha/include/uapi/asm/types.h +@@ -9,8 +9,18 @@ + * need to be careful to avoid a name clashes. + */ + +-#ifndef __KERNEL__ ++/* ++ * This is here because we used to use l64 for alpha ++ * and we don't want to impact user mode with our change to ll64 ++ * in the kernel. ++ * ++ * However, some user programs are fine with this. They can ++ * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here. ++ */ ++#if !defined(__SANE_USERSPACE_TYPES__) && !defined(__KERNEL__) + #include ++#else ++#include + #endif + + #endif /* _UAPI_ALPHA_TYPES_H */ +diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c +index ba079e279b58..e8835d4e173c 100644 +--- a/arch/arm/kvm/mmu.c ++++ b/arch/arm/kvm/mmu.c +@@ -824,24 +824,25 @@ void stage2_unmap_vm(struct kvm *kvm) + * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all + * underlying level-2 and level-3 tables before freeing the actual level-1 table + * and setting the struct pointer to NULL. +- * +- * Note we don't need locking here as this is only called when the VM is +- * destroyed, which can only be done once. 
+ */ + void kvm_free_stage2_pgd(struct kvm *kvm) + { +- if (kvm->arch.pgd == NULL) +- return; ++ void *pgd = NULL; ++ void *hwpgd = NULL; + + spin_lock(&kvm->mmu_lock); +- unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); ++ if (kvm->arch.pgd) { ++ unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); ++ pgd = READ_ONCE(kvm->arch.pgd); ++ hwpgd = kvm_get_hwpgd(kvm); ++ kvm->arch.pgd = NULL; ++ } + spin_unlock(&kvm->mmu_lock); + +- kvm_free_hwpgd(kvm_get_hwpgd(kvm)); +- if (KVM_PREALLOC_LEVEL > 0) +- kfree(kvm->arch.pgd); +- +- kvm->arch.pgd = NULL; ++ if (hwpgd) ++ kvm_free_hwpgd(hwpgd); ++ if (KVM_PREALLOC_LEVEL > 0 && pgd) ++ kfree(pgd); + } + + static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, +diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c +index f5e9f9310b48..b3b0004ea8ac 100644 +--- a/crypto/algif_skcipher.c ++++ b/crypto/algif_skcipher.c +@@ -86,8 +86,13 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq) + } + sgl = sreq->tsg; + n = sg_nents(sgl); +- for_each_sg(sgl, sg, n, i) +- put_page(sg_page(sg)); ++ for_each_sg(sgl, sg, n, i) { ++ struct page *page = sg_page(sg); ++ ++ /* some SGs may not have a page mapped */ ++ if (page && atomic_read(&page->_count)) ++ put_page(page); ++ } + + kfree(sreq->tsg); + } +diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c +index 025c429050c0..5d8dfe027b30 100644 +--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c ++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c +@@ -612,7 +612,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, + } else { + pr_err("Failed to fill pool (%p)\n", pool); + /* If we have any pages left put them to the pool. */ +- list_for_each_entry(p, &pool->list, lru) { ++ list_for_each_entry(p, &new_pages, lru) { + ++cpages; + } + list_splice(&new_pages, &pool->list); +diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c +index 7ba795b24e75..639d1a9c8793 100644 +--- a/drivers/i2c/busses/i2c-ismt.c ++++ b/drivers/i2c/busses/i2c-ismt.c +@@ -339,8 +339,10 @@ static int ismt_process_desc(const struct ismt_desc *desc, + break; + case I2C_SMBUS_BLOCK_DATA: + case I2C_SMBUS_I2C_BLOCK_DATA: +- memcpy(&data->block[1], dma_buffer, desc->rxbytes); +- data->block[0] = desc->rxbytes; ++ if (desc->rxbytes != dma_buffer[0] + 1) ++ return -EMSGSIZE; ++ ++ memcpy(data->block, dma_buffer, desc->rxbytes); + break; + } + return 0; +diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c +index 9e17ef27a183..6f1dbd52ec91 100644 +--- a/drivers/irqchip/irq-mips-gic.c ++++ b/drivers/irqchip/irq-mips-gic.c +@@ -915,8 +915,11 @@ static int __init gic_of_init(struct device_node *node, + gic_len = resource_size(&res); + } + +- if (mips_cm_present()) ++ if (mips_cm_present()) { + write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK); ++ /* Ensure GIC region is enabled before trying to access it */ ++ __sync(); ++ } + gic_present = true; + + __gic_init(gic_base, gic_len, cpu_vec, 0, node); +diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c +index cd4777954f87..9bee3f11898a 100644 +--- a/drivers/net/wireless/ti/wl1251/main.c ++++ b/drivers/net/wireless/ti/wl1251/main.c +@@ -1567,6 +1567,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void) + + wl->state = WL1251_STATE_OFF; + mutex_init(&wl->mutex); ++ spin_lock_init(&wl->wl_lock); + + wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE; + wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE; +diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c +index 
c6a1ec110c01..22bae2b434e2 100644 +--- a/fs/ceph/addr.c ++++ b/fs/ceph/addr.c +@@ -189,7 +189,7 @@ static int ceph_releasepage(struct page *page, gfp_t g) + /* + * read a single page, without unlocking it. + */ +-static int readpage_nounlock(struct file *filp, struct page *page) ++static int ceph_do_readpage(struct file *filp, struct page *page) + { + struct inode *inode = file_inode(filp); + struct ceph_inode_info *ci = ceph_inode(inode); +@@ -219,7 +219,7 @@ static int readpage_nounlock(struct file *filp, struct page *page) + + err = ceph_readpage_from_fscache(inode, page); + if (err == 0) +- goto out; ++ return -EINPROGRESS; + + dout("readpage inode %p file %p page %p index %lu\n", + inode, filp, page, page->index); +@@ -249,8 +249,11 @@ out: + + static int ceph_readpage(struct file *filp, struct page *page) + { +- int r = readpage_nounlock(filp, page); +- unlock_page(page); ++ int r = ceph_do_readpage(filp, page); ++ if (r != -EINPROGRESS) ++ unlock_page(page); ++ else ++ r = 0; + return r; + } + +@@ -1094,7 +1097,7 @@ retry_locked: + goto retry_locked; + r = writepage_nounlock(page, NULL); + if (r < 0) +- goto fail_nosnap; ++ goto fail_unlock; + goto retry_locked; + } + +@@ -1122,11 +1125,14 @@ retry_locked: + } + + /* we need to read it. */ +- r = readpage_nounlock(file, page); +- if (r < 0) +- goto fail_nosnap; ++ r = ceph_do_readpage(file, page); ++ if (r < 0) { ++ if (r == -EINPROGRESS) ++ return -EAGAIN; ++ goto fail_unlock; ++ } + goto retry_locked; +-fail_nosnap: ++fail_unlock: + unlock_page(page); + return r; + } +diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c +index a4766ded1ba7..ff1cfd7b1083 100644 +--- a/fs/ceph/cache.c ++++ b/fs/ceph/cache.c +@@ -224,13 +224,7 @@ void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) + fscache_relinquish_cookie(cookie, 0); + } + +-static void ceph_vfs_readpage_complete(struct page *page, void *data, int error) +-{ +- if (!error) +- SetPageUptodate(page); +-} +- +-static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int error) ++static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error) + { + if (!error) + SetPageUptodate(page); +@@ -259,7 +253,7 @@ int ceph_readpage_from_fscache(struct inode *inode, struct page *page) + return -ENOBUFS; + + ret = fscache_read_or_alloc_page(ci->fscache, page, +- ceph_vfs_readpage_complete, NULL, ++ ceph_readpage_from_fscache_complete, NULL, + GFP_KERNEL); + + switch (ret) { +@@ -288,7 +282,7 @@ int ceph_readpages_from_fscache(struct inode *inode, + return -ENOBUFS; + + ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages, +- ceph_vfs_readpage_complete_unlock, ++ ceph_readpage_from_fscache_complete, + NULL, mapping_gfp_mask(mapping)); + + switch (ret) { +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c +index fa8df3fef6fc..297e05c9e2b0 100644 +--- a/fs/cifs/dir.c ++++ b/fs/cifs/dir.c +@@ -194,7 +194,7 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon) + int i; + + if (unlikely(direntry->d_name.len > +- tcon->fsAttrInfo.MaxPathNameComponentLength)) ++ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength))) + return -ENAMETOOLONG; + + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h +index b8f553b32dda..aacb15bd56fe 100644 +--- a/fs/cifs/smb2pdu.h ++++ b/fs/cifs/smb2pdu.h +@@ -82,8 +82,8 @@ + + #define NUMBER_OF_SMB2_COMMANDS 0x0013 + +-/* BB FIXME - analyze following length BB */ +-#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 
2 pad */ ++/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */ ++#define MAX_SMB2_HDR_SIZE 0x00b0 + + #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe) + +diff --git a/fs/eventpoll.c b/fs/eventpoll.c +index 1e009cad8d5c..1b08556776ce 100644 +--- a/fs/eventpoll.c ++++ b/fs/eventpoll.c +@@ -518,8 +518,13 @@ static void ep_remove_wait_queue(struct eppoll_entry *pwq) + wait_queue_head_t *whead; + + rcu_read_lock(); +- /* If it is cleared by POLLFREE, it should be rcu-safe */ +- whead = rcu_dereference(pwq->whead); ++ /* ++ * If it is cleared by POLLFREE, it should be rcu-safe. ++ * If we read NULL we need a barrier paired with ++ * smp_store_release() in ep_poll_callback(), otherwise ++ * we rely on whead->lock. ++ */ ++ whead = smp_load_acquire(&pwq->whead); + if (whead) + remove_wait_queue(whead, &pwq->wait); + rcu_read_unlock(); +@@ -1003,17 +1008,6 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k + struct epitem *epi = ep_item_from_wait(wait); + struct eventpoll *ep = epi->ep; + +- if ((unsigned long)key & POLLFREE) { +- ep_pwq_from_wait(wait)->whead = NULL; +- /* +- * whead = NULL above can race with ep_remove_wait_queue() +- * which can do another remove_wait_queue() after us, so we +- * can't use __remove_wait_queue(). whead->lock is held by +- * the caller. +- */ +- list_del_init(&wait->task_list); +- } +- + spin_lock_irqsave(&ep->lock, flags); + + /* +@@ -1078,6 +1072,23 @@ out_unlock: + if (pwake) + ep_poll_safewake(&ep->poll_wait); + ++ ++ if ((unsigned long)key & POLLFREE) { ++ /* ++ * If we race with ep_remove_wait_queue() it can miss ++ * ->whead = NULL and do another remove_wait_queue() after ++ * us, so we can't use __remove_wait_queue(). ++ */ ++ list_del_init(&wait->task_list); ++ /* ++ * ->whead != NULL protects us from the race with ep_free() ++ * or ep_remove(), ep_remove_wait_queue() takes whead->lock ++ * held by the caller. Once we nullify it, nothing protects ++ * ep/epi or even wait. ++ */ ++ smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL); ++ } ++ + return 1; + } + +diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h +index fc824e2828f3..5d2add1a6c96 100644 +--- a/include/asm-generic/topology.h ++++ b/include/asm-generic/topology.h +@@ -48,7 +48,11 @@ + #define parent_node(node) ((void)(node),0) + #endif + #ifndef cpumask_of_node +-#define cpumask_of_node(node) ((void)node, cpu_online_mask) ++ #ifdef CONFIG_NEED_MULTIPLE_NODES ++ #define cpumask_of_node(node) ((node) == 0 ? 
cpu_online_mask : cpu_none_mask) ++ #else ++ #define cpumask_of_node(node) ((void)node, cpu_online_mask) ++ #endif + #endif + #ifndef pcibus_to_node + #define pcibus_to_node(bus) ((void)(bus), -1) +diff --git a/kernel/cpuset.c b/kernel/cpuset.c +index 8ccd66a97c8b..2924b6faa469 100644 +--- a/kernel/cpuset.c ++++ b/kernel/cpuset.c +@@ -1910,6 +1910,7 @@ static struct cftype files[] = { + { + .name = "memory_pressure", + .read_u64 = cpuset_read_u64, ++ .private = FILE_MEMORY_PRESSURE, + }, + + { +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 77055a362041..0e01250f2072 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -3275,9 +3275,15 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, + struct xfrm_state *x_new[XFRM_MAX_DEPTH]; + struct xfrm_migrate *mp; + ++ /* Stage 0 - sanity checks */ + if ((err = xfrm_migrate_check(m, num_migrate)) < 0) + goto out; + ++ if (dir >= XFRM_POLICY_MAX) { ++ err = -EINVAL; ++ goto out; ++ } ++ + /* Stage 1 - find policy */ + if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) { + err = -ENOENT; diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.84-85.patch b/patch/kernel/mvebu64-default/03-patch-4.4.84-85.patch new file mode 100644 index 000000000..27aa21d04 --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.84-85.patch @@ -0,0 +1,1511 @@ +diff --git a/Makefile b/Makefile +index 9d77ac063ec0..0f3d843f42a7 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 84 ++SUBLEVEL = 85 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h +index 210ef3e72332..0ddd7144c492 100644 +--- a/arch/arc/include/asm/cache.h ++++ b/arch/arc/include/asm/cache.h +@@ -88,7 +88,9 @@ extern int ioc_exists; + #define ARC_REG_SLC_FLUSH 0x904 + #define ARC_REG_SLC_INVALIDATE 0x905 + #define ARC_REG_SLC_RGN_START 0x914 ++#define ARC_REG_SLC_RGN_START1 0x915 + #define ARC_REG_SLC_RGN_END 0x916 ++#define ARC_REG_SLC_RGN_END1 0x917 + + /* Bit val in SLC_CONTROL */ + #define SLC_CTRL_IM 0x040 +diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c +index d81b6d7e11e7..9a84cbdd44b0 100644 +--- a/arch/arc/mm/cache.c ++++ b/arch/arc/mm/cache.c +@@ -543,6 +543,7 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op) + static DEFINE_SPINLOCK(lock); + unsigned long flags; + unsigned int ctrl; ++ phys_addr_t end; + + spin_lock_irqsave(&lock, flags); + +@@ -572,8 +573,16 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op) + * END needs to be setup before START (latter triggers the operation) + * END can't be same as START, so add (l2_line_sz - 1) to sz + */ +- write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1)); +- write_aux_reg(ARC_REG_SLC_RGN_START, paddr); ++ end = paddr + sz + l2_line_sz - 1; ++ if (is_pae40_enabled()) ++ write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end)); ++ ++ write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end)); ++ ++ if (is_pae40_enabled()) ++ write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr)); ++ ++ write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr)); + + while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY); + +diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c +index eac4f3b02df9..bb81cd05f0bc 100644 +--- a/drivers/acpi/apei/ghes.c ++++ b/drivers/acpi/apei/ghes.c +@@ -1067,6 +1067,7 @@ static int ghes_remove(struct platform_device *ghes_dev) + if (list_empty(&ghes_sci)) + 
unregister_acpi_hed_notifier(&ghes_notifier_sci); + mutex_unlock(&ghes_list_mutex); ++ synchronize_rcu(); + break; + case ACPI_HEST_NOTIFY_NMI: + ghes_nmi_remove(ghes); +diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c +index ccdc8db16bb8..fa2cf2dc4e33 100644 +--- a/drivers/acpi/ioapic.c ++++ b/drivers/acpi/ioapic.c +@@ -45,6 +45,12 @@ static acpi_status setup_res(struct acpi_resource *acpi_res, void *data) + struct resource *res = data; + struct resource_win win; + ++ /* ++ * We might assign this to 'res' later, make sure all pointers are ++ * cleared before the resource is added to the global list ++ */ ++ memset(&win, 0, sizeof(win)); ++ + res->flags = 0; + if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM) == 0) + return AE_OK; +diff --git a/drivers/android/binder.c b/drivers/android/binder.c +index 47ddfefe2443..5531f020e561 100644 +--- a/drivers/android/binder.c ++++ b/drivers/android/binder.c +@@ -1718,8 +1718,12 @@ static void binder_transaction(struct binder_proc *proc, + list_add_tail(&t->work.entry, target_list); + tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; + list_add_tail(&tcomplete->entry, &thread->todo); +- if (target_wait) +- wake_up_interruptible(target_wait); ++ if (target_wait) { ++ if (reply || !(t->flags & TF_ONE_WAY)) ++ wake_up_interruptible_sync(target_wait); ++ else ++ wake_up_interruptible(target_wait); ++ } + return; + + err_get_unused_fd_failed: +@@ -2865,7 +2869,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) + const char *failure_string; + struct binder_buffer *buffer; + +- if (proc->tsk != current) ++ if (proc->tsk != current->group_leader) + return -EINVAL; + + if ((vma->vm_end - vma->vm_start) > SZ_4M) +@@ -2966,8 +2970,8 @@ static int binder_open(struct inode *nodp, struct file *filp) + proc = kzalloc(sizeof(*proc), GFP_KERNEL); + if (proc == NULL) + return -ENOMEM; +- get_task_struct(current); +- proc->tsk = current; ++ get_task_struct(current->group_leader); ++ proc->tsk = current->group_leader; + INIT_LIST_HEAD(&proc->todo); + init_waitqueue_head(&proc->wait); + proc->default_priority = task_nice(current); +diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c +index 6253775b8d9c..50d74e5ce41b 100644 +--- a/drivers/gpu/drm/drm_atomic.c ++++ b/drivers/gpu/drm/drm_atomic.c +@@ -1247,6 +1247,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state) + if (config->funcs->atomic_check) + ret = config->funcs->atomic_check(state->dev, state); + ++ if (ret) ++ return ret; ++ + if (!state->allow_modeset) { + for_each_crtc_in_state(state, crtc, crtc_state, i) { + if (drm_atomic_crtc_needs_modeset(crtc_state)) { +@@ -1257,7 +1260,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state) + } + } + +- return ret; ++ return 0; + } + EXPORT_SYMBOL(drm_atomic_check_only); + +diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c +index b205224f1a44..9147113139be 100644 +--- a/drivers/gpu/drm/drm_gem.c ++++ b/drivers/gpu/drm/drm_gem.c +@@ -715,13 +715,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) + struct drm_gem_object *obj = ptr; + struct drm_device *dev = obj->dev; + ++ if (dev->driver->gem_close_object) ++ dev->driver->gem_close_object(obj, file_priv); ++ + if (drm_core_check_feature(dev, DRIVER_PRIME)) + drm_gem_remove_prime_handles(obj, file_priv); + drm_vma_node_revoke(&obj->vma_node, file_priv->filp); + +- if (dev->driver->gem_close_object) +- dev->driver->gem_close_object(obj, file_priv); +- + drm_gem_object_handle_unreference_unlocked(obj); + + 
return 0; +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +index 9255b9c096b6..9befd624a5f0 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +@@ -148,8 +148,8 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) + rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0); + + /* Signal polarities */ +- value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL) +- | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL) ++ value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0) ++ | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0) + | DSMR_DIPM_DE | DSMR_CSPM; + rcar_du_crtc_write(rcrtc, DSMR, value); + +@@ -171,7 +171,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) + mode->crtc_vsync_start - 1); + rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1); + +- rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start); ++ rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start - 1); + rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay); + } + +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c +index 46429c4be8e5..2b75a4891dec 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c +@@ -642,13 +642,13 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu, + } + + ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector); +- of_node_put(encoder); +- of_node_put(connector); +- + if (ret && ret != -EPROBE_DEFER) + dev_warn(rcdu->dev, +- "failed to initialize encoder %s (%d), skipping\n", +- encoder->full_name, ret); ++ "failed to initialize encoder %s on output %u (%d), skipping\n", ++ of_node_full_name(encoder), output, ret); ++ ++ of_node_put(encoder); ++ of_node_put(connector); + + return ret; + } +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c +index 85043c5bad03..873e04aa9352 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c +@@ -56,11 +56,11 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds, + return ret; + + /* PLL clock configuration */ +- if (freq <= 38000) ++ if (freq < 39000) + pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M; +- else if (freq <= 60000) ++ else if (freq < 61000) + pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M; +- else if (freq <= 121000) ++ else if (freq < 121000) + pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M; + else + pllcr = LVDPLLCR_PLLDLYCNT_150M; +@@ -102,7 +102,7 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds, + /* Turn the PLL on, wait for the startup delay, and turn the output + * on. 
+ */ +- lvdcr0 |= LVDCR0_PLLEN; ++ lvdcr0 |= LVDCR0_PLLON; + rcar_lvds_write(lvds, LVDCR0, lvdcr0); + + usleep_range(100, 150); +diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h +index 77cf9289ab65..b1eafd097a79 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h ++++ b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h +@@ -18,7 +18,7 @@ + #define LVDCR0_DMD (1 << 12) + #define LVDCR0_LVMD_MASK (0xf << 8) + #define LVDCR0_LVMD_SHIFT 8 +-#define LVDCR0_PLLEN (1 << 4) ++#define LVDCR0_PLLON (1 << 4) + #define LVDCR0_BEN (1 << 2) + #define LVDCR0_LVEN (1 << 1) + #define LVDCR0_LVRES (1 << 0) +diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c +index 6b00061c3746..a2ae2213ef3e 100644 +--- a/drivers/i2c/busses/i2c-designware-platdrv.c ++++ b/drivers/i2c/busses/i2c-designware-platdrv.c +@@ -294,7 +294,7 @@ static void dw_i2c_plat_complete(struct device *dev) + #endif + + #ifdef CONFIG_PM +-static int dw_i2c_plat_suspend(struct device *dev) ++static int dw_i2c_plat_runtime_suspend(struct device *dev) + { + struct platform_device *pdev = to_platform_device(dev); + struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev); +@@ -318,11 +318,21 @@ static int dw_i2c_plat_resume(struct device *dev) + return 0; + } + ++#ifdef CONFIG_PM_SLEEP ++static int dw_i2c_plat_suspend(struct device *dev) ++{ ++ pm_runtime_resume(dev); ++ return dw_i2c_plat_runtime_suspend(dev); ++} ++#endif ++ + static const struct dev_pm_ops dw_i2c_dev_pm_ops = { + .prepare = dw_i2c_plat_prepare, + .complete = dw_i2c_plat_complete, + SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume) +- SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL) ++ SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, ++ dw_i2c_plat_resume, ++ NULL) + }; + + #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops) +diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +index 0a86ef43e781..a8db38db622e 100644 +--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c ++++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +@@ -36,8 +36,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) + s32 poll_value = 0; + + if (state) { +- if (!atomic_read(&st->user_requested_state)) +- return 0; + if (sensor_hub_device_open(st->hsdev)) + return -EIO; + +@@ -84,6 +82,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) + &report_val); + } + ++ pr_debug("HID_SENSOR %s set power_state %d report_state %d\n", ++ st->pdev->name, state_val, report_val); ++ + sensor_hub_get_feature(st->hsdev, st->power_state.report_id, + st->power_state.index, + sizeof(state_val), &state_val); +@@ -107,6 +108,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state) + ret = pm_runtime_get_sync(&st->pdev->dev); + else { + pm_runtime_mark_last_busy(&st->pdev->dev); ++ pm_runtime_use_autosuspend(&st->pdev->dev); + ret = pm_runtime_put_autosuspend(&st->pdev->dev); + } + if (ret < 0) { +@@ -175,8 +177,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name, + /* Default to 3 seconds, but can be changed from sysfs */ + pm_runtime_set_autosuspend_delay(&attrb->pdev->dev, + 3000); +- pm_runtime_use_autosuspend(&attrb->pdev->dev); +- + return ret; + error_unreg_trigger: + iio_trigger_unregister(trig); +diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c +index 2485b88ee1b6..1880105cc8c4 100644 +--- 
a/drivers/iio/imu/adis16480.c ++++ b/drivers/iio/imu/adis16480.c +@@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { + .gyro_max_val = IIO_RAD_TO_DEGREE(22500), + .gyro_max_scale = 450, + .accel_max_val = IIO_M_S_2_TO_G(12500), +- .accel_max_scale = 5, ++ .accel_max_scale = 10, + }, + [ADIS16485] = { + .channels = adis16485_channels, +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c +index 98d4e515587a..681dce15fbc8 100644 +--- a/drivers/input/mouse/elan_i2c_core.c ++++ b/drivers/input/mouse/elan_i2c_core.c +@@ -1234,6 +1234,7 @@ static const struct acpi_device_id elan_acpi_id[] = { + { "ELAN0000", 0 }, + { "ELAN0100", 0 }, + { "ELAN0600", 0 }, ++ { "ELAN0602", 0 }, + { "ELAN0605", 0 }, + { "ELAN0608", 0 }, + { "ELAN0605", 0 }, +diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c +index 354d47ecd66a..ce6ff9b301bb 100644 +--- a/drivers/input/mouse/trackpoint.c ++++ b/drivers/input/mouse/trackpoint.c +@@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir + if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID))) + return -1; + +- if (param[0] != TP_MAGIC_IDENT) ++ /* add new TP ID. */ ++ if (!(param[0] & TP_MAGIC_IDENT)) + return -1; + + if (firmware_id) +diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h +index 5617ed3a7d7a..88055755f82e 100644 +--- a/drivers/input/mouse/trackpoint.h ++++ b/drivers/input/mouse/trackpoint.h +@@ -21,8 +21,9 @@ + #define TP_COMMAND 0xE2 /* Commands start with this */ + + #define TP_READ_ID 0xE1 /* Sent for device identification */ +-#define TP_MAGIC_IDENT 0x01 /* Sent after a TP_READ_ID followed */ ++#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */ + /* by the firmware ID */ ++ /* Firmware ID includes 0x1, 0x2, 0x3 */ + + + /* +diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h +index a2661381ddfc..d2774197fe58 100644 +--- a/drivers/misc/mei/hw-me-regs.h ++++ b/drivers/misc/mei/hw-me-regs.h +@@ -125,6 +125,11 @@ + #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ + #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ + ++#define MEI_DEV_ID_LBG 0xA1BA /* Lewisburg (SPT) */ ++ ++#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ ++#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ ++ + /* + * MEI HW Section + */ +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c +index 01e20384ac44..adab5bbb642a 100644 +--- a/drivers/misc/mei/pci-me.c ++++ b/drivers/misc/mei/pci-me.c +@@ -86,10 +86,14 @@ static const struct pci_device_id mei_me_pci_tbl[] = { + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)}, ++ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, mei_me_pch8_cfg)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, mei_me_pch8_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, mei_me_pch8_cfg)}, + ++ {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)}, ++ {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)}, ++ + /* required last entry */ + {0, } + }; +diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c +index ecc6fb9ca92f..3bbdf60f8908 100644 +--- a/drivers/ntb/ntb_transport.c ++++ b/drivers/ntb/ntb_transport.c +@@ -599,7 +599,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, + if (!mw->virt_addr) + return -ENOMEM; + +- if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) ++ if (mw_num < 
qp_count % mw_count) + num_qps_mw = qp_count / mw_count + 1; + else + num_qps_mw = qp_count / mw_count; +@@ -947,7 +947,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, + qp->event_handler = NULL; + ntb_qp_link_down_reset(qp); + +- if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) ++ if (mw_num < qp_count % mw_count) + num_qps_mw = qp_count / mw_count + 1; + else + num_qps_mw = qp_count / mw_count; +@@ -1065,8 +1065,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) + qp_count = ilog2(qp_bitmap); + if (max_num_clients && max_num_clients < qp_count) + qp_count = max_num_clients; +- else if (mw_count < qp_count) +- qp_count = mw_count; ++ else if (nt->mw_count < qp_count) ++ qp_count = nt->mw_count; + + qp_bitmap &= BIT_ULL(qp_count) - 1; + +diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c +index 02c3feef4e36..c2d2c17550a7 100644 +--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c ++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c +@@ -49,6 +49,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = { + {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ + {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ + {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ ++ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ + {} /* Terminating entry */ + }; + +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c +index 26a3b389a265..fa8df3fef6fc 100644 +--- a/fs/cifs/dir.c ++++ b/fs/cifs/dir.c +@@ -183,15 +183,20 @@ cifs_bp_rename_retry: + } + + /* ++ * Don't allow path components longer than the server max. + * Don't allow the separator character in a path component. + * The VFS will not allow "/", but "\" is allowed by posix. 
+ */ + static int +-check_name(struct dentry *direntry) ++check_name(struct dentry *direntry, struct cifs_tcon *tcon) + { + struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); + int i; + ++ if (unlikely(direntry->d_name.len > ++ tcon->fsAttrInfo.MaxPathNameComponentLength)) ++ return -ENAMETOOLONG; ++ + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { + for (i = 0; i < direntry->d_name.len; i++) { + if (direntry->d_name.name[i] == '\\') { +@@ -489,10 +494,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, + return finish_no_open(file, res); + } + +- rc = check_name(direntry); +- if (rc) +- return rc; +- + xid = get_xid(); + + cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n", +@@ -505,6 +506,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, + } + + tcon = tlink_tcon(tlink); ++ ++ rc = check_name(direntry, tcon); ++ if (rc) ++ goto out_free_xid; ++ + server = tcon->ses->server; + + if (server->ops->new_lease_key) +@@ -765,7 +771,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, + } + pTcon = tlink_tcon(tlink); + +- rc = check_name(direntry); ++ rc = check_name(direntry, pTcon); + if (rc) + goto lookup_out; + +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index f4afa3b1cc56..6c484ddf26a9 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -2768,8 +2768,8 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf, + kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) * + le32_to_cpu(pfs_inf->SectorsPerAllocationUnit); + kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits); +- kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits); +- kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); ++ kst->f_bfree = kst->f_bavail = ++ le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); + return; + } + +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c +index 3f68a25f2169..544672b440de 100644 +--- a/fs/nfsd/nfs4xdr.c ++++ b/fs/nfsd/nfs4xdr.c +@@ -129,7 +129,7 @@ static void next_decode_page(struct nfsd4_compoundargs *argp) + argp->p = page_address(argp->pagelist[0]); + argp->pagelist++; + if (argp->pagelen < PAGE_SIZE) { +- argp->end = argp->p + (argp->pagelen>>2); ++ argp->end = argp->p + XDR_QUADLEN(argp->pagelen); + argp->pagelen = 0; + } else { + argp->end = argp->p + (PAGE_SIZE>>2); +@@ -1246,9 +1246,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write) + argp->pagelen -= pages * PAGE_SIZE; + len -= pages * PAGE_SIZE; + +- argp->p = (__be32 *)page_address(argp->pagelist[0]); +- argp->pagelist++; +- argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE); ++ next_decode_page(argp); + } + argp->p += XDR_QUADLEN(len); + +diff --git a/include/net/ip.h b/include/net/ip.h +index b450d8653b30..7476bb10ff37 100644 +--- a/include/net/ip.h ++++ b/include/net/ip.h +@@ -314,7 +314,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, + !forwarding) + return dst_mtu(dst); + +- return min(dst->dev->mtu, IP_MAX_MTU); ++ return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU); + } + + static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) +@@ -327,7 +327,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) + return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding); + } + +- return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU); ++ return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU); + } + + u32 ip_idents_reserve(u32 hash, int segs); +diff --git a/include/net/sch_generic.h 
b/include/net/sch_generic.h +index e5bba897d206..7a5d6a073165 100644 +--- a/include/net/sch_generic.h ++++ b/include/net/sch_generic.h +@@ -717,8 +717,11 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, + old = *pold; + *pold = new; + if (old != NULL) { +- qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog); ++ unsigned int qlen = old->q.qlen; ++ unsigned int backlog = old->qstats.backlog; ++ + qdisc_reset(old); ++ qdisc_tree_reduce_backlog(old, qlen, backlog); + } + sch_tree_unlock(sch); + +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 784ab8fe8714..3697063dd09a 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -8473,28 +8473,27 @@ SYSCALL_DEFINE5(perf_event_open, + goto err_context; + + /* +- * Do not allow to attach to a group in a different +- * task or CPU context: ++ * Make sure we're both events for the same CPU; ++ * grouping events for different CPUs is broken; since ++ * you can never concurrently schedule them anyhow. + */ +- if (move_group) { +- /* +- * Make sure we're both on the same task, or both +- * per-cpu events. +- */ +- if (group_leader->ctx->task != ctx->task) +- goto err_context; ++ if (group_leader->cpu != event->cpu) ++ goto err_context; + +- /* +- * Make sure we're both events for the same CPU; +- * grouping events for different CPUs is broken; since +- * you can never concurrently schedule them anyhow. +- */ +- if (group_leader->cpu != event->cpu) +- goto err_context; +- } else { +- if (group_leader->ctx != ctx) +- goto err_context; +- } ++ /* ++ * Make sure we're both on the same task, or both ++ * per-CPU events. ++ */ ++ if (group_leader->ctx->task != ctx->task) ++ goto err_context; ++ ++ /* ++ * Do not allow to attach to a group in a different task ++ * or CPU context. If we're moving SW events, we'll fix ++ * this up later, so allow that. 
++ */ ++ if (!move_group && group_leader->ctx != ctx) ++ goto err_context; + + /* + * Only a group leader can be exclusive or pinned +diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c +index 6816302542b2..f0e5408499b6 100644 +--- a/kernel/trace/trace_events_filter.c ++++ b/kernel/trace/trace_events_filter.c +@@ -1979,6 +1979,10 @@ static int create_filter(struct trace_event_call *call, + if (err && set_str) + append_filter_err(ps, filter); + } ++ if (err && !set_str) { ++ free_event_filter(filter); ++ filter = NULL; ++ } + create_filter_finish(ps); + + *filterp = filter; +diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c +index 1641367e54ca..69f56073b337 100644 +--- a/net/bluetooth/bnep/core.c ++++ b/net/bluetooth/bnep/core.c +@@ -484,16 +484,16 @@ static int bnep_session(void *arg) + struct net_device *dev = s->dev; + struct sock *sk = s->sock->sk; + struct sk_buff *skb; +- wait_queue_t wait; ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); + + BT_DBG(""); + + set_user_nice(current, -15); + +- init_waitqueue_entry(&wait, current); + add_wait_queue(sk_sleep(sk), &wait); + while (1) { +- set_current_state(TASK_INTERRUPTIBLE); ++ /* Ensure session->terminate is updated */ ++ smp_mb__before_atomic(); + + if (atomic_read(&s->terminate)) + break; +@@ -515,9 +515,8 @@ static int bnep_session(void *arg) + break; + netif_wake_queue(dev); + +- schedule(); ++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } +- __set_current_state(TASK_RUNNING); + remove_wait_queue(sk_sleep(sk), &wait); + + /* Cleanup session */ +@@ -663,7 +662,7 @@ int bnep_del_connection(struct bnep_conndel_req *req) + s = __bnep_get_session(req->dst); + if (s) { + atomic_inc(&s->terminate); +- wake_up_process(s->task); ++ wake_up_interruptible(sk_sleep(s->sock->sk)); + } else + err = -ENOENT; + +diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c +index 298ed37010e6..3a39fd523e40 100644 +--- a/net/bluetooth/cmtp/core.c ++++ b/net/bluetooth/cmtp/core.c +@@ -281,16 +281,16 @@ static int cmtp_session(void *arg) + struct cmtp_session *session = arg; + struct sock *sk = session->sock->sk; + struct sk_buff *skb; +- wait_queue_t wait; ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); + + BT_DBG("session %p", session); + + set_user_nice(current, -15); + +- init_waitqueue_entry(&wait, current); + add_wait_queue(sk_sleep(sk), &wait); + while (1) { +- set_current_state(TASK_INTERRUPTIBLE); ++ /* Ensure session->terminate is updated */ ++ smp_mb__before_atomic(); + + if (atomic_read(&session->terminate)) + break; +@@ -307,9 +307,8 @@ static int cmtp_session(void *arg) + + cmtp_process_transmit(session); + +- schedule(); ++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } +- __set_current_state(TASK_RUNNING); + remove_wait_queue(sk_sleep(sk), &wait); + + down_write(&cmtp_session_sem); +@@ -394,7 +393,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) + err = cmtp_attach_device(session); + if (err < 0) { + atomic_inc(&session->terminate); +- wake_up_process(session->task); ++ wake_up_interruptible(sk_sleep(session->sock->sk)); + up_write(&cmtp_session_sem); + return err; + } +@@ -432,7 +431,11 @@ int cmtp_del_connection(struct cmtp_conndel_req *req) + + /* Stop session thread */ + atomic_inc(&session->terminate); +- wake_up_process(session->task); ++ ++ /* Ensure session->terminate is updated */ ++ smp_mb__after_atomic(); ++ ++ wake_up_interruptible(sk_sleep(session->sock->sk)); + } else + err = -ENOENT; + +diff --git 
a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c +index 0bec4588c3c8..1fc076420d1e 100644 +--- a/net/bluetooth/hidp/core.c ++++ b/net/bluetooth/hidp/core.c +@@ -36,6 +36,7 @@ + #define VERSION "1.2" + + static DECLARE_RWSEM(hidp_session_sem); ++static DECLARE_WAIT_QUEUE_HEAD(hidp_session_wq); + static LIST_HEAD(hidp_session_list); + + static unsigned char hidp_keycode[256] = { +@@ -1068,12 +1069,12 @@ static int hidp_session_start_sync(struct hidp_session *session) + * Wake up session thread and notify it to stop. This is asynchronous and + * returns immediately. Call this whenever a runtime error occurs and you want + * the session to stop. +- * Note: wake_up_process() performs any necessary memory-barriers for us. ++ * Note: wake_up_interruptible() performs any necessary memory-barriers for us. + */ + static void hidp_session_terminate(struct hidp_session *session) + { + atomic_inc(&session->terminate); +- wake_up_process(session->task); ++ wake_up_interruptible(&hidp_session_wq); + } + + /* +@@ -1180,7 +1181,9 @@ static void hidp_session_run(struct hidp_session *session) + struct sock *ctrl_sk = session->ctrl_sock->sk; + struct sock *intr_sk = session->intr_sock->sk; + struct sk_buff *skb; ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); + ++ add_wait_queue(&hidp_session_wq, &wait); + for (;;) { + /* + * This thread can be woken up two ways: +@@ -1188,12 +1191,10 @@ static void hidp_session_run(struct hidp_session *session) + * session->terminate flag and wakes this thread up. + * - Via modifying the socket state of ctrl/intr_sock. This + * thread is woken up by ->sk_state_changed(). +- * +- * Note: set_current_state() performs any necessary +- * memory-barriers for us. + */ +- set_current_state(TASK_INTERRUPTIBLE); + ++ /* Ensure session->terminate is updated */ ++ smp_mb__before_atomic(); + if (atomic_read(&session->terminate)) + break; + +@@ -1227,11 +1228,22 @@ static void hidp_session_run(struct hidp_session *session) + hidp_process_transmit(session, &session->ctrl_transmit, + session->ctrl_sock); + +- schedule(); ++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } ++ remove_wait_queue(&hidp_session_wq, &wait); + + atomic_inc(&session->terminate); +- set_current_state(TASK_RUNNING); ++ ++ /* Ensure session->terminate is updated */ ++ smp_mb__after_atomic(); ++} ++ ++static int hidp_session_wake_function(wait_queue_t *wait, ++ unsigned int mode, ++ int sync, void *key) ++{ ++ wake_up_interruptible(&hidp_session_wq); ++ return false; + } + + /* +@@ -1244,7 +1256,8 @@ static void hidp_session_run(struct hidp_session *session) + static int hidp_session_thread(void *arg) + { + struct hidp_session *session = arg; +- wait_queue_t ctrl_wait, intr_wait; ++ DEFINE_WAIT_FUNC(ctrl_wait, hidp_session_wake_function); ++ DEFINE_WAIT_FUNC(intr_wait, hidp_session_wake_function); + + BT_DBG("session %p", session); + +@@ -1254,8 +1267,6 @@ static int hidp_session_thread(void *arg) + set_user_nice(current, -15); + hidp_set_timer(session); + +- init_waitqueue_entry(&ctrl_wait, current); +- init_waitqueue_entry(&intr_wait, current); + add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait); + add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait); + /* This memory barrier is paired with wq_has_sleeper(). 
See +diff --git a/net/dccp/proto.c b/net/dccp/proto.c +index 9fe25bf63296..b68168fcc06a 100644 +--- a/net/dccp/proto.c ++++ b/net/dccp/proto.c +@@ -24,6 +24,7 @@ + #include + + #include ++#include + #include + #include + +@@ -170,6 +171,15 @@ const char *dccp_packet_name(const int type) + + EXPORT_SYMBOL_GPL(dccp_packet_name); + ++static void dccp_sk_destruct(struct sock *sk) ++{ ++ struct dccp_sock *dp = dccp_sk(sk); ++ ++ ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); ++ dp->dccps_hc_tx_ccid = NULL; ++ inet_sock_destruct(sk); ++} ++ + int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) + { + struct dccp_sock *dp = dccp_sk(sk); +@@ -179,6 +189,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) + icsk->icsk_syn_retries = sysctl_dccp_request_retries; + sk->sk_state = DCCP_CLOSED; + sk->sk_write_space = dccp_write_space; ++ sk->sk_destruct = dccp_sk_destruct; + icsk->icsk_sync_mss = dccp_sync_mss; + dp->dccps_mss_cache = 536; + dp->dccps_rate_last = jiffies; +@@ -201,10 +212,7 @@ void dccp_destroy_sock(struct sock *sk) + { + struct dccp_sock *dp = dccp_sk(sk); + +- /* +- * DCCP doesn't use sk_write_queue, just sk_send_head +- * for retransmissions +- */ ++ __skb_queue_purge(&sk->sk_write_queue); + if (sk->sk_send_head != NULL) { + kfree_skb(sk->sk_send_head); + sk->sk_send_head = NULL; +@@ -222,8 +230,7 @@ void dccp_destroy_sock(struct sock *sk) + dp->dccps_hc_rx_ackvec = NULL; + } + ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk); +- ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); +- dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL; ++ dp->dccps_hc_rx_ccid = NULL; + + /* clean up feature negotiation state */ + dccp_feat_list_purge(&dp->dccps_featneg); +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c +index b2504712259f..313e3c11a15a 100644 +--- a/net/ipv4/fib_semantics.c ++++ b/net/ipv4/fib_semantics.c +@@ -1044,15 +1044,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg) + fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); + if (!fi) + goto failure; +- fib_info_cnt++; + if (cfg->fc_mx) { + fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL); +- if (!fi->fib_metrics) +- goto failure; ++ if (unlikely(!fi->fib_metrics)) { ++ kfree(fi); ++ return ERR_PTR(err); ++ } + atomic_set(&fi->fib_metrics->refcnt, 1); +- } else ++ } else { + fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics; +- ++ } ++ fib_info_cnt++; + fi->fib_net = net; + fi->fib_protocol = cfg->fc_protocol; + fi->fib_scope = cfg->fc_scope; +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index c295d882c6e0..0294f7c99c85 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -1241,7 +1241,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst) + if (mtu) + return mtu; + +- mtu = dst->dev->mtu; ++ mtu = READ_ONCE(dst->dev->mtu); + + if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { + if (rt->rt_uses_gateway && mtu > 576) +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index f0dabd125c43..c4bbf704ff9c 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -3028,8 +3028,7 @@ void tcp_rearm_rto(struct sock *sk) + /* delta may not be positive if the socket is locked + * when the retrans timer fires and is rescheduled. 
+ */ +- if (delta > 0) +- rto = delta; ++ rto = max(delta, 1); + } + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, + TCP_RTO_MAX); +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c +index f60e8caea767..aad8cdf15472 100644 +--- a/net/ipv6/ip6_fib.c ++++ b/net/ipv6/ip6_fib.c +@@ -892,6 +892,8 @@ add: + } + nsiblings = iter->rt6i_nsiblings; + fib6_purge_rt(iter, fn, info->nl_net); ++ if (fn->rr_ptr == iter) ++ fn->rr_ptr = NULL; + rt6_release(iter); + + if (nsiblings) { +@@ -904,6 +906,8 @@ add: + if (rt6_qualify_for_ecmp(iter)) { + *ins = iter->dst.rt6_next; + fib6_purge_rt(iter, fn, info->nl_net); ++ if (fn->rr_ptr == iter) ++ fn->rr_ptr = NULL; + rt6_release(iter); + nsiblings--; + } else { +@@ -992,7 +996,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, + /* Create subtree root node */ + sfn = node_alloc(); + if (!sfn) +- goto st_failure; ++ goto failure; + + sfn->leaf = info->nl_net->ipv6.ip6_null_entry; + atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref); +@@ -1008,12 +1012,12 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, + + if (IS_ERR(sn)) { + /* If it is failed, discard just allocated +- root, and then (in st_failure) stale node ++ root, and then (in failure) stale node + in main tree. + */ + node_free(sfn); + err = PTR_ERR(sn); +- goto st_failure; ++ goto failure; + } + + /* Now link new subtree to main tree */ +@@ -1027,7 +1031,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, + + if (IS_ERR(sn)) { + err = PTR_ERR(sn); +- goto st_failure; ++ goto failure; + } + } + +@@ -1069,22 +1073,22 @@ out: + atomic_inc(&pn->leaf->rt6i_ref); + } + #endif +- if (!(rt->dst.flags & DST_NOCACHE)) +- dst_free(&rt->dst); ++ goto failure; + } + return err; + +-#ifdef CONFIG_IPV6_SUBTREES +- /* Subtree creation failed, probably main tree node +- is orphan. If it is, shoot it. ++failure: ++ /* fn->leaf could be NULL if fn is an intermediate node and we ++ * failed to add the new route to it in both subtree creation ++ * failure and fib6_add_rt2node() failure case. ++ * In both cases, fib6_repair_tree() should be called to fix ++ * fn->leaf. 
+ */ +-st_failure: + if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) + fib6_repair_tree(info->nl_net, fn); + if (!(rt->dst.flags & DST_NOCACHE)) + dst_free(&rt->dst); + return err; +-#endif + } + + /* +diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c +index 8d2f7c9b491d..4a116d766c15 100644 +--- a/net/irda/af_irda.c ++++ b/net/irda/af_irda.c +@@ -2227,7 +2227,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname, + { + struct sock *sk = sock->sk; + struct irda_sock *self = irda_sk(sk); +- struct irda_device_list list; ++ struct irda_device_list list = { 0 }; + struct irda_device_info *discoveries; + struct irda_ias_set * ias_opt; /* IAS get/query params */ + struct ias_object * ias_obj; /* Object in IAS */ +diff --git a/net/key/af_key.c b/net/key/af_key.c +index 2e1050ec2cf0..94bf810ad242 100644 +--- a/net/key/af_key.c ++++ b/net/key/af_key.c +@@ -228,7 +228,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, + #define BROADCAST_ONE 1 + #define BROADCAST_REGISTERED 2 + #define BROADCAST_PROMISC_ONLY 4 +-static int pfkey_broadcast(struct sk_buff *skb, ++static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, + int broadcast_flags, struct sock *one_sk, + struct net *net) + { +@@ -278,7 +278,7 @@ static int pfkey_broadcast(struct sk_buff *skb, + rcu_read_unlock(); + + if (one_sk != NULL) +- err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk); ++ err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); + + kfree_skb(skb2); + kfree_skb(skb); +@@ -311,7 +311,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk) + hdr = (struct sadb_msg *) pfk->dump.skb->data; + hdr->sadb_msg_seq = 0; + hdr->sadb_msg_errno = rc; +- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, ++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, + &pfk->sk, sock_net(&pfk->sk)); + pfk->dump.skb = NULL; + } +@@ -355,7 +355,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk) + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / + sizeof(uint64_t)); + +- pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk)); ++ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk)); + + return 0; + } +@@ -1396,7 +1396,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_ + + xfrm_state_put(x); + +- pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net); ++ pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net); + + return 0; + } +@@ -1483,7 +1483,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c) + hdr->sadb_msg_seq = c->seq; + hdr->sadb_msg_pid = c->portid; + +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x)); ++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x)); + + return 0; + } +@@ -1596,7 +1596,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg + out_hdr->sadb_msg_reserved = 0; + out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; + out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; +- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk)); ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk)); + + return 0; + } +@@ -1701,8 +1701,8 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad + return -ENOBUFS; + } + +- pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk)); +- ++ pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk, ++ sock_net(sk)); + return 0; + } + +@@ -1720,7 +1720,8 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr) 
+ hdr->sadb_msg_errno = (uint8_t) 0; + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); + +- return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk)); ++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, ++ sock_net(sk)); + } + + static int key_notify_sa_flush(const struct km_event *c) +@@ -1741,7 +1742,7 @@ static int key_notify_sa_flush(const struct km_event *c) + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); + hdr->sadb_msg_reserved = 0; + +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net); ++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); + + return 0; + } +@@ -1798,7 +1799,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr) + out_hdr->sadb_msg_pid = pfk->dump.msg_portid; + + if (pfk->dump.skb) +- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, ++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, + &pfk->sk, sock_net(&pfk->sk)); + pfk->dump.skb = out_skb; + +@@ -1886,7 +1887,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb + new_hdr->sadb_msg_errno = 0; + } + +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk)); ++ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk)); + return 0; + } + +@@ -2219,7 +2220,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev + out_hdr->sadb_msg_errno = 0; + out_hdr->sadb_msg_seq = c->seq; + out_hdr->sadb_msg_pid = c->portid; +- pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp)); ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp)); + return 0; + + } +@@ -2439,7 +2440,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc + out_hdr->sadb_msg_errno = 0; + out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; + out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; +- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp)); ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp)); + err = 0; + + out: +@@ -2695,7 +2696,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr) + out_hdr->sadb_msg_pid = pfk->dump.msg_portid; + + if (pfk->dump.skb) +- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, ++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, + &pfk->sk, sock_net(&pfk->sk)); + pfk->dump.skb = out_skb; + +@@ -2752,7 +2753,7 @@ static int key_notify_policy_flush(const struct km_event *c) + hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); + hdr->sadb_msg_reserved = 0; +- pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net); ++ pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); + return 0; + + } +@@ -2814,7 +2815,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb + void *ext_hdrs[SADB_EXT_MAX]; + int err; + +- pfkey_broadcast(skb_clone(skb, GFP_KERNEL), ++ pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL, + BROADCAST_PROMISC_ONLY, NULL, sock_net(sk)); + + memset(ext_hdrs, 0, sizeof(ext_hdrs)); +@@ -3036,7 +3037,8 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c) + out_hdr->sadb_msg_seq = 0; + out_hdr->sadb_msg_pid = 0; + +- pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x)); ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, ++ xs_net(x)); + return 0; + } + +@@ -3226,7 +3228,8 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct + xfrm_ctx->ctx_len); + } + +- return pfkey_broadcast(skb, 
BROADCAST_REGISTERED, NULL, xs_net(x)); ++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, ++ xs_net(x)); + } + + static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt, +@@ -3424,7 +3427,8 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, + n_port->sadb_x_nat_t_port_port = sport; + n_port->sadb_x_nat_t_port_reserved = 0; + +- return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x)); ++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, ++ xs_net(x)); + } + + #ifdef CONFIG_NET_KEY_MIGRATE +@@ -3616,7 +3620,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, + } + + /* broadcast migrate message to sockets */ +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net); ++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net); + + return 0; + +diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c +index 0915d448ba23..075b0d22f213 100644 +--- a/net/sched/act_ipt.c ++++ b/net/sched/act_ipt.c +@@ -34,6 +34,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int + { + struct xt_tgchk_param par; + struct xt_target *target; ++ struct ipt_entry e = {}; + int ret = 0; + + target = xt_request_find_target(AF_INET, t->u.user.name, +@@ -44,6 +45,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int + t->u.kernel.target = target; + memset(&par, 0, sizeof(par)); + par.table = table; ++ par.entryinfo = &e; + par.target = target; + par.targinfo = t->data; + par.hook_mask = hook; +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c +index 4431e2833e45..3f2c3eed04da 100644 +--- a/net/sched/sch_sfq.c ++++ b/net/sched/sch_sfq.c +@@ -434,6 +434,7 @@ congestion_drop: + qdisc_drop(head, sch); + + slot_queue_add(slot, skb); ++ qdisc_tree_reduce_backlog(sch, 0, delta); + return NET_XMIT_CN; + } + +@@ -465,8 +466,10 @@ enqueue: + /* Return Congestion Notification only if we dropped a packet + * from this flow. + */ +- if (qlen != slot->qlen) ++ if (qlen != slot->qlen) { ++ qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb)); + return NET_XMIT_CN; ++ } + + /* As we dropped a packet, better let upper stack know this */ + qdisc_tree_reduce_backlog(sch, 1, dropped); +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c +index 7527c168e471..e33e9bd4ed5a 100644 +--- a/net/sctp/ipv6.c ++++ b/net/sctp/ipv6.c +@@ -510,7 +510,9 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr, + { + addr->sa.sa_family = AF_INET6; + addr->v6.sin6_port = port; ++ addr->v6.sin6_flowinfo = 0; + addr->v6.sin6_addr = *saddr; ++ addr->v6.sin6_scope_id = 0; + } + + /* Compare addresses exactly. 
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c +index a0c90572d0e5..f86c6555a539 100644 +--- a/net/tipc/netlink_compat.c ++++ b/net/tipc/netlink_compat.c +@@ -258,13 +258,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, + arg = nlmsg_new(0, GFP_KERNEL); + if (!arg) { + kfree_skb(msg->rep); ++ msg->rep = NULL; + return -ENOMEM; + } + + err = __tipc_nl_compat_dumpit(cmd, msg, arg); +- if (err) ++ if (err) { + kfree_skb(msg->rep); +- ++ msg->rep = NULL; ++ } + kfree_skb(arg); + + return err; +diff --git a/sound/core/control.c b/sound/core/control.c +index b4fe9b002512..bd01d492f46a 100644 +--- a/sound/core/control.c ++++ b/sound/core/control.c +@@ -1126,7 +1126,7 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol, + mutex_lock(&ue->card->user_ctl_lock); + change = ue->tlv_data_size != size; + if (!change) +- change = memcmp(ue->tlv_data, new_data, size); ++ change = memcmp(ue->tlv_data, new_data, size) != 0; + kfree(ue->tlv_data); + ue->tlv_data = new_data; + ue->tlv_data_size = size; +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c +index 46f7b023f69c..ac5de4365e15 100644 +--- a/sound/pci/hda/patch_conexant.c ++++ b/sound/pci/hda/patch_conexant.c +@@ -854,6 +854,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { + SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC), ++ SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI), + SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004), +diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c +index 54c33204541f..ff6fcd9f92f7 100644 +--- a/sound/soc/generic/simple-card.c ++++ b/sound/soc/generic/simple-card.c +@@ -100,7 +100,7 @@ static int asoc_simple_card_hw_params(struct snd_pcm_substream *substream, + if (ret && ret != -ENOTSUPP) + goto err; + } +- ++ return 0; + err: + return ret; + } +diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c +index 2a5b3a293cd2..b123734f9fbd 100644 +--- a/sound/soc/sh/rcar/adg.c ++++ b/sound/soc/sh/rcar/adg.c +@@ -437,7 +437,7 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv, + struct device *dev = rsnd_priv_to_dev(priv); + struct device_node *np = dev->of_node; + u32 ckr, rbgx, rbga, rbgb; +- u32 rate, req_rate, div; ++ u32 rate, req_rate = 0, div; + uint32_t count = 0; + unsigned long req_48kHz_rate, req_441kHz_rate; + int i; +diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c +index deed48ef28b8..362446c36c9e 100644 +--- a/sound/soc/sh/rcar/core.c ++++ b/sound/soc/sh/rcar/core.c +@@ -192,19 +192,16 @@ void rsnd_mod_interrupt(struct rsnd_mod *mod, + struct rsnd_priv *priv = rsnd_mod_to_priv(mod); + struct rsnd_dai_stream *io; + struct rsnd_dai *rdai; +- int i, j; +- +- for_each_rsnd_dai(rdai, priv, j) { ++ int i; + +- for (i = 0; i < RSND_MOD_MAX; i++) { +- io = &rdai->playback; +- if (mod == io->mod[i]) +- callback(mod, io); ++ for_each_rsnd_dai(rdai, priv, i) { ++ io = &rdai->playback; ++ if (mod == io->mod[mod->type]) ++ callback(mod, io); + +- io = &rdai->capture; +- if (mod == io->mod[i]) +- callback(mod, io); +- } ++ io = &rdai->capture; ++ if (mod == io->mod[mod->type]) ++ callback(mod, io); + } + } + +@@ -1019,7 
+1016,7 @@ static int rsnd_kctrl_put(struct snd_kcontrol *kctrl, + } + } + +- if (change) ++ if (change && cfg->update) + cfg->update(cfg->io, mod); + + return change; +diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c +index 68b439ed22d7..460d29cbaaa5 100644 +--- a/sound/soc/sh/rcar/src.c ++++ b/sound/soc/sh/rcar/src.c +@@ -691,13 +691,27 @@ static int _rsnd_src_stop_gen2(struct rsnd_mod *mod) + { + rsnd_src_irq_disable_gen2(mod); + +- rsnd_mod_write(mod, SRC_CTRL, 0); ++ /* ++ * stop SRC output only ++ * see rsnd_src_quit_gen2 ++ */ ++ rsnd_mod_write(mod, SRC_CTRL, 0x01); + + rsnd_src_error_record_gen2(mod); + + return rsnd_src_stop(mod); + } + ++static int rsnd_src_quit_gen2(struct rsnd_mod *mod, ++ struct rsnd_dai_stream *io, ++ struct rsnd_priv *priv) ++{ ++ /* stop both out/in */ ++ rsnd_mod_write(mod, SRC_CTRL, 0); ++ ++ return 0; ++} ++ + static void __rsnd_src_interrupt_gen2(struct rsnd_mod *mod, + struct rsnd_dai_stream *io) + { +@@ -971,7 +985,7 @@ static struct rsnd_mod_ops rsnd_src_gen2_ops = { + .probe = rsnd_src_probe_gen2, + .remove = rsnd_src_remove_gen2, + .init = rsnd_src_init_gen2, +- .quit = rsnd_src_quit, ++ .quit = rsnd_src_quit_gen2, + .start = rsnd_src_start_gen2, + .stop = rsnd_src_stop_gen2, + .hw_params = rsnd_src_hw_params, +diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c +index 1427ec21bd7e..c62a2947ac14 100644 +--- a/sound/soc/sh/rcar/ssi.c ++++ b/sound/soc/sh/rcar/ssi.c +@@ -39,6 +39,7 @@ + #define SCKP (1 << 13) /* Serial Bit Clock Polarity */ + #define SWSP (1 << 12) /* Serial WS Polarity */ + #define SDTA (1 << 10) /* Serial Data Alignment */ ++#define PDTA (1 << 9) /* Parallel Data Alignment */ + #define DEL (1 << 8) /* Serial Data Delay */ + #define CKDV(v) (v << 4) /* Serial Clock Division Ratio */ + #define TRMD (1 << 1) /* Transmit/Receive Mode Select */ +@@ -286,7 +287,7 @@ static int rsnd_ssi_init(struct rsnd_mod *mod, + struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); + u32 cr; + +- cr = FORCE; ++ cr = FORCE | PDTA; + + /* + * always use 32bit system word for easy clock calculation. diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.85-86.patch b/patch/kernel/mvebu64-default/03-patch-4.4.85-86.patch new file mode 100644 index 000000000..a20d51922 --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.85-86.patch @@ -0,0 +1,393 @@ +diff --git a/Makefile b/Makefile +index 0f3d843f42a7..1207bf6a0e7a 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 85 ++SUBLEVEL = 86 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c +index 4c46c54a3ad7..6638903f0cb9 100644 +--- a/arch/arm64/kernel/fpsimd.c ++++ b/arch/arm64/kernel/fpsimd.c +@@ -157,9 +157,11 @@ void fpsimd_thread_switch(struct task_struct *next) + + void fpsimd_flush_thread(void) + { ++ preempt_disable(); + memset(¤t->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); + fpsimd_flush_task_state(current); + set_thread_flag(TIF_FOREIGN_FPSTATE); ++ preempt_enable(); + } + + /* +diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c +index a4b466424a32..7fabf49f2aeb 100644 +--- a/arch/arm64/mm/fault.c ++++ b/arch/arm64/mm/fault.c +@@ -313,8 +313,11 @@ retry: + * signal first. We do not need to release the mmap_sem because it + * would already be released in __lock_page_or_retry in mm/filemap.c. 
+ */ +- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) ++ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) { ++ if (!user_mode(regs)) ++ goto no_context; + return 0; ++ } + + /* + * Major/minor page fault accounting is only done on the initial +diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h +index de25aad07853..9016b4b70375 100644 +--- a/arch/x86/include/asm/io.h ++++ b/arch/x86/include/asm/io.h +@@ -304,13 +304,13 @@ static inline unsigned type in##bwl##_p(int port) \ + static inline void outs##bwl(int port, const void *addr, unsigned long count) \ + { \ + asm volatile("rep; outs" #bwl \ +- : "+S"(addr), "+c"(count) : "d"(port)); \ ++ : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \ + } \ + \ + static inline void ins##bwl(int port, void *addr, unsigned long count) \ + { \ + asm volatile("rep; ins" #bwl \ +- : "+D"(addr), "+c"(count) : "d"(port)); \ ++ : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \ + } + + BUILDIO(b, b, char) +diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c +index cc91ae832ffb..6fd7b50c5747 100644 +--- a/drivers/gpu/drm/i915/intel_uncore.c ++++ b/drivers/gpu/drm/i915/intel_uncore.c +@@ -635,7 +635,8 @@ hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv) + "enabling oneshot unclaimed register reporting. " + "Please use i915.mmio_debug=N for more information.\n"); + __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); +- i915.mmio_debug = mmio_debug_once--; ++ i915.mmio_debug = mmio_debug_once; ++ mmio_debug_once = false; + } + } + +diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c +index f325663c27c5..4b58e8aaf5c5 100644 +--- a/drivers/i2c/busses/i2c-jz4780.c ++++ b/drivers/i2c/busses/i2c-jz4780.c +@@ -786,10 +786,6 @@ static int jz4780_i2c_probe(struct platform_device *pdev) + + jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0); + +- i2c->cmd = 0; +- memset(i2c->cmd_buf, 0, BUFSIZE); +- memset(i2c->data_buf, 0, BUFSIZE); +- + i2c->irq = platform_get_irq(pdev, 0); + ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0, + dev_name(&pdev->dev), i2c); +diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c +index 257a9eadd595..4ac6764f4897 100644 +--- a/drivers/net/wireless/p54/fwio.c ++++ b/drivers/net/wireless/p54/fwio.c +@@ -488,7 +488,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell) + + entry += sizeof(__le16); + chan->pa_points_per_curve = 8; +- memset(chan->curve_data, 0, sizeof(*chan->curve_data)); ++ memset(chan->curve_data, 0, sizeof(chan->curve_data)); + memcpy(chan->curve_data, entry, + sizeof(struct p54_pa_curve_data_sample) * + min((u8)8, curve_data->points_per_channel)); +diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c +index 1910100638a2..00602abec0ea 100644 +--- a/drivers/scsi/isci/remote_node_context.c ++++ b/drivers/scsi/isci/remote_node_context.c +@@ -66,6 +66,9 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state) + { + static const char * const strings[] = RNC_STATES; + ++ if (state >= ARRAY_SIZE(strings)) ++ return "UNKNOWN"; ++ + return strings[state]; + } + #undef C +diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c +index 0e6aaef9a038..c74f74ab981c 100644 +--- a/drivers/scsi/lpfc/lpfc_els.c ++++ b/drivers/scsi/lpfc/lpfc_els.c +@@ -1054,7 +1054,10 @@ stop_rr_fcf_flogi: + lpfc_sli4_unreg_all_rpis(vport); + } + } +- lpfc_issue_reg_vfi(vport); ++ ++ /* Do not register VFI if the 
driver aborted FLOGI */ ++ if (!lpfc_error_lost_link(irsp)) ++ lpfc_issue_reg_vfi(vport); + lpfc_nlp_put(ndlp); + goto out; + } +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index 6514636431ab..8a9e139e2853 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -153,6 +153,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */ + struct sg_device *parentdp; /* owning device */ + wait_queue_head_t read_wait; /* queue read until command done */ + rwlock_t rq_list_lock; /* protect access to list in req_arr */ ++ struct mutex f_mutex; /* protect against changes in this fd */ + int timeout; /* defaults to SG_DEFAULT_TIMEOUT */ + int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */ + Sg_scatter_hold reserve; /* buffer held for this file descriptor */ +@@ -166,6 +167,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */ + unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */ + char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ + char mmap_called; /* 0 -> mmap() never called on this fd */ ++ char res_in_use; /* 1 -> 'reserve' array in use */ + struct kref f_ref; + struct execute_work ew; + } Sg_fd; +@@ -209,7 +211,6 @@ static void sg_remove_sfp(struct kref *); + static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id); + static Sg_request *sg_add_request(Sg_fd * sfp); + static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); +-static int sg_res_in_use(Sg_fd * sfp); + static Sg_device *sg_get_dev(int dev); + static void sg_device_destroy(struct kref *kref); + +@@ -625,6 +626,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) + } + buf += SZ_SG_HEADER; + __get_user(opcode, buf); ++ mutex_lock(&sfp->f_mutex); + if (sfp->next_cmd_len > 0) { + cmd_size = sfp->next_cmd_len; + sfp->next_cmd_len = 0; /* reset so only this write() effected */ +@@ -633,6 +635,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) + if ((opcode >= 0xc0) && old_hdr.twelve_byte) + cmd_size = 12; + } ++ mutex_unlock(&sfp->f_mutex); + SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, + "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size)); + /* Determine buffer size. 
*/ +@@ -732,7 +735,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, + sg_remove_request(sfp, srp); + return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */ + } +- if (sg_res_in_use(sfp)) { ++ if (sfp->res_in_use) { + sg_remove_request(sfp, srp); + return -EBUSY; /* reserve buffer already being used */ + } +@@ -902,7 +905,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) + return result; + if (val) { + sfp->low_dma = 1; +- if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) { ++ if ((0 == sfp->low_dma) && !sfp->res_in_use) { + val = (int) sfp->reserve.bufflen; + sg_remove_scat(sfp, &sfp->reserve); + sg_build_reserve(sfp, val); +@@ -977,12 +980,18 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) + return -EINVAL; + val = min_t(int, val, + max_sectors_bytes(sdp->device->request_queue)); ++ mutex_lock(&sfp->f_mutex); + if (val != sfp->reserve.bufflen) { +- if (sg_res_in_use(sfp) || sfp->mmap_called) ++ if (sfp->mmap_called || ++ sfp->res_in_use) { ++ mutex_unlock(&sfp->f_mutex); + return -EBUSY; ++ } ++ + sg_remove_scat(sfp, &sfp->reserve); + sg_build_reserve(sfp, val); + } ++ mutex_unlock(&sfp->f_mutex); + return 0; + case SG_GET_RESERVED_SIZE: + val = min_t(int, sfp->reserve.bufflen, +@@ -1737,13 +1746,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd) + md = &map_data; + + if (md) { +- if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen) ++ mutex_lock(&sfp->f_mutex); ++ if (dxfer_len <= rsv_schp->bufflen && ++ !sfp->res_in_use) { ++ sfp->res_in_use = 1; + sg_link_reserve(sfp, srp, dxfer_len); +- else { ++ } else if ((hp->flags & SG_FLAG_MMAP_IO) && sfp->res_in_use) { ++ mutex_unlock(&sfp->f_mutex); ++ return -EBUSY; ++ } else { + res = sg_build_indirect(req_schp, sfp, dxfer_len); +- if (res) ++ if (res) { ++ mutex_unlock(&sfp->f_mutex); + return res; ++ } + } ++ mutex_unlock(&sfp->f_mutex); + + md->pages = req_schp->pages; + md->page_order = req_schp->page_order; +@@ -2034,6 +2052,8 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) + req_schp->sglist_len = 0; + sfp->save_scat_len = 0; + srp->res_used = 0; ++ /* Called without mutex lock to avoid deadlock */ ++ sfp->res_in_use = 0; + } + + static Sg_request * +@@ -2145,6 +2165,7 @@ sg_add_sfp(Sg_device * sdp) + rwlock_init(&sfp->rq_list_lock); + + kref_init(&sfp->f_ref); ++ mutex_init(&sfp->f_mutex); + sfp->timeout = SG_DEFAULT_TIMEOUT; + sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER; + sfp->force_packid = SG_DEF_FORCE_PACK_ID; +@@ -2220,20 +2241,6 @@ sg_remove_sfp(struct kref *kref) + schedule_work(&sfp->ew.work); + } + +-static int +-sg_res_in_use(Sg_fd * sfp) +-{ +- const Sg_request *srp; +- unsigned long iflags; +- +- read_lock_irqsave(&sfp->rq_list_lock, iflags); +- for (srp = sfp->headrp; srp; srp = srp->nextrp) +- if (srp->res_used) +- break; +- read_unlock_irqrestore(&sfp->rq_list_lock, iflags); +- return srp ? 
1 : 0; +-} +- + #ifdef CONFIG_SCSI_PROC_FS + static int + sg_idr_max_id(int id, void *p, void *data) +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index 9c62a6f9757a..600c67ef8a03 100644 +--- a/fs/btrfs/volumes.c ++++ b/fs/btrfs/volumes.c +@@ -108,7 +108,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { + }, + }; + +-const u64 const btrfs_raid_group[BTRFS_NR_RAID_TYPES] = { ++const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = { + [BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10, + [BTRFS_RAID_RAID1] = BTRFS_BLOCK_GROUP_RAID1, + [BTRFS_RAID_DUP] = BTRFS_BLOCK_GROUP_DUP, +diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h +index 782d4e814e21..4bc4b1b13193 100644 +--- a/include/linux/lightnvm.h ++++ b/include/linux/lightnvm.h +@@ -310,6 +310,7 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, + { + struct ppa_addr l; + ++ l.ppa = 0; + /* + * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc. + */ +diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c +index 7080ae1eb6c1..f850e906564b 100644 +--- a/kernel/gcov/base.c ++++ b/kernel/gcov/base.c +@@ -98,6 +98,12 @@ void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters) + } + EXPORT_SYMBOL(__gcov_merge_icall_topn); + ++void __gcov_exit(void) ++{ ++ /* Unused. */ ++} ++EXPORT_SYMBOL(__gcov_exit); ++ + /** + * gcov_enable_events - enable event reporting through gcov_event() + * +diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c +index e25e92fb44fa..46a18e72bce6 100644 +--- a/kernel/gcov/gcc_4_7.c ++++ b/kernel/gcov/gcc_4_7.c +@@ -18,7 +18,9 @@ + #include + #include "gcov.h" + +-#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1 ++#if (__GNUC__ >= 7) ++#define GCOV_COUNTERS 9 ++#elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) + #define GCOV_COUNTERS 10 + #elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9 + #define GCOV_COUNTERS 9 +diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c +index 74177189063c..d3125c169684 100644 +--- a/sound/pci/au88x0/au88x0_core.c ++++ b/sound/pci/au88x0/au88x0_core.c +@@ -2150,8 +2150,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir, + stream->resources, en, + VORTEX_RESOURCE_SRC)) < 0) { + memset(stream->resources, 0, +- sizeof(unsigned char) * +- VORTEX_RESOURCE_LAST); ++ sizeof(stream->resources)); + return -EBUSY; + } + if (stream->type != VORTEX_PCM_A3D) { +@@ -2161,7 +2160,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir, + VORTEX_RESOURCE_MIXIN)) < 0) { + memset(stream->resources, + 0, +- sizeof(unsigned char) * VORTEX_RESOURCE_LAST); ++ sizeof(stream->resources)); + return -EBUSY; + } + } +@@ -2174,8 +2173,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir, + stream->resources, en, + VORTEX_RESOURCE_A3D)) < 0) { + memset(stream->resources, 0, +- sizeof(unsigned char) * +- VORTEX_RESOURCE_LAST); ++ sizeof(stream->resources)); + dev_err(vortex->card->dev, + "out of A3D sources. 
Sorry\n"); + return -EBUSY; +@@ -2289,8 +2287,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir, + VORTEX_RESOURCE_MIXOUT)) + < 0) { + memset(stream->resources, 0, +- sizeof(unsigned char) * +- VORTEX_RESOURCE_LAST); ++ sizeof(stream->resources)); + return -EBUSY; + } + if ((src[i] = +@@ -2298,8 +2295,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir, + stream->resources, en, + VORTEX_RESOURCE_SRC)) < 0) { + memset(stream->resources, 0, +- sizeof(unsigned char) * +- VORTEX_RESOURCE_LAST); ++ sizeof(stream->resources)); + return -EBUSY; + } + } diff --git a/patch/kernel/mvebu64-default/03-patch-4.4.86-87.patch b/patch/kernel/mvebu64-default/03-patch-4.4.86-87.patch new file mode 100644 index 000000000..5292853d3 --- /dev/null +++ b/patch/kernel/mvebu64-default/03-patch-4.4.86-87.patch @@ -0,0 +1,408 @@ +diff --git a/Makefile b/Makefile +index 1207bf6a0e7a..f6838187b568 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 86 ++SUBLEVEL = 87 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h +index 4cb4b6d3452c..0bc66e1d3a7e 100644 +--- a/arch/alpha/include/asm/types.h ++++ b/arch/alpha/include/asm/types.h +@@ -1,6 +1,6 @@ + #ifndef _ALPHA_TYPES_H + #define _ALPHA_TYPES_H + +-#include ++#include + + #endif /* _ALPHA_TYPES_H */ +diff --git a/arch/alpha/include/uapi/asm/types.h b/arch/alpha/include/uapi/asm/types.h +index 9fd3cd459777..8d1024d7be05 100644 +--- a/arch/alpha/include/uapi/asm/types.h ++++ b/arch/alpha/include/uapi/asm/types.h +@@ -9,8 +9,18 @@ + * need to be careful to avoid a name clashes. + */ + +-#ifndef __KERNEL__ ++/* ++ * This is here because we used to use l64 for alpha ++ * and we don't want to impact user mode with our change to ll64 ++ * in the kernel. ++ * ++ * However, some user programs are fine with this. They can ++ * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here. ++ */ ++#if !defined(__SANE_USERSPACE_TYPES__) && !defined(__KERNEL__) + #include ++#else ++#include + #endif + + #endif /* _UAPI_ALPHA_TYPES_H */ +diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c +index ba079e279b58..e8835d4e173c 100644 +--- a/arch/arm/kvm/mmu.c ++++ b/arch/arm/kvm/mmu.c +@@ -824,24 +824,25 @@ void stage2_unmap_vm(struct kvm *kvm) + * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all + * underlying level-2 and level-3 tables before freeing the actual level-1 table + * and setting the struct pointer to NULL. +- * +- * Note we don't need locking here as this is only called when the VM is +- * destroyed, which can only be done once. 
+ */ + void kvm_free_stage2_pgd(struct kvm *kvm) + { +- if (kvm->arch.pgd == NULL) +- return; ++ void *pgd = NULL; ++ void *hwpgd = NULL; + + spin_lock(&kvm->mmu_lock); +- unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); ++ if (kvm->arch.pgd) { ++ unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); ++ pgd = READ_ONCE(kvm->arch.pgd); ++ hwpgd = kvm_get_hwpgd(kvm); ++ kvm->arch.pgd = NULL; ++ } + spin_unlock(&kvm->mmu_lock); + +- kvm_free_hwpgd(kvm_get_hwpgd(kvm)); +- if (KVM_PREALLOC_LEVEL > 0) +- kfree(kvm->arch.pgd); +- +- kvm->arch.pgd = NULL; ++ if (hwpgd) ++ kvm_free_hwpgd(hwpgd); ++ if (KVM_PREALLOC_LEVEL > 0 && pgd) ++ kfree(pgd); + } + + static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, +diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c +index f5e9f9310b48..b3b0004ea8ac 100644 +--- a/crypto/algif_skcipher.c ++++ b/crypto/algif_skcipher.c +@@ -86,8 +86,13 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq) + } + sgl = sreq->tsg; + n = sg_nents(sgl); +- for_each_sg(sgl, sg, n, i) +- put_page(sg_page(sg)); ++ for_each_sg(sgl, sg, n, i) { ++ struct page *page = sg_page(sg); ++ ++ /* some SGs may not have a page mapped */ ++ if (page && atomic_read(&page->_count)) ++ put_page(page); ++ } + + kfree(sreq->tsg); + } +diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c +index 025c429050c0..5d8dfe027b30 100644 +--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c ++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c +@@ -612,7 +612,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, + } else { + pr_err("Failed to fill pool (%p)\n", pool); + /* If we have any pages left put them to the pool. */ +- list_for_each_entry(p, &pool->list, lru) { ++ list_for_each_entry(p, &new_pages, lru) { + ++cpages; + } + list_splice(&new_pages, &pool->list); +diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c +index 7ba795b24e75..639d1a9c8793 100644 +--- a/drivers/i2c/busses/i2c-ismt.c ++++ b/drivers/i2c/busses/i2c-ismt.c +@@ -339,8 +339,10 @@ static int ismt_process_desc(const struct ismt_desc *desc, + break; + case I2C_SMBUS_BLOCK_DATA: + case I2C_SMBUS_I2C_BLOCK_DATA: +- memcpy(&data->block[1], dma_buffer, desc->rxbytes); +- data->block[0] = desc->rxbytes; ++ if (desc->rxbytes != dma_buffer[0] + 1) ++ return -EMSGSIZE; ++ ++ memcpy(data->block, dma_buffer, desc->rxbytes); + break; + } + return 0; +diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c +index 9e17ef27a183..6f1dbd52ec91 100644 +--- a/drivers/irqchip/irq-mips-gic.c ++++ b/drivers/irqchip/irq-mips-gic.c +@@ -915,8 +915,11 @@ static int __init gic_of_init(struct device_node *node, + gic_len = resource_size(&res); + } + +- if (mips_cm_present()) ++ if (mips_cm_present()) { + write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK); ++ /* Ensure GIC region is enabled before trying to access it */ ++ __sync(); ++ } + gic_present = true; + + __gic_init(gic_base, gic_len, cpu_vec, 0, node); +diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c +index cd4777954f87..9bee3f11898a 100644 +--- a/drivers/net/wireless/ti/wl1251/main.c ++++ b/drivers/net/wireless/ti/wl1251/main.c +@@ -1567,6 +1567,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void) + + wl->state = WL1251_STATE_OFF; + mutex_init(&wl->mutex); ++ spin_lock_init(&wl->wl_lock); + + wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE; + wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE; +diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c +index 
c6a1ec110c01..22bae2b434e2 100644 +--- a/fs/ceph/addr.c ++++ b/fs/ceph/addr.c +@@ -189,7 +189,7 @@ static int ceph_releasepage(struct page *page, gfp_t g) + /* + * read a single page, without unlocking it. + */ +-static int readpage_nounlock(struct file *filp, struct page *page) ++static int ceph_do_readpage(struct file *filp, struct page *page) + { + struct inode *inode = file_inode(filp); + struct ceph_inode_info *ci = ceph_inode(inode); +@@ -219,7 +219,7 @@ static int readpage_nounlock(struct file *filp, struct page *page) + + err = ceph_readpage_from_fscache(inode, page); + if (err == 0) +- goto out; ++ return -EINPROGRESS; + + dout("readpage inode %p file %p page %p index %lu\n", + inode, filp, page, page->index); +@@ -249,8 +249,11 @@ out: + + static int ceph_readpage(struct file *filp, struct page *page) + { +- int r = readpage_nounlock(filp, page); +- unlock_page(page); ++ int r = ceph_do_readpage(filp, page); ++ if (r != -EINPROGRESS) ++ unlock_page(page); ++ else ++ r = 0; + return r; + } + +@@ -1094,7 +1097,7 @@ retry_locked: + goto retry_locked; + r = writepage_nounlock(page, NULL); + if (r < 0) +- goto fail_nosnap; ++ goto fail_unlock; + goto retry_locked; + } + +@@ -1122,11 +1125,14 @@ retry_locked: + } + + /* we need to read it. */ +- r = readpage_nounlock(file, page); +- if (r < 0) +- goto fail_nosnap; ++ r = ceph_do_readpage(file, page); ++ if (r < 0) { ++ if (r == -EINPROGRESS) ++ return -EAGAIN; ++ goto fail_unlock; ++ } + goto retry_locked; +-fail_nosnap: ++fail_unlock: + unlock_page(page); + return r; + } +diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c +index a4766ded1ba7..ff1cfd7b1083 100644 +--- a/fs/ceph/cache.c ++++ b/fs/ceph/cache.c +@@ -224,13 +224,7 @@ void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) + fscache_relinquish_cookie(cookie, 0); + } + +-static void ceph_vfs_readpage_complete(struct page *page, void *data, int error) +-{ +- if (!error) +- SetPageUptodate(page); +-} +- +-static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int error) ++static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error) + { + if (!error) + SetPageUptodate(page); +@@ -259,7 +253,7 @@ int ceph_readpage_from_fscache(struct inode *inode, struct page *page) + return -ENOBUFS; + + ret = fscache_read_or_alloc_page(ci->fscache, page, +- ceph_vfs_readpage_complete, NULL, ++ ceph_readpage_from_fscache_complete, NULL, + GFP_KERNEL); + + switch (ret) { +@@ -288,7 +282,7 @@ int ceph_readpages_from_fscache(struct inode *inode, + return -ENOBUFS; + + ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages, +- ceph_vfs_readpage_complete_unlock, ++ ceph_readpage_from_fscache_complete, + NULL, mapping_gfp_mask(mapping)); + + switch (ret) { +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c +index fa8df3fef6fc..297e05c9e2b0 100644 +--- a/fs/cifs/dir.c ++++ b/fs/cifs/dir.c +@@ -194,7 +194,7 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon) + int i; + + if (unlikely(direntry->d_name.len > +- tcon->fsAttrInfo.MaxPathNameComponentLength)) ++ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength))) + return -ENAMETOOLONG; + + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h +index b8f553b32dda..aacb15bd56fe 100644 +--- a/fs/cifs/smb2pdu.h ++++ b/fs/cifs/smb2pdu.h +@@ -82,8 +82,8 @@ + + #define NUMBER_OF_SMB2_COMMANDS 0x0013 + +-/* BB FIXME - analyze following length BB */ +-#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 
2 pad */ ++/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */ ++#define MAX_SMB2_HDR_SIZE 0x00b0 + + #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe) + +diff --git a/fs/eventpoll.c b/fs/eventpoll.c +index 1e009cad8d5c..1b08556776ce 100644 +--- a/fs/eventpoll.c ++++ b/fs/eventpoll.c +@@ -518,8 +518,13 @@ static void ep_remove_wait_queue(struct eppoll_entry *pwq) + wait_queue_head_t *whead; + + rcu_read_lock(); +- /* If it is cleared by POLLFREE, it should be rcu-safe */ +- whead = rcu_dereference(pwq->whead); ++ /* ++ * If it is cleared by POLLFREE, it should be rcu-safe. ++ * If we read NULL we need a barrier paired with ++ * smp_store_release() in ep_poll_callback(), otherwise ++ * we rely on whead->lock. ++ */ ++ whead = smp_load_acquire(&pwq->whead); + if (whead) + remove_wait_queue(whead, &pwq->wait); + rcu_read_unlock(); +@@ -1003,17 +1008,6 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k + struct epitem *epi = ep_item_from_wait(wait); + struct eventpoll *ep = epi->ep; + +- if ((unsigned long)key & POLLFREE) { +- ep_pwq_from_wait(wait)->whead = NULL; +- /* +- * whead = NULL above can race with ep_remove_wait_queue() +- * which can do another remove_wait_queue() after us, so we +- * can't use __remove_wait_queue(). whead->lock is held by +- * the caller. +- */ +- list_del_init(&wait->task_list); +- } +- + spin_lock_irqsave(&ep->lock, flags); + + /* +@@ -1078,6 +1072,23 @@ out_unlock: + if (pwake) + ep_poll_safewake(&ep->poll_wait); + ++ ++ if ((unsigned long)key & POLLFREE) { ++ /* ++ * If we race with ep_remove_wait_queue() it can miss ++ * ->whead = NULL and do another remove_wait_queue() after ++ * us, so we can't use __remove_wait_queue(). ++ */ ++ list_del_init(&wait->task_list); ++ /* ++ * ->whead != NULL protects us from the race with ep_free() ++ * or ep_remove(), ep_remove_wait_queue() takes whead->lock ++ * held by the caller. Once we nullify it, nothing protects ++ * ep/epi or even wait. ++ */ ++ smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL); ++ } ++ + return 1; + } + +diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h +index fc824e2828f3..5d2add1a6c96 100644 +--- a/include/asm-generic/topology.h ++++ b/include/asm-generic/topology.h +@@ -48,7 +48,11 @@ + #define parent_node(node) ((void)(node),0) + #endif + #ifndef cpumask_of_node +-#define cpumask_of_node(node) ((void)node, cpu_online_mask) ++ #ifdef CONFIG_NEED_MULTIPLE_NODES ++ #define cpumask_of_node(node) ((node) == 0 ? 
cpu_online_mask : cpu_none_mask) ++ #else ++ #define cpumask_of_node(node) ((void)node, cpu_online_mask) ++ #endif + #endif + #ifndef pcibus_to_node + #define pcibus_to_node(bus) ((void)(bus), -1) +diff --git a/kernel/cpuset.c b/kernel/cpuset.c +index 8ccd66a97c8b..2924b6faa469 100644 +--- a/kernel/cpuset.c ++++ b/kernel/cpuset.c +@@ -1910,6 +1910,7 @@ static struct cftype files[] = { + { + .name = "memory_pressure", + .read_u64 = cpuset_read_u64, ++ .private = FILE_MEMORY_PRESSURE, + }, + + { +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 77055a362041..0e01250f2072 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -3275,9 +3275,15 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, + struct xfrm_state *x_new[XFRM_MAX_DEPTH]; + struct xfrm_migrate *mp; + ++ /* Stage 0 - sanity checks */ + if ((err = xfrm_migrate_check(m, num_migrate)) < 0) + goto out; + ++ if (dir >= XFRM_POLICY_MAX) { ++ err = -EINVAL; ++ goto out; ++ } ++ + /* Stage 1 - find policy */ + if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) { + err = -ENOENT; diff --git a/patch/kernel/rockchip-default/03-patch-4.4.84-85.patch b/patch/kernel/rockchip-default/03-patch-4.4.84-85.patch new file mode 100644 index 000000000..d88ef90b4 --- /dev/null +++ b/patch/kernel/rockchip-default/03-patch-4.4.84-85.patch @@ -0,0 +1,1498 @@ +diff --git a/Makefile b/Makefile +index 9d77ac063ec0..0f3d843f42a7 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 84 ++SUBLEVEL = 85 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h +index 210ef3e72332..0ddd7144c492 100644 +--- a/arch/arc/include/asm/cache.h ++++ b/arch/arc/include/asm/cache.h +@@ -88,7 +88,9 @@ extern int ioc_exists; + #define ARC_REG_SLC_FLUSH 0x904 + #define ARC_REG_SLC_INVALIDATE 0x905 + #define ARC_REG_SLC_RGN_START 0x914 ++#define ARC_REG_SLC_RGN_START1 0x915 + #define ARC_REG_SLC_RGN_END 0x916 ++#define ARC_REG_SLC_RGN_END1 0x917 + + /* Bit val in SLC_CONTROL */ + #define SLC_CTRL_IM 0x040 +diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c +index d81b6d7e11e7..9a84cbdd44b0 100644 +--- a/arch/arc/mm/cache.c ++++ b/arch/arc/mm/cache.c +@@ -543,6 +543,7 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op) + static DEFINE_SPINLOCK(lock); + unsigned long flags; + unsigned int ctrl; ++ phys_addr_t end; + + spin_lock_irqsave(&lock, flags); + +@@ -572,8 +573,16 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op) + * END needs to be setup before START (latter triggers the operation) + * END can't be same as START, so add (l2_line_sz - 1) to sz + */ +- write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1)); +- write_aux_reg(ARC_REG_SLC_RGN_START, paddr); ++ end = paddr + sz + l2_line_sz - 1; ++ if (is_pae40_enabled()) ++ write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end)); ++ ++ write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end)); ++ ++ if (is_pae40_enabled()) ++ write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr)); ++ ++ write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr)); + + while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY); + +diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c +index eac4f3b02df9..bb81cd05f0bc 100644 +--- a/drivers/acpi/apei/ghes.c ++++ b/drivers/acpi/apei/ghes.c +@@ -1067,6 +1067,7 @@ static int ghes_remove(struct platform_device *ghes_dev) + if (list_empty(&ghes_sci)) + 
unregister_acpi_hed_notifier(&ghes_notifier_sci); + mutex_unlock(&ghes_list_mutex); ++ synchronize_rcu(); + break; + case ACPI_HEST_NOTIFY_NMI: + ghes_nmi_remove(ghes); +diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c +index ccdc8db16bb8..fa2cf2dc4e33 100644 +--- a/drivers/acpi/ioapic.c ++++ b/drivers/acpi/ioapic.c +@@ -45,6 +45,12 @@ static acpi_status setup_res(struct acpi_resource *acpi_res, void *data) + struct resource *res = data; + struct resource_win win; + ++ /* ++ * We might assign this to 'res' later, make sure all pointers are ++ * cleared before the resource is added to the global list ++ */ ++ memset(&win, 0, sizeof(win)); ++ + res->flags = 0; + if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM) == 0) + return AE_OK; +diff --git a/drivers/android/binder.c b/drivers/android/binder.c +index 47ddfefe2443..5531f020e561 100644 +--- a/drivers/android/binder.c ++++ b/drivers/android/binder.c +@@ -1718,8 +1718,12 @@ static void binder_transaction(struct binder_proc *proc, + list_add_tail(&t->work.entry, target_list); + tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; + list_add_tail(&tcomplete->entry, &thread->todo); +- if (target_wait) +- wake_up_interruptible(target_wait); ++ if (target_wait) { ++ if (reply || !(t->flags & TF_ONE_WAY)) ++ wake_up_interruptible_sync(target_wait); ++ else ++ wake_up_interruptible(target_wait); ++ } + return; + + err_get_unused_fd_failed: +@@ -2865,7 +2869,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) + const char *failure_string; + struct binder_buffer *buffer; + +- if (proc->tsk != current) ++ if (proc->tsk != current->group_leader) + return -EINVAL; + + if ((vma->vm_end - vma->vm_start) > SZ_4M) +@@ -2966,8 +2970,8 @@ static int binder_open(struct inode *nodp, struct file *filp) + proc = kzalloc(sizeof(*proc), GFP_KERNEL); + if (proc == NULL) + return -ENOMEM; +- get_task_struct(current); +- proc->tsk = current; ++ get_task_struct(current->group_leader); ++ proc->tsk = current->group_leader; + INIT_LIST_HEAD(&proc->todo); + init_waitqueue_head(&proc->wait); + proc->default_priority = task_nice(current); +diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c +index 6253775b8d9c..50d74e5ce41b 100644 +--- a/drivers/gpu/drm/drm_atomic.c ++++ b/drivers/gpu/drm/drm_atomic.c +@@ -1247,6 +1247,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state) + if (config->funcs->atomic_check) + ret = config->funcs->atomic_check(state->dev, state); + ++ if (ret) ++ return ret; ++ + if (!state->allow_modeset) { + for_each_crtc_in_state(state, crtc, crtc_state, i) { + if (drm_atomic_crtc_needs_modeset(crtc_state)) { +@@ -1257,7 +1260,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state) + } + } + +- return ret; ++ return 0; + } + EXPORT_SYMBOL(drm_atomic_check_only); + +diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c +index b205224f1a44..9147113139be 100644 +--- a/drivers/gpu/drm/drm_gem.c ++++ b/drivers/gpu/drm/drm_gem.c +@@ -715,13 +715,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) + struct drm_gem_object *obj = ptr; + struct drm_device *dev = obj->dev; + ++ if (dev->driver->gem_close_object) ++ dev->driver->gem_close_object(obj, file_priv); ++ + if (drm_core_check_feature(dev, DRIVER_PRIME)) + drm_gem_remove_prime_handles(obj, file_priv); + drm_vma_node_revoke(&obj->vma_node, file_priv->filp); + +- if (dev->driver->gem_close_object) +- dev->driver->gem_close_object(obj, file_priv); +- + drm_gem_object_handle_unreference_unlocked(obj); + + 
return 0; +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +index 9255b9c096b6..9befd624a5f0 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +@@ -148,8 +148,8 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) + rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0); + + /* Signal polarities */ +- value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL) +- | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL) ++ value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0) ++ | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0) + | DSMR_DIPM_DE | DSMR_CSPM; + rcar_du_crtc_write(rcrtc, DSMR, value); + +@@ -171,7 +171,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) + mode->crtc_vsync_start - 1); + rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1); + +- rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start); ++ rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start - 1); + rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay); + } + +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c +index 46429c4be8e5..2b75a4891dec 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c +@@ -642,13 +642,13 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu, + } + + ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector); +- of_node_put(encoder); +- of_node_put(connector); +- + if (ret && ret != -EPROBE_DEFER) + dev_warn(rcdu->dev, +- "failed to initialize encoder %s (%d), skipping\n", +- encoder->full_name, ret); ++ "failed to initialize encoder %s on output %u (%d), skipping\n", ++ of_node_full_name(encoder), output, ret); ++ ++ of_node_put(encoder); ++ of_node_put(connector); + + return ret; + } +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c +index 85043c5bad03..873e04aa9352 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c +@@ -56,11 +56,11 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds, + return ret; + + /* PLL clock configuration */ +- if (freq <= 38000) ++ if (freq < 39000) + pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M; +- else if (freq <= 60000) ++ else if (freq < 61000) + pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M; +- else if (freq <= 121000) ++ else if (freq < 121000) + pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M; + else + pllcr = LVDPLLCR_PLLDLYCNT_150M; +@@ -102,7 +102,7 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds, + /* Turn the PLL on, wait for the startup delay, and turn the output + * on. 
+ */ +- lvdcr0 |= LVDCR0_PLLEN; ++ lvdcr0 |= LVDCR0_PLLON; + rcar_lvds_write(lvds, LVDCR0, lvdcr0); + + usleep_range(100, 150); +diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h +index 77cf9289ab65..b1eafd097a79 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h ++++ b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h +@@ -18,7 +18,7 @@ + #define LVDCR0_DMD (1 << 12) + #define LVDCR0_LVMD_MASK (0xf << 8) + #define LVDCR0_LVMD_SHIFT 8 +-#define LVDCR0_PLLEN (1 << 4) ++#define LVDCR0_PLLON (1 << 4) + #define LVDCR0_BEN (1 << 2) + #define LVDCR0_LVEN (1 << 1) + #define LVDCR0_LVRES (1 << 0) +diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c +index 6b00061c3746..a2ae2213ef3e 100644 +--- a/drivers/i2c/busses/i2c-designware-platdrv.c ++++ b/drivers/i2c/busses/i2c-designware-platdrv.c +@@ -294,7 +294,7 @@ static void dw_i2c_plat_complete(struct device *dev) + #endif + + #ifdef CONFIG_PM +-static int dw_i2c_plat_suspend(struct device *dev) ++static int dw_i2c_plat_runtime_suspend(struct device *dev) + { + struct platform_device *pdev = to_platform_device(dev); + struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev); +@@ -318,11 +318,21 @@ static int dw_i2c_plat_resume(struct device *dev) + return 0; + } + ++#ifdef CONFIG_PM_SLEEP ++static int dw_i2c_plat_suspend(struct device *dev) ++{ ++ pm_runtime_resume(dev); ++ return dw_i2c_plat_runtime_suspend(dev); ++} ++#endif ++ + static const struct dev_pm_ops dw_i2c_dev_pm_ops = { + .prepare = dw_i2c_plat_prepare, + .complete = dw_i2c_plat_complete, + SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume) +- SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL) ++ SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, ++ dw_i2c_plat_resume, ++ NULL) + }; + + #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops) +diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +index 0a86ef43e781..a8db38db622e 100644 +--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c ++++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +@@ -36,8 +36,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) + s32 poll_value = 0; + + if (state) { +- if (!atomic_read(&st->user_requested_state)) +- return 0; + if (sensor_hub_device_open(st->hsdev)) + return -EIO; + +@@ -84,6 +82,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) + &report_val); + } + ++ pr_debug("HID_SENSOR %s set power_state %d report_state %d\n", ++ st->pdev->name, state_val, report_val); ++ + sensor_hub_get_feature(st->hsdev, st->power_state.report_id, + st->power_state.index, + sizeof(state_val), &state_val); +@@ -107,6 +108,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state) + ret = pm_runtime_get_sync(&st->pdev->dev); + else { + pm_runtime_mark_last_busy(&st->pdev->dev); ++ pm_runtime_use_autosuspend(&st->pdev->dev); + ret = pm_runtime_put_autosuspend(&st->pdev->dev); + } + if (ret < 0) { +@@ -175,8 +177,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name, + /* Default to 3 seconds, but can be changed from sysfs */ + pm_runtime_set_autosuspend_delay(&attrb->pdev->dev, + 3000); +- pm_runtime_use_autosuspend(&attrb->pdev->dev); +- + return ret; + error_unreg_trigger: + iio_trigger_unregister(trig); +diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c +index 2485b88ee1b6..1880105cc8c4 100644 +--- 
a/drivers/iio/imu/adis16480.c ++++ b/drivers/iio/imu/adis16480.c +@@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { + .gyro_max_val = IIO_RAD_TO_DEGREE(22500), + .gyro_max_scale = 450, + .accel_max_val = IIO_M_S_2_TO_G(12500), +- .accel_max_scale = 5, ++ .accel_max_scale = 10, + }, + [ADIS16485] = { + .channels = adis16485_channels, +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c +index 98d4e515587a..681dce15fbc8 100644 +--- a/drivers/input/mouse/elan_i2c_core.c ++++ b/drivers/input/mouse/elan_i2c_core.c +@@ -1234,6 +1234,7 @@ static const struct acpi_device_id elan_acpi_id[] = { + { "ELAN0000", 0 }, + { "ELAN0100", 0 }, + { "ELAN0600", 0 }, ++ { "ELAN0602", 0 }, + { "ELAN0605", 0 }, + { "ELAN0608", 0 }, + { "ELAN0605", 0 }, +diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c +index 354d47ecd66a..ce6ff9b301bb 100644 +--- a/drivers/input/mouse/trackpoint.c ++++ b/drivers/input/mouse/trackpoint.c +@@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir + if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID))) + return -1; + +- if (param[0] != TP_MAGIC_IDENT) ++ /* add new TP ID. */ ++ if (!(param[0] & TP_MAGIC_IDENT)) + return -1; + + if (firmware_id) +diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h +index 5617ed3a7d7a..88055755f82e 100644 +--- a/drivers/input/mouse/trackpoint.h ++++ b/drivers/input/mouse/trackpoint.h +@@ -21,8 +21,9 @@ + #define TP_COMMAND 0xE2 /* Commands start with this */ + + #define TP_READ_ID 0xE1 /* Sent for device identification */ +-#define TP_MAGIC_IDENT 0x01 /* Sent after a TP_READ_ID followed */ ++#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */ + /* by the firmware ID */ ++ /* Firmware ID includes 0x1, 0x2, 0x3 */ + + + /* +diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h +index a2661381ddfc..d2774197fe58 100644 +--- a/drivers/misc/mei/hw-me-regs.h ++++ b/drivers/misc/mei/hw-me-regs.h +@@ -125,6 +125,11 @@ + #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ + #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ + ++#define MEI_DEV_ID_LBG 0xA1BA /* Lewisburg (SPT) */ ++ ++#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ ++#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ ++ + /* + * MEI HW Section + */ +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c +index 01e20384ac44..adab5bbb642a 100644 +--- a/drivers/misc/mei/pci-me.c ++++ b/drivers/misc/mei/pci-me.c +@@ -86,10 +86,14 @@ static const struct pci_device_id mei_me_pci_tbl[] = { + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)}, ++ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, mei_me_pch8_cfg)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, mei_me_pch8_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, mei_me_pch8_cfg)}, + ++ {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)}, ++ {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)}, ++ + /* required last entry */ + {0, } + }; +diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c +index ecc6fb9ca92f..3bbdf60f8908 100644 +--- a/drivers/ntb/ntb_transport.c ++++ b/drivers/ntb/ntb_transport.c +@@ -599,7 +599,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, + if (!mw->virt_addr) + return -ENOMEM; + +- if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) ++ if (mw_num < 
qp_count % mw_count) + num_qps_mw = qp_count / mw_count + 1; + else + num_qps_mw = qp_count / mw_count; +@@ -947,7 +947,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, + qp->event_handler = NULL; + ntb_qp_link_down_reset(qp); + +- if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) ++ if (mw_num < qp_count % mw_count) + num_qps_mw = qp_count / mw_count + 1; + else + num_qps_mw = qp_count / mw_count; +@@ -1065,8 +1065,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) + qp_count = ilog2(qp_bitmap); + if (max_num_clients && max_num_clients < qp_count) + qp_count = max_num_clients; +- else if (mw_count < qp_count) +- qp_count = mw_count; ++ else if (nt->mw_count < qp_count) ++ qp_count = nt->mw_count; + + qp_bitmap &= BIT_ULL(qp_count) - 1; + +diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c +index 02c3feef4e36..c2d2c17550a7 100644 +--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c ++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c +@@ -49,6 +49,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = { + {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ + {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ + {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ ++ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ + {} /* Terminating entry */ + }; + +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c +index 26a3b389a265..fa8df3fef6fc 100644 +--- a/fs/cifs/dir.c ++++ b/fs/cifs/dir.c +@@ -183,15 +183,20 @@ cifs_bp_rename_retry: + } + + /* ++ * Don't allow path components longer than the server max. + * Don't allow the separator character in a path component. + * The VFS will not allow "/", but "\" is allowed by posix. 
+ */ + static int +-check_name(struct dentry *direntry) ++check_name(struct dentry *direntry, struct cifs_tcon *tcon) + { + struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); + int i; + ++ if (unlikely(direntry->d_name.len > ++ tcon->fsAttrInfo.MaxPathNameComponentLength)) ++ return -ENAMETOOLONG; ++ + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { + for (i = 0; i < direntry->d_name.len; i++) { + if (direntry->d_name.name[i] == '\\') { +@@ -489,10 +494,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, + return finish_no_open(file, res); + } + +- rc = check_name(direntry); +- if (rc) +- return rc; +- + xid = get_xid(); + + cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n", +@@ -505,6 +506,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, + } + + tcon = tlink_tcon(tlink); ++ ++ rc = check_name(direntry, tcon); ++ if (rc) ++ goto out_free_xid; ++ + server = tcon->ses->server; + + if (server->ops->new_lease_key) +@@ -765,7 +771,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, + } + pTcon = tlink_tcon(tlink); + +- rc = check_name(direntry); ++ rc = check_name(direntry, pTcon); + if (rc) + goto lookup_out; + +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index f4afa3b1cc56..6c484ddf26a9 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -2768,8 +2768,8 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf, + kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) * + le32_to_cpu(pfs_inf->SectorsPerAllocationUnit); + kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits); +- kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits); +- kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); ++ kst->f_bfree = kst->f_bavail = ++ le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); + return; + } + +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c +index 3f68a25f2169..544672b440de 100644 +--- a/fs/nfsd/nfs4xdr.c ++++ b/fs/nfsd/nfs4xdr.c +@@ -129,7 +129,7 @@ static void next_decode_page(struct nfsd4_compoundargs *argp) + argp->p = page_address(argp->pagelist[0]); + argp->pagelist++; + if (argp->pagelen < PAGE_SIZE) { +- argp->end = argp->p + (argp->pagelen>>2); ++ argp->end = argp->p + XDR_QUADLEN(argp->pagelen); + argp->pagelen = 0; + } else { + argp->end = argp->p + (PAGE_SIZE>>2); +@@ -1246,9 +1246,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write) + argp->pagelen -= pages * PAGE_SIZE; + len -= pages * PAGE_SIZE; + +- argp->p = (__be32 *)page_address(argp->pagelist[0]); +- argp->pagelist++; +- argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE); ++ next_decode_page(argp); + } + argp->p += XDR_QUADLEN(len); + +diff --git a/include/net/ip.h b/include/net/ip.h +index b450d8653b30..7476bb10ff37 100644 +--- a/include/net/ip.h ++++ b/include/net/ip.h +@@ -314,7 +314,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, + !forwarding) + return dst_mtu(dst); + +- return min(dst->dev->mtu, IP_MAX_MTU); ++ return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU); + } + + static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) +@@ -327,7 +327,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) + return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding); + } + +- return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU); ++ return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU); + } + + u32 ip_idents_reserve(u32 hash, int segs); +diff --git a/include/net/sch_generic.h 
b/include/net/sch_generic.h +index e5bba897d206..7a5d6a073165 100644 +--- a/include/net/sch_generic.h ++++ b/include/net/sch_generic.h +@@ -717,8 +717,11 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, + old = *pold; + *pold = new; + if (old != NULL) { +- qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog); ++ unsigned int qlen = old->q.qlen; ++ unsigned int backlog = old->qstats.backlog; ++ + qdisc_reset(old); ++ qdisc_tree_reduce_backlog(old, qlen, backlog); + } + sch_tree_unlock(sch); + +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 784ab8fe8714..3697063dd09a 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -8473,28 +8473,27 @@ SYSCALL_DEFINE5(perf_event_open, + goto err_context; + + /* +- * Do not allow to attach to a group in a different +- * task or CPU context: ++ * Make sure we're both events for the same CPU; ++ * grouping events for different CPUs is broken; since ++ * you can never concurrently schedule them anyhow. + */ +- if (move_group) { +- /* +- * Make sure we're both on the same task, or both +- * per-cpu events. +- */ +- if (group_leader->ctx->task != ctx->task) +- goto err_context; ++ if (group_leader->cpu != event->cpu) ++ goto err_context; + +- /* +- * Make sure we're both events for the same CPU; +- * grouping events for different CPUs is broken; since +- * you can never concurrently schedule them anyhow. +- */ +- if (group_leader->cpu != event->cpu) +- goto err_context; +- } else { +- if (group_leader->ctx != ctx) +- goto err_context; +- } ++ /* ++ * Make sure we're both on the same task, or both ++ * per-CPU events. ++ */ ++ if (group_leader->ctx->task != ctx->task) ++ goto err_context; ++ ++ /* ++ * Do not allow to attach to a group in a different task ++ * or CPU context. If we're moving SW events, we'll fix ++ * this up later, so allow that. 
++ */ ++ if (!move_group && group_leader->ctx != ctx) ++ goto err_context; + + /* + * Only a group leader can be exclusive or pinned +diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c +index 6816302542b2..f0e5408499b6 100644 +--- a/kernel/trace/trace_events_filter.c ++++ b/kernel/trace/trace_events_filter.c +@@ -1979,6 +1979,10 @@ static int create_filter(struct trace_event_call *call, + if (err && set_str) + append_filter_err(ps, filter); + } ++ if (err && !set_str) { ++ free_event_filter(filter); ++ filter = NULL; ++ } + create_filter_finish(ps); + + *filterp = filter; +diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c +index 1641367e54ca..69f56073b337 100644 +--- a/net/bluetooth/bnep/core.c ++++ b/net/bluetooth/bnep/core.c +@@ -484,16 +484,16 @@ static int bnep_session(void *arg) + struct net_device *dev = s->dev; + struct sock *sk = s->sock->sk; + struct sk_buff *skb; +- wait_queue_t wait; ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); + + BT_DBG(""); + + set_user_nice(current, -15); + +- init_waitqueue_entry(&wait, current); + add_wait_queue(sk_sleep(sk), &wait); + while (1) { +- set_current_state(TASK_INTERRUPTIBLE); ++ /* Ensure session->terminate is updated */ ++ smp_mb__before_atomic(); + + if (atomic_read(&s->terminate)) + break; +@@ -515,9 +515,8 @@ static int bnep_session(void *arg) + break; + netif_wake_queue(dev); + +- schedule(); ++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } +- __set_current_state(TASK_RUNNING); + remove_wait_queue(sk_sleep(sk), &wait); + + /* Cleanup session */ +@@ -663,7 +662,7 @@ int bnep_del_connection(struct bnep_conndel_req *req) + s = __bnep_get_session(req->dst); + if (s) { + atomic_inc(&s->terminate); +- wake_up_process(s->task); ++ wake_up_interruptible(sk_sleep(s->sock->sk)); + } else + err = -ENOENT; + +diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c +index 298ed37010e6..3a39fd523e40 100644 +--- a/net/bluetooth/cmtp/core.c ++++ b/net/bluetooth/cmtp/core.c +@@ -281,16 +281,16 @@ static int cmtp_session(void *arg) + struct cmtp_session *session = arg; + struct sock *sk = session->sock->sk; + struct sk_buff *skb; +- wait_queue_t wait; ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); + + BT_DBG("session %p", session); + + set_user_nice(current, -15); + +- init_waitqueue_entry(&wait, current); + add_wait_queue(sk_sleep(sk), &wait); + while (1) { +- set_current_state(TASK_INTERRUPTIBLE); ++ /* Ensure session->terminate is updated */ ++ smp_mb__before_atomic(); + + if (atomic_read(&session->terminate)) + break; +@@ -307,9 +307,8 @@ static int cmtp_session(void *arg) + + cmtp_process_transmit(session); + +- schedule(); ++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } +- __set_current_state(TASK_RUNNING); + remove_wait_queue(sk_sleep(sk), &wait); + + down_write(&cmtp_session_sem); +@@ -394,7 +393,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) + err = cmtp_attach_device(session); + if (err < 0) { + atomic_inc(&session->terminate); +- wake_up_process(session->task); ++ wake_up_interruptible(sk_sleep(session->sock->sk)); + up_write(&cmtp_session_sem); + return err; + } +@@ -432,7 +431,11 @@ int cmtp_del_connection(struct cmtp_conndel_req *req) + + /* Stop session thread */ + atomic_inc(&session->terminate); +- wake_up_process(session->task); ++ ++ /* Ensure session->terminate is updated */ ++ smp_mb__after_atomic(); ++ ++ wake_up_interruptible(sk_sleep(session->sock->sk)); + } else + err = -ENOENT; + +diff --git 
a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c +index 0bec4588c3c8..1fc076420d1e 100644 +--- a/net/bluetooth/hidp/core.c ++++ b/net/bluetooth/hidp/core.c +@@ -36,6 +36,7 @@ + #define VERSION "1.2" + + static DECLARE_RWSEM(hidp_session_sem); ++static DECLARE_WAIT_QUEUE_HEAD(hidp_session_wq); + static LIST_HEAD(hidp_session_list); + + static unsigned char hidp_keycode[256] = { +@@ -1068,12 +1069,12 @@ static int hidp_session_start_sync(struct hidp_session *session) + * Wake up session thread and notify it to stop. This is asynchronous and + * returns immediately. Call this whenever a runtime error occurs and you want + * the session to stop. +- * Note: wake_up_process() performs any necessary memory-barriers for us. ++ * Note: wake_up_interruptible() performs any necessary memory-barriers for us. + */ + static void hidp_session_terminate(struct hidp_session *session) + { + atomic_inc(&session->terminate); +- wake_up_process(session->task); ++ wake_up_interruptible(&hidp_session_wq); + } + + /* +@@ -1180,7 +1181,9 @@ static void hidp_session_run(struct hidp_session *session) + struct sock *ctrl_sk = session->ctrl_sock->sk; + struct sock *intr_sk = session->intr_sock->sk; + struct sk_buff *skb; ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); + ++ add_wait_queue(&hidp_session_wq, &wait); + for (;;) { + /* + * This thread can be woken up two ways: +@@ -1188,12 +1191,10 @@ static void hidp_session_run(struct hidp_session *session) + * session->terminate flag and wakes this thread up. + * - Via modifying the socket state of ctrl/intr_sock. This + * thread is woken up by ->sk_state_changed(). +- * +- * Note: set_current_state() performs any necessary +- * memory-barriers for us. + */ +- set_current_state(TASK_INTERRUPTIBLE); + ++ /* Ensure session->terminate is updated */ ++ smp_mb__before_atomic(); + if (atomic_read(&session->terminate)) + break; + +@@ -1227,11 +1228,22 @@ static void hidp_session_run(struct hidp_session *session) + hidp_process_transmit(session, &session->ctrl_transmit, + session->ctrl_sock); + +- schedule(); ++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } ++ remove_wait_queue(&hidp_session_wq, &wait); + + atomic_inc(&session->terminate); +- set_current_state(TASK_RUNNING); ++ ++ /* Ensure session->terminate is updated */ ++ smp_mb__after_atomic(); ++} ++ ++static int hidp_session_wake_function(wait_queue_t *wait, ++ unsigned int mode, ++ int sync, void *key) ++{ ++ wake_up_interruptible(&hidp_session_wq); ++ return false; + } + + /* +@@ -1244,7 +1256,8 @@ static void hidp_session_run(struct hidp_session *session) + static int hidp_session_thread(void *arg) + { + struct hidp_session *session = arg; +- wait_queue_t ctrl_wait, intr_wait; ++ DEFINE_WAIT_FUNC(ctrl_wait, hidp_session_wake_function); ++ DEFINE_WAIT_FUNC(intr_wait, hidp_session_wake_function); + + BT_DBG("session %p", session); + +@@ -1254,8 +1267,6 @@ static int hidp_session_thread(void *arg) + set_user_nice(current, -15); + hidp_set_timer(session); + +- init_waitqueue_entry(&ctrl_wait, current); +- init_waitqueue_entry(&intr_wait, current); + add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait); + add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait); + /* This memory barrier is paired with wq_has_sleeper(). 
See +diff --git a/net/dccp/proto.c b/net/dccp/proto.c +index 9fe25bf63296..b68168fcc06a 100644 +--- a/net/dccp/proto.c ++++ b/net/dccp/proto.c +@@ -24,6 +24,7 @@ + #include + + #include ++#include + #include + #include + +@@ -170,6 +171,15 @@ const char *dccp_packet_name(const int type) + + EXPORT_SYMBOL_GPL(dccp_packet_name); + ++static void dccp_sk_destruct(struct sock *sk) ++{ ++ struct dccp_sock *dp = dccp_sk(sk); ++ ++ ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); ++ dp->dccps_hc_tx_ccid = NULL; ++ inet_sock_destruct(sk); ++} ++ + int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) + { + struct dccp_sock *dp = dccp_sk(sk); +@@ -179,6 +189,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) + icsk->icsk_syn_retries = sysctl_dccp_request_retries; + sk->sk_state = DCCP_CLOSED; + sk->sk_write_space = dccp_write_space; ++ sk->sk_destruct = dccp_sk_destruct; + icsk->icsk_sync_mss = dccp_sync_mss; + dp->dccps_mss_cache = 536; + dp->dccps_rate_last = jiffies; +@@ -201,10 +212,7 @@ void dccp_destroy_sock(struct sock *sk) + { + struct dccp_sock *dp = dccp_sk(sk); + +- /* +- * DCCP doesn't use sk_write_queue, just sk_send_head +- * for retransmissions +- */ ++ __skb_queue_purge(&sk->sk_write_queue); + if (sk->sk_send_head != NULL) { + kfree_skb(sk->sk_send_head); + sk->sk_send_head = NULL; +@@ -222,8 +230,7 @@ void dccp_destroy_sock(struct sock *sk) + dp->dccps_hc_rx_ackvec = NULL; + } + ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk); +- ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); +- dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL; ++ dp->dccps_hc_rx_ccid = NULL; + + /* clean up feature negotiation state */ + dccp_feat_list_purge(&dp->dccps_featneg); +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c +index b2504712259f..313e3c11a15a 100644 +--- a/net/ipv4/fib_semantics.c ++++ b/net/ipv4/fib_semantics.c +@@ -1044,15 +1044,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg) + fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); + if (!fi) + goto failure; +- fib_info_cnt++; + if (cfg->fc_mx) { + fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL); +- if (!fi->fib_metrics) +- goto failure; ++ if (unlikely(!fi->fib_metrics)) { ++ kfree(fi); ++ return ERR_PTR(err); ++ } + atomic_set(&fi->fib_metrics->refcnt, 1); +- } else ++ } else { + fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics; +- ++ } ++ fib_info_cnt++; + fi->fib_net = net; + fi->fib_protocol = cfg->fc_protocol; + fi->fib_scope = cfg->fc_scope; +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index c295d882c6e0..0294f7c99c85 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -1241,7 +1241,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst) + if (mtu) + return mtu; + +- mtu = dst->dev->mtu; ++ mtu = READ_ONCE(dst->dev->mtu); + + if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { + if (rt->rt_uses_gateway && mtu > 576) +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index f0dabd125c43..c4bbf704ff9c 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -3028,8 +3028,7 @@ void tcp_rearm_rto(struct sock *sk) + /* delta may not be positive if the socket is locked + * when the retrans timer fires and is rescheduled. 
+ */ +- if (delta > 0) +- rto = delta; ++ rto = max(delta, 1); + } + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, + TCP_RTO_MAX); +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c +index f60e8caea767..aad8cdf15472 100644 +--- a/net/ipv6/ip6_fib.c ++++ b/net/ipv6/ip6_fib.c +@@ -892,6 +892,8 @@ add: + } + nsiblings = iter->rt6i_nsiblings; + fib6_purge_rt(iter, fn, info->nl_net); ++ if (fn->rr_ptr == iter) ++ fn->rr_ptr = NULL; + rt6_release(iter); + + if (nsiblings) { +@@ -904,6 +906,8 @@ add: + if (rt6_qualify_for_ecmp(iter)) { + *ins = iter->dst.rt6_next; + fib6_purge_rt(iter, fn, info->nl_net); ++ if (fn->rr_ptr == iter) ++ fn->rr_ptr = NULL; + rt6_release(iter); + nsiblings--; + } else { +@@ -992,7 +996,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, + /* Create subtree root node */ + sfn = node_alloc(); + if (!sfn) +- goto st_failure; ++ goto failure; + + sfn->leaf = info->nl_net->ipv6.ip6_null_entry; + atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref); +@@ -1008,12 +1012,12 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, + + if (IS_ERR(sn)) { + /* If it is failed, discard just allocated +- root, and then (in st_failure) stale node ++ root, and then (in failure) stale node + in main tree. + */ + node_free(sfn); + err = PTR_ERR(sn); +- goto st_failure; ++ goto failure; + } + + /* Now link new subtree to main tree */ +@@ -1027,7 +1031,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, + + if (IS_ERR(sn)) { + err = PTR_ERR(sn); +- goto st_failure; ++ goto failure; + } + } + +@@ -1069,22 +1073,22 @@ out: + atomic_inc(&pn->leaf->rt6i_ref); + } + #endif +- if (!(rt->dst.flags & DST_NOCACHE)) +- dst_free(&rt->dst); ++ goto failure; + } + return err; + +-#ifdef CONFIG_IPV6_SUBTREES +- /* Subtree creation failed, probably main tree node +- is orphan. If it is, shoot it. ++failure: ++ /* fn->leaf could be NULL if fn is an intermediate node and we ++ * failed to add the new route to it in both subtree creation ++ * failure and fib6_add_rt2node() failure case. ++ * In both cases, fib6_repair_tree() should be called to fix ++ * fn->leaf. 
+ */ +-st_failure: + if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) + fib6_repair_tree(info->nl_net, fn); + if (!(rt->dst.flags & DST_NOCACHE)) + dst_free(&rt->dst); + return err; +-#endif + } + + /* +diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c +index 8d2f7c9b491d..4a116d766c15 100644 +--- a/net/irda/af_irda.c ++++ b/net/irda/af_irda.c +@@ -2227,7 +2227,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname, + { + struct sock *sk = sock->sk; + struct irda_sock *self = irda_sk(sk); +- struct irda_device_list list; ++ struct irda_device_list list = { 0 }; + struct irda_device_info *discoveries; + struct irda_ias_set * ias_opt; /* IAS get/query params */ + struct ias_object * ias_obj; /* Object in IAS */ +diff --git a/net/key/af_key.c b/net/key/af_key.c +index 2e1050ec2cf0..94bf810ad242 100644 +--- a/net/key/af_key.c ++++ b/net/key/af_key.c +@@ -228,7 +228,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, + #define BROADCAST_ONE 1 + #define BROADCAST_REGISTERED 2 + #define BROADCAST_PROMISC_ONLY 4 +-static int pfkey_broadcast(struct sk_buff *skb, ++static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, + int broadcast_flags, struct sock *one_sk, + struct net *net) + { +@@ -278,7 +278,7 @@ static int pfkey_broadcast(struct sk_buff *skb, + rcu_read_unlock(); + + if (one_sk != NULL) +- err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk); ++ err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); + + kfree_skb(skb2); + kfree_skb(skb); +@@ -311,7 +311,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk) + hdr = (struct sadb_msg *) pfk->dump.skb->data; + hdr->sadb_msg_seq = 0; + hdr->sadb_msg_errno = rc; +- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, ++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, + &pfk->sk, sock_net(&pfk->sk)); + pfk->dump.skb = NULL; + } +@@ -355,7 +355,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk) + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / + sizeof(uint64_t)); + +- pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk)); ++ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk)); + + return 0; + } +@@ -1396,7 +1396,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_ + + xfrm_state_put(x); + +- pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net); ++ pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net); + + return 0; + } +@@ -1483,7 +1483,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c) + hdr->sadb_msg_seq = c->seq; + hdr->sadb_msg_pid = c->portid; + +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x)); ++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x)); + + return 0; + } +@@ -1596,7 +1596,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg + out_hdr->sadb_msg_reserved = 0; + out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; + out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; +- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk)); ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk)); + + return 0; + } +@@ -1701,8 +1701,8 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad + return -ENOBUFS; + } + +- pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk)); +- ++ pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk, ++ sock_net(sk)); + return 0; + } + +@@ -1720,7 +1720,8 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr) 
+ hdr->sadb_msg_errno = (uint8_t) 0; + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); + +- return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk)); ++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, ++ sock_net(sk)); + } + + static int key_notify_sa_flush(const struct km_event *c) +@@ -1741,7 +1742,7 @@ static int key_notify_sa_flush(const struct km_event *c) + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); + hdr->sadb_msg_reserved = 0; + +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net); ++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); + + return 0; + } +@@ -1798,7 +1799,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr) + out_hdr->sadb_msg_pid = pfk->dump.msg_portid; + + if (pfk->dump.skb) +- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, ++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, + &pfk->sk, sock_net(&pfk->sk)); + pfk->dump.skb = out_skb; + +@@ -1886,7 +1887,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb + new_hdr->sadb_msg_errno = 0; + } + +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk)); ++ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk)); + return 0; + } + +@@ -2219,7 +2220,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev + out_hdr->sadb_msg_errno = 0; + out_hdr->sadb_msg_seq = c->seq; + out_hdr->sadb_msg_pid = c->portid; +- pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp)); ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp)); + return 0; + + } +@@ -2439,7 +2440,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc + out_hdr->sadb_msg_errno = 0; + out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; + out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; +- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp)); ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp)); + err = 0; + + out: +@@ -2695,7 +2696,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr) + out_hdr->sadb_msg_pid = pfk->dump.msg_portid; + + if (pfk->dump.skb) +- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, ++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, + &pfk->sk, sock_net(&pfk->sk)); + pfk->dump.skb = out_skb; + +@@ -2752,7 +2753,7 @@ static int key_notify_policy_flush(const struct km_event *c) + hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); + hdr->sadb_msg_reserved = 0; +- pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net); ++ pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); + return 0; + + } +@@ -2814,7 +2815,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb + void *ext_hdrs[SADB_EXT_MAX]; + int err; + +- pfkey_broadcast(skb_clone(skb, GFP_KERNEL), ++ pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL, + BROADCAST_PROMISC_ONLY, NULL, sock_net(sk)); + + memset(ext_hdrs, 0, sizeof(ext_hdrs)); +@@ -3036,7 +3037,8 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c) + out_hdr->sadb_msg_seq = 0; + out_hdr->sadb_msg_pid = 0; + +- pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x)); ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, ++ xs_net(x)); + return 0; + } + +@@ -3226,7 +3228,8 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct + xfrm_ctx->ctx_len); + } + +- return pfkey_broadcast(skb, 
BROADCAST_REGISTERED, NULL, xs_net(x)); ++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, ++ xs_net(x)); + } + + static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt, +@@ -3424,7 +3427,8 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, + n_port->sadb_x_nat_t_port_port = sport; + n_port->sadb_x_nat_t_port_reserved = 0; + +- return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x)); ++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, ++ xs_net(x)); + } + + #ifdef CONFIG_NET_KEY_MIGRATE +@@ -3616,7 +3620,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, + } + + /* broadcast migrate message to sockets */ +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net); ++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net); + + return 0; + +diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c +index 0915d448ba23..075b0d22f213 100644 +--- a/net/sched/act_ipt.c ++++ b/net/sched/act_ipt.c +@@ -34,6 +34,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int + { + struct xt_tgchk_param par; + struct xt_target *target; ++ struct ipt_entry e = {}; + int ret = 0; + + target = xt_request_find_target(AF_INET, t->u.user.name, +@@ -44,6 +45,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int + t->u.kernel.target = target; + memset(&par, 0, sizeof(par)); + par.table = table; ++ par.entryinfo = &e; + par.target = target; + par.targinfo = t->data; + par.hook_mask = hook; +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c +index 4431e2833e45..3f2c3eed04da 100644 +--- a/net/sched/sch_sfq.c ++++ b/net/sched/sch_sfq.c +@@ -434,6 +434,7 @@ congestion_drop: + qdisc_drop(head, sch); + + slot_queue_add(slot, skb); ++ qdisc_tree_reduce_backlog(sch, 0, delta); + return NET_XMIT_CN; + } + +@@ -465,8 +466,10 @@ enqueue: + /* Return Congestion Notification only if we dropped a packet + * from this flow. + */ +- if (qlen != slot->qlen) ++ if (qlen != slot->qlen) { ++ qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb)); + return NET_XMIT_CN; ++ } + + /* As we dropped a packet, better let upper stack know this */ + qdisc_tree_reduce_backlog(sch, 1, dropped); +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c +index 7527c168e471..e33e9bd4ed5a 100644 +--- a/net/sctp/ipv6.c ++++ b/net/sctp/ipv6.c +@@ -510,7 +510,9 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr, + { + addr->sa.sa_family = AF_INET6; + addr->v6.sin6_port = port; ++ addr->v6.sin6_flowinfo = 0; + addr->v6.sin6_addr = *saddr; ++ addr->v6.sin6_scope_id = 0; + } + + /* Compare addresses exactly. 
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c +index a0c90572d0e5..f86c6555a539 100644 +--- a/net/tipc/netlink_compat.c ++++ b/net/tipc/netlink_compat.c +@@ -258,13 +258,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, + arg = nlmsg_new(0, GFP_KERNEL); + if (!arg) { + kfree_skb(msg->rep); ++ msg->rep = NULL; + return -ENOMEM; + } + + err = __tipc_nl_compat_dumpit(cmd, msg, arg); +- if (err) ++ if (err) { + kfree_skb(msg->rep); +- ++ msg->rep = NULL; ++ } + kfree_skb(arg); + + return err; +diff --git a/sound/core/control.c b/sound/core/control.c +index b4fe9b002512..bd01d492f46a 100644 +--- a/sound/core/control.c ++++ b/sound/core/control.c +@@ -1126,7 +1126,7 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol, + mutex_lock(&ue->card->user_ctl_lock); + change = ue->tlv_data_size != size; + if (!change) +- change = memcmp(ue->tlv_data, new_data, size); ++ change = memcmp(ue->tlv_data, new_data, size) != 0; + kfree(ue->tlv_data); + ue->tlv_data = new_data; + ue->tlv_data_size = size; +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c +index 46f7b023f69c..ac5de4365e15 100644 +--- a/sound/pci/hda/patch_conexant.c ++++ b/sound/pci/hda/patch_conexant.c +@@ -854,6 +854,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { + SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC), ++ SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI), + SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004), +diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c +index 2a5b3a293cd2..b123734f9fbd 100644 +--- a/sound/soc/sh/rcar/adg.c ++++ b/sound/soc/sh/rcar/adg.c +@@ -437,7 +437,7 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv, + struct device *dev = rsnd_priv_to_dev(priv); + struct device_node *np = dev->of_node; + u32 ckr, rbgx, rbga, rbgb; +- u32 rate, req_rate, div; ++ u32 rate, req_rate = 0, div; + uint32_t count = 0; + unsigned long req_48kHz_rate, req_441kHz_rate; + int i; +diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c +index deed48ef28b8..362446c36c9e 100644 +--- a/sound/soc/sh/rcar/core.c ++++ b/sound/soc/sh/rcar/core.c +@@ -192,19 +192,16 @@ void rsnd_mod_interrupt(struct rsnd_mod *mod, + struct rsnd_priv *priv = rsnd_mod_to_priv(mod); + struct rsnd_dai_stream *io; + struct rsnd_dai *rdai; +- int i, j; +- +- for_each_rsnd_dai(rdai, priv, j) { ++ int i; + +- for (i = 0; i < RSND_MOD_MAX; i++) { +- io = &rdai->playback; +- if (mod == io->mod[i]) +- callback(mod, io); ++ for_each_rsnd_dai(rdai, priv, i) { ++ io = &rdai->playback; ++ if (mod == io->mod[mod->type]) ++ callback(mod, io); + +- io = &rdai->capture; +- if (mod == io->mod[i]) +- callback(mod, io); +- } ++ io = &rdai->capture; ++ if (mod == io->mod[mod->type]) ++ callback(mod, io); + } + } + +@@ -1019,7 +1016,7 @@ static int rsnd_kctrl_put(struct snd_kcontrol *kctrl, + } + } + +- if (change) ++ if (change && cfg->update) + cfg->update(cfg->io, mod); + + return change; +diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c +index 68b439ed22d7..460d29cbaaa5 100644 +--- a/sound/soc/sh/rcar/src.c ++++ b/sound/soc/sh/rcar/src.c +@@ -691,13 +691,27 @@ static int 
_rsnd_src_stop_gen2(struct rsnd_mod *mod) + { + rsnd_src_irq_disable_gen2(mod); + +- rsnd_mod_write(mod, SRC_CTRL, 0); ++ /* ++ * stop SRC output only ++ * see rsnd_src_quit_gen2 ++ */ ++ rsnd_mod_write(mod, SRC_CTRL, 0x01); + + rsnd_src_error_record_gen2(mod); + + return rsnd_src_stop(mod); + } + ++static int rsnd_src_quit_gen2(struct rsnd_mod *mod, ++ struct rsnd_dai_stream *io, ++ struct rsnd_priv *priv) ++{ ++ /* stop both out/in */ ++ rsnd_mod_write(mod, SRC_CTRL, 0); ++ ++ return 0; ++} ++ + static void __rsnd_src_interrupt_gen2(struct rsnd_mod *mod, + struct rsnd_dai_stream *io) + { +@@ -971,7 +985,7 @@ static struct rsnd_mod_ops rsnd_src_gen2_ops = { + .probe = rsnd_src_probe_gen2, + .remove = rsnd_src_remove_gen2, + .init = rsnd_src_init_gen2, +- .quit = rsnd_src_quit, ++ .quit = rsnd_src_quit_gen2, + .start = rsnd_src_start_gen2, + .stop = rsnd_src_stop_gen2, + .hw_params = rsnd_src_hw_params, +diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c +index 1427ec21bd7e..c62a2947ac14 100644 +--- a/sound/soc/sh/rcar/ssi.c ++++ b/sound/soc/sh/rcar/ssi.c +@@ -39,6 +39,7 @@ + #define SCKP (1 << 13) /* Serial Bit Clock Polarity */ + #define SWSP (1 << 12) /* Serial WS Polarity */ + #define SDTA (1 << 10) /* Serial Data Alignment */ ++#define PDTA (1 << 9) /* Parallel Data Alignment */ + #define DEL (1 << 8) /* Serial Data Delay */ + #define CKDV(v) (v << 4) /* Serial Clock Division Ratio */ + #define TRMD (1 << 1) /* Transmit/Receive Mode Select */ +@@ -286,7 +287,7 @@ static int rsnd_ssi_init(struct rsnd_mod *mod, + struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); + u32 cr; + +- cr = FORCE; ++ cr = FORCE | PDTA; + + /* + * always use 32bit system word for easy clock calculation. diff --git a/patch/kernel/rockchip-default/03-patch-4.4.85-86.patch b/patch/kernel/rockchip-default/03-patch-4.4.85-86.patch new file mode 100644 index 000000000..a20d51922 --- /dev/null +++ b/patch/kernel/rockchip-default/03-patch-4.4.85-86.patch @@ -0,0 +1,393 @@ +diff --git a/Makefile b/Makefile +index 0f3d843f42a7..1207bf6a0e7a 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 85 ++SUBLEVEL = 86 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c +index 4c46c54a3ad7..6638903f0cb9 100644 +--- a/arch/arm64/kernel/fpsimd.c ++++ b/arch/arm64/kernel/fpsimd.c +@@ -157,9 +157,11 @@ void fpsimd_thread_switch(struct task_struct *next) + + void fpsimd_flush_thread(void) + { ++ preempt_disable(); + memset(¤t->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); + fpsimd_flush_task_state(current); + set_thread_flag(TIF_FOREIGN_FPSTATE); ++ preempt_enable(); + } + + /* +diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c +index a4b466424a32..7fabf49f2aeb 100644 +--- a/arch/arm64/mm/fault.c ++++ b/arch/arm64/mm/fault.c +@@ -313,8 +313,11 @@ retry: + * signal first. We do not need to release the mmap_sem because it + * would already be released in __lock_page_or_retry in mm/filemap.c. 
+ */ +- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) ++ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) { ++ if (!user_mode(regs)) ++ goto no_context; + return 0; ++ } + + /* + * Major/minor page fault accounting is only done on the initial +diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h +index de25aad07853..9016b4b70375 100644 +--- a/arch/x86/include/asm/io.h ++++ b/arch/x86/include/asm/io.h +@@ -304,13 +304,13 @@ static inline unsigned type in##bwl##_p(int port) \ + static inline void outs##bwl(int port, const void *addr, unsigned long count) \ + { \ + asm volatile("rep; outs" #bwl \ +- : "+S"(addr), "+c"(count) : "d"(port)); \ ++ : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \ + } \ + \ + static inline void ins##bwl(int port, void *addr, unsigned long count) \ + { \ + asm volatile("rep; ins" #bwl \ +- : "+D"(addr), "+c"(count) : "d"(port)); \ ++ : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \ + } + + BUILDIO(b, b, char) +diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c +index cc91ae832ffb..6fd7b50c5747 100644 +--- a/drivers/gpu/drm/i915/intel_uncore.c ++++ b/drivers/gpu/drm/i915/intel_uncore.c +@@ -635,7 +635,8 @@ hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv) + "enabling oneshot unclaimed register reporting. " + "Please use i915.mmio_debug=N for more information.\n"); + __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); +- i915.mmio_debug = mmio_debug_once--; ++ i915.mmio_debug = mmio_debug_once; ++ mmio_debug_once = false; + } + } + +diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c +index f325663c27c5..4b58e8aaf5c5 100644 +--- a/drivers/i2c/busses/i2c-jz4780.c ++++ b/drivers/i2c/busses/i2c-jz4780.c +@@ -786,10 +786,6 @@ static int jz4780_i2c_probe(struct platform_device *pdev) + + jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0); + +- i2c->cmd = 0; +- memset(i2c->cmd_buf, 0, BUFSIZE); +- memset(i2c->data_buf, 0, BUFSIZE); +- + i2c->irq = platform_get_irq(pdev, 0); + ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0, + dev_name(&pdev->dev), i2c); +diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c +index 257a9eadd595..4ac6764f4897 100644 +--- a/drivers/net/wireless/p54/fwio.c ++++ b/drivers/net/wireless/p54/fwio.c +@@ -488,7 +488,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell) + + entry += sizeof(__le16); + chan->pa_points_per_curve = 8; +- memset(chan->curve_data, 0, sizeof(*chan->curve_data)); ++ memset(chan->curve_data, 0, sizeof(chan->curve_data)); + memcpy(chan->curve_data, entry, + sizeof(struct p54_pa_curve_data_sample) * + min((u8)8, curve_data->points_per_channel)); +diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c +index 1910100638a2..00602abec0ea 100644 +--- a/drivers/scsi/isci/remote_node_context.c ++++ b/drivers/scsi/isci/remote_node_context.c +@@ -66,6 +66,9 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state) + { + static const char * const strings[] = RNC_STATES; + ++ if (state >= ARRAY_SIZE(strings)) ++ return "UNKNOWN"; ++ + return strings[state]; + } + #undef C +diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c +index 0e6aaef9a038..c74f74ab981c 100644 +--- a/drivers/scsi/lpfc/lpfc_els.c ++++ b/drivers/scsi/lpfc/lpfc_els.c +@@ -1054,7 +1054,10 @@ stop_rr_fcf_flogi: + lpfc_sli4_unreg_all_rpis(vport); + } + } +- lpfc_issue_reg_vfi(vport); ++ ++ /* Do not register VFI if the 
driver aborted FLOGI */ ++ if (!lpfc_error_lost_link(irsp)) ++ lpfc_issue_reg_vfi(vport); + lpfc_nlp_put(ndlp); + goto out; + } +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index 6514636431ab..8a9e139e2853 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -153,6 +153,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */ + struct sg_device *parentdp; /* owning device */ + wait_queue_head_t read_wait; /* queue read until command done */ + rwlock_t rq_list_lock; /* protect access to list in req_arr */ ++ struct mutex f_mutex; /* protect against changes in this fd */ + int timeout; /* defaults to SG_DEFAULT_TIMEOUT */ + int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */ + Sg_scatter_hold reserve; /* buffer held for this file descriptor */ +@@ -166,6 +167,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */ + unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */ + char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ + char mmap_called; /* 0 -> mmap() never called on this fd */ ++ char res_in_use; /* 1 -> 'reserve' array in use */ + struct kref f_ref; + struct execute_work ew; + } Sg_fd; +@@ -209,7 +211,6 @@ static void sg_remove_sfp(struct kref *); + static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id); + static Sg_request *sg_add_request(Sg_fd * sfp); + static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); +-static int sg_res_in_use(Sg_fd * sfp); + static Sg_device *sg_get_dev(int dev); + static void sg_device_destroy(struct kref *kref); + +@@ -625,6 +626,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) + } + buf += SZ_SG_HEADER; + __get_user(opcode, buf); ++ mutex_lock(&sfp->f_mutex); + if (sfp->next_cmd_len > 0) { + cmd_size = sfp->next_cmd_len; + sfp->next_cmd_len = 0; /* reset so only this write() effected */ +@@ -633,6 +635,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) + if ((opcode >= 0xc0) && old_hdr.twelve_byte) + cmd_size = 12; + } ++ mutex_unlock(&sfp->f_mutex); + SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, + "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size)); + /* Determine buffer size. 
*/ +@@ -732,7 +735,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, + sg_remove_request(sfp, srp); + return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */ + } +- if (sg_res_in_use(sfp)) { ++ if (sfp->res_in_use) { + sg_remove_request(sfp, srp); + return -EBUSY; /* reserve buffer already being used */ + } +@@ -902,7 +905,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) + return result; + if (val) { + sfp->low_dma = 1; +- if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) { ++ if ((0 == sfp->low_dma) && !sfp->res_in_use) { + val = (int) sfp->reserve.bufflen; + sg_remove_scat(sfp, &sfp->reserve); + sg_build_reserve(sfp, val); +@@ -977,12 +980,18 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) + return -EINVAL; + val = min_t(int, val, + max_sectors_bytes(sdp->device->request_queue)); ++ mutex_lock(&sfp->f_mutex); + if (val != sfp->reserve.bufflen) { +- if (sg_res_in_use(sfp) || sfp->mmap_called) ++ if (sfp->mmap_called || ++ sfp->res_in_use) { ++ mutex_unlock(&sfp->f_mutex); + return -EBUSY; ++ } ++ + sg_remove_scat(sfp, &sfp->reserve); + sg_build_reserve(sfp, val); + } ++ mutex_unlock(&sfp->f_mutex); + return 0; + case SG_GET_RESERVED_SIZE: + val = min_t(int, sfp->reserve.bufflen, +@@ -1737,13 +1746,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd) + md = &map_data; + + if (md) { +- if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen) ++ mutex_lock(&sfp->f_mutex); ++ if (dxfer_len <= rsv_schp->bufflen && ++ !sfp->res_in_use) { ++ sfp->res_in_use = 1; + sg_link_reserve(sfp, srp, dxfer_len); +- else { ++ } else if ((hp->flags & SG_FLAG_MMAP_IO) && sfp->res_in_use) { ++ mutex_unlock(&sfp->f_mutex); ++ return -EBUSY; ++ } else { + res = sg_build_indirect(req_schp, sfp, dxfer_len); +- if (res) ++ if (res) { ++ mutex_unlock(&sfp->f_mutex); + return res; ++ } + } ++ mutex_unlock(&sfp->f_mutex); + + md->pages = req_schp->pages; + md->page_order = req_schp->page_order; +@@ -2034,6 +2052,8 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) + req_schp->sglist_len = 0; + sfp->save_scat_len = 0; + srp->res_used = 0; ++ /* Called without mutex lock to avoid deadlock */ ++ sfp->res_in_use = 0; + } + + static Sg_request * +@@ -2145,6 +2165,7 @@ sg_add_sfp(Sg_device * sdp) + rwlock_init(&sfp->rq_list_lock); + + kref_init(&sfp->f_ref); ++ mutex_init(&sfp->f_mutex); + sfp->timeout = SG_DEFAULT_TIMEOUT; + sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER; + sfp->force_packid = SG_DEF_FORCE_PACK_ID; +@@ -2220,20 +2241,6 @@ sg_remove_sfp(struct kref *kref) + schedule_work(&sfp->ew.work); + } + +-static int +-sg_res_in_use(Sg_fd * sfp) +-{ +- const Sg_request *srp; +- unsigned long iflags; +- +- read_lock_irqsave(&sfp->rq_list_lock, iflags); +- for (srp = sfp->headrp; srp; srp = srp->nextrp) +- if (srp->res_used) +- break; +- read_unlock_irqrestore(&sfp->rq_list_lock, iflags); +- return srp ? 
1 : 0; +-} +- + #ifdef CONFIG_SCSI_PROC_FS + static int + sg_idr_max_id(int id, void *p, void *data) +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index 9c62a6f9757a..600c67ef8a03 100644 +--- a/fs/btrfs/volumes.c ++++ b/fs/btrfs/volumes.c +@@ -108,7 +108,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { + }, + }; + +-const u64 const btrfs_raid_group[BTRFS_NR_RAID_TYPES] = { ++const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = { + [BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10, + [BTRFS_RAID_RAID1] = BTRFS_BLOCK_GROUP_RAID1, + [BTRFS_RAID_DUP] = BTRFS_BLOCK_GROUP_DUP, +diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h +index 782d4e814e21..4bc4b1b13193 100644 +--- a/include/linux/lightnvm.h ++++ b/include/linux/lightnvm.h +@@ -310,6 +310,7 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, + { + struct ppa_addr l; + ++ l.ppa = 0; + /* + * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc. + */ +diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c +index 7080ae1eb6c1..f850e906564b 100644 +--- a/kernel/gcov/base.c ++++ b/kernel/gcov/base.c +@@ -98,6 +98,12 @@ void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters) + } + EXPORT_SYMBOL(__gcov_merge_icall_topn); + ++void __gcov_exit(void) ++{ ++ /* Unused. */ ++} ++EXPORT_SYMBOL(__gcov_exit); ++ + /** + * gcov_enable_events - enable event reporting through gcov_event() + * +diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c +index e25e92fb44fa..46a18e72bce6 100644 +--- a/kernel/gcov/gcc_4_7.c ++++ b/kernel/gcov/gcc_4_7.c +@@ -18,7 +18,9 @@ + #include + #include "gcov.h" + +-#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1 ++#if (__GNUC__ >= 7) ++#define GCOV_COUNTERS 9 ++#elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) + #define GCOV_COUNTERS 10 + #elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9 + #define GCOV_COUNTERS 9 +diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c +index 74177189063c..d3125c169684 100644 +--- a/sound/pci/au88x0/au88x0_core.c ++++ b/sound/pci/au88x0/au88x0_core.c +@@ -2150,8 +2150,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir, + stream->resources, en, + VORTEX_RESOURCE_SRC)) < 0) { + memset(stream->resources, 0, +- sizeof(unsigned char) * +- VORTEX_RESOURCE_LAST); ++ sizeof(stream->resources)); + return -EBUSY; + } + if (stream->type != VORTEX_PCM_A3D) { +@@ -2161,7 +2160,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir, + VORTEX_RESOURCE_MIXIN)) < 0) { + memset(stream->resources, + 0, +- sizeof(unsigned char) * VORTEX_RESOURCE_LAST); ++ sizeof(stream->resources)); + return -EBUSY; + } + } +@@ -2174,8 +2173,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir, + stream->resources, en, + VORTEX_RESOURCE_A3D)) < 0) { + memset(stream->resources, 0, +- sizeof(unsigned char) * +- VORTEX_RESOURCE_LAST); ++ sizeof(stream->resources)); + dev_err(vortex->card->dev, + "out of A3D sources. 
Sorry\n"); + return -EBUSY; +@@ -2289,8 +2287,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir, + VORTEX_RESOURCE_MIXOUT)) + < 0) { + memset(stream->resources, 0, +- sizeof(unsigned char) * +- VORTEX_RESOURCE_LAST); ++ sizeof(stream->resources)); + return -EBUSY; + } + if ((src[i] = +@@ -2298,8 +2295,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir, + stream->resources, en, + VORTEX_RESOURCE_SRC)) < 0) { + memset(stream->resources, 0, +- sizeof(unsigned char) * +- VORTEX_RESOURCE_LAST); ++ sizeof(stream->resources)); + return -EBUSY; + } + } diff --git a/patch/kernel/rockchip-default/03-patch-4.4.86-87.patch b/patch/kernel/rockchip-default/03-patch-4.4.86-87.patch new file mode 100644 index 000000000..5292853d3 --- /dev/null +++ b/patch/kernel/rockchip-default/03-patch-4.4.86-87.patch @@ -0,0 +1,408 @@ +diff --git a/Makefile b/Makefile +index 1207bf6a0e7a..f6838187b568 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 86 ++SUBLEVEL = 87 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h +index 4cb4b6d3452c..0bc66e1d3a7e 100644 +--- a/arch/alpha/include/asm/types.h ++++ b/arch/alpha/include/asm/types.h +@@ -1,6 +1,6 @@ + #ifndef _ALPHA_TYPES_H + #define _ALPHA_TYPES_H + +-#include ++#include + + #endif /* _ALPHA_TYPES_H */ +diff --git a/arch/alpha/include/uapi/asm/types.h b/arch/alpha/include/uapi/asm/types.h +index 9fd3cd459777..8d1024d7be05 100644 +--- a/arch/alpha/include/uapi/asm/types.h ++++ b/arch/alpha/include/uapi/asm/types.h +@@ -9,8 +9,18 @@ + * need to be careful to avoid a name clashes. + */ + +-#ifndef __KERNEL__ ++/* ++ * This is here because we used to use l64 for alpha ++ * and we don't want to impact user mode with our change to ll64 ++ * in the kernel. ++ * ++ * However, some user programs are fine with this. They can ++ * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here. ++ */ ++#if !defined(__SANE_USERSPACE_TYPES__) && !defined(__KERNEL__) + #include ++#else ++#include + #endif + + #endif /* _UAPI_ALPHA_TYPES_H */ +diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c +index ba079e279b58..e8835d4e173c 100644 +--- a/arch/arm/kvm/mmu.c ++++ b/arch/arm/kvm/mmu.c +@@ -824,24 +824,25 @@ void stage2_unmap_vm(struct kvm *kvm) + * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all + * underlying level-2 and level-3 tables before freeing the actual level-1 table + * and setting the struct pointer to NULL. +- * +- * Note we don't need locking here as this is only called when the VM is +- * destroyed, which can only be done once. 
+ */ + void kvm_free_stage2_pgd(struct kvm *kvm) + { +- if (kvm->arch.pgd == NULL) +- return; ++ void *pgd = NULL; ++ void *hwpgd = NULL; + + spin_lock(&kvm->mmu_lock); +- unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); ++ if (kvm->arch.pgd) { ++ unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); ++ pgd = READ_ONCE(kvm->arch.pgd); ++ hwpgd = kvm_get_hwpgd(kvm); ++ kvm->arch.pgd = NULL; ++ } + spin_unlock(&kvm->mmu_lock); + +- kvm_free_hwpgd(kvm_get_hwpgd(kvm)); +- if (KVM_PREALLOC_LEVEL > 0) +- kfree(kvm->arch.pgd); +- +- kvm->arch.pgd = NULL; ++ if (hwpgd) ++ kvm_free_hwpgd(hwpgd); ++ if (KVM_PREALLOC_LEVEL > 0 && pgd) ++ kfree(pgd); + } + + static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, +diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c +index f5e9f9310b48..b3b0004ea8ac 100644 +--- a/crypto/algif_skcipher.c ++++ b/crypto/algif_skcipher.c +@@ -86,8 +86,13 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq) + } + sgl = sreq->tsg; + n = sg_nents(sgl); +- for_each_sg(sgl, sg, n, i) +- put_page(sg_page(sg)); ++ for_each_sg(sgl, sg, n, i) { ++ struct page *page = sg_page(sg); ++ ++ /* some SGs may not have a page mapped */ ++ if (page && atomic_read(&page->_count)) ++ put_page(page); ++ } + + kfree(sreq->tsg); + } +diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c +index 025c429050c0..5d8dfe027b30 100644 +--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c ++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c +@@ -612,7 +612,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, + } else { + pr_err("Failed to fill pool (%p)\n", pool); + /* If we have any pages left put them to the pool. */ +- list_for_each_entry(p, &pool->list, lru) { ++ list_for_each_entry(p, &new_pages, lru) { + ++cpages; + } + list_splice(&new_pages, &pool->list); +diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c +index 7ba795b24e75..639d1a9c8793 100644 +--- a/drivers/i2c/busses/i2c-ismt.c ++++ b/drivers/i2c/busses/i2c-ismt.c +@@ -339,8 +339,10 @@ static int ismt_process_desc(const struct ismt_desc *desc, + break; + case I2C_SMBUS_BLOCK_DATA: + case I2C_SMBUS_I2C_BLOCK_DATA: +- memcpy(&data->block[1], dma_buffer, desc->rxbytes); +- data->block[0] = desc->rxbytes; ++ if (desc->rxbytes != dma_buffer[0] + 1) ++ return -EMSGSIZE; ++ ++ memcpy(data->block, dma_buffer, desc->rxbytes); + break; + } + return 0; +diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c +index 9e17ef27a183..6f1dbd52ec91 100644 +--- a/drivers/irqchip/irq-mips-gic.c ++++ b/drivers/irqchip/irq-mips-gic.c +@@ -915,8 +915,11 @@ static int __init gic_of_init(struct device_node *node, + gic_len = resource_size(&res); + } + +- if (mips_cm_present()) ++ if (mips_cm_present()) { + write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK); ++ /* Ensure GIC region is enabled before trying to access it */ ++ __sync(); ++ } + gic_present = true; + + __gic_init(gic_base, gic_len, cpu_vec, 0, node); +diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c +index cd4777954f87..9bee3f11898a 100644 +--- a/drivers/net/wireless/ti/wl1251/main.c ++++ b/drivers/net/wireless/ti/wl1251/main.c +@@ -1567,6 +1567,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void) + + wl->state = WL1251_STATE_OFF; + mutex_init(&wl->mutex); ++ spin_lock_init(&wl->wl_lock); + + wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE; + wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE; +diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c +index 
c6a1ec110c01..22bae2b434e2 100644 +--- a/fs/ceph/addr.c ++++ b/fs/ceph/addr.c +@@ -189,7 +189,7 @@ static int ceph_releasepage(struct page *page, gfp_t g) + /* + * read a single page, without unlocking it. + */ +-static int readpage_nounlock(struct file *filp, struct page *page) ++static int ceph_do_readpage(struct file *filp, struct page *page) + { + struct inode *inode = file_inode(filp); + struct ceph_inode_info *ci = ceph_inode(inode); +@@ -219,7 +219,7 @@ static int readpage_nounlock(struct file *filp, struct page *page) + + err = ceph_readpage_from_fscache(inode, page); + if (err == 0) +- goto out; ++ return -EINPROGRESS; + + dout("readpage inode %p file %p page %p index %lu\n", + inode, filp, page, page->index); +@@ -249,8 +249,11 @@ out: + + static int ceph_readpage(struct file *filp, struct page *page) + { +- int r = readpage_nounlock(filp, page); +- unlock_page(page); ++ int r = ceph_do_readpage(filp, page); ++ if (r != -EINPROGRESS) ++ unlock_page(page); ++ else ++ r = 0; + return r; + } + +@@ -1094,7 +1097,7 @@ retry_locked: + goto retry_locked; + r = writepage_nounlock(page, NULL); + if (r < 0) +- goto fail_nosnap; ++ goto fail_unlock; + goto retry_locked; + } + +@@ -1122,11 +1125,14 @@ retry_locked: + } + + /* we need to read it. */ +- r = readpage_nounlock(file, page); +- if (r < 0) +- goto fail_nosnap; ++ r = ceph_do_readpage(file, page); ++ if (r < 0) { ++ if (r == -EINPROGRESS) ++ return -EAGAIN; ++ goto fail_unlock; ++ } + goto retry_locked; +-fail_nosnap: ++fail_unlock: + unlock_page(page); + return r; + } +diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c +index a4766ded1ba7..ff1cfd7b1083 100644 +--- a/fs/ceph/cache.c ++++ b/fs/ceph/cache.c +@@ -224,13 +224,7 @@ void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) + fscache_relinquish_cookie(cookie, 0); + } + +-static void ceph_vfs_readpage_complete(struct page *page, void *data, int error) +-{ +- if (!error) +- SetPageUptodate(page); +-} +- +-static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int error) ++static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error) + { + if (!error) + SetPageUptodate(page); +@@ -259,7 +253,7 @@ int ceph_readpage_from_fscache(struct inode *inode, struct page *page) + return -ENOBUFS; + + ret = fscache_read_or_alloc_page(ci->fscache, page, +- ceph_vfs_readpage_complete, NULL, ++ ceph_readpage_from_fscache_complete, NULL, + GFP_KERNEL); + + switch (ret) { +@@ -288,7 +282,7 @@ int ceph_readpages_from_fscache(struct inode *inode, + return -ENOBUFS; + + ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages, +- ceph_vfs_readpage_complete_unlock, ++ ceph_readpage_from_fscache_complete, + NULL, mapping_gfp_mask(mapping)); + + switch (ret) { +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c +index fa8df3fef6fc..297e05c9e2b0 100644 +--- a/fs/cifs/dir.c ++++ b/fs/cifs/dir.c +@@ -194,7 +194,7 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon) + int i; + + if (unlikely(direntry->d_name.len > +- tcon->fsAttrInfo.MaxPathNameComponentLength)) ++ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength))) + return -ENAMETOOLONG; + + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h +index b8f553b32dda..aacb15bd56fe 100644 +--- a/fs/cifs/smb2pdu.h ++++ b/fs/cifs/smb2pdu.h +@@ -82,8 +82,8 @@ + + #define NUMBER_OF_SMB2_COMMANDS 0x0013 + +-/* BB FIXME - analyze following length BB */ +-#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 
2 pad */ ++/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */ ++#define MAX_SMB2_HDR_SIZE 0x00b0 + + #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe) + +diff --git a/fs/eventpoll.c b/fs/eventpoll.c +index 1e009cad8d5c..1b08556776ce 100644 +--- a/fs/eventpoll.c ++++ b/fs/eventpoll.c +@@ -518,8 +518,13 @@ static void ep_remove_wait_queue(struct eppoll_entry *pwq) + wait_queue_head_t *whead; + + rcu_read_lock(); +- /* If it is cleared by POLLFREE, it should be rcu-safe */ +- whead = rcu_dereference(pwq->whead); ++ /* ++ * If it is cleared by POLLFREE, it should be rcu-safe. ++ * If we read NULL we need a barrier paired with ++ * smp_store_release() in ep_poll_callback(), otherwise ++ * we rely on whead->lock. ++ */ ++ whead = smp_load_acquire(&pwq->whead); + if (whead) + remove_wait_queue(whead, &pwq->wait); + rcu_read_unlock(); +@@ -1003,17 +1008,6 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k + struct epitem *epi = ep_item_from_wait(wait); + struct eventpoll *ep = epi->ep; + +- if ((unsigned long)key & POLLFREE) { +- ep_pwq_from_wait(wait)->whead = NULL; +- /* +- * whead = NULL above can race with ep_remove_wait_queue() +- * which can do another remove_wait_queue() after us, so we +- * can't use __remove_wait_queue(). whead->lock is held by +- * the caller. +- */ +- list_del_init(&wait->task_list); +- } +- + spin_lock_irqsave(&ep->lock, flags); + + /* +@@ -1078,6 +1072,23 @@ out_unlock: + if (pwake) + ep_poll_safewake(&ep->poll_wait); + ++ ++ if ((unsigned long)key & POLLFREE) { ++ /* ++ * If we race with ep_remove_wait_queue() it can miss ++ * ->whead = NULL and do another remove_wait_queue() after ++ * us, so we can't use __remove_wait_queue(). ++ */ ++ list_del_init(&wait->task_list); ++ /* ++ * ->whead != NULL protects us from the race with ep_free() ++ * or ep_remove(), ep_remove_wait_queue() takes whead->lock ++ * held by the caller. Once we nullify it, nothing protects ++ * ep/epi or even wait. ++ */ ++ smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL); ++ } ++ + return 1; + } + +diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h +index fc824e2828f3..5d2add1a6c96 100644 +--- a/include/asm-generic/topology.h ++++ b/include/asm-generic/topology.h +@@ -48,7 +48,11 @@ + #define parent_node(node) ((void)(node),0) + #endif + #ifndef cpumask_of_node +-#define cpumask_of_node(node) ((void)node, cpu_online_mask) ++ #ifdef CONFIG_NEED_MULTIPLE_NODES ++ #define cpumask_of_node(node) ((node) == 0 ? 
cpu_online_mask : cpu_none_mask) ++ #else ++ #define cpumask_of_node(node) ((void)node, cpu_online_mask) ++ #endif + #endif + #ifndef pcibus_to_node + #define pcibus_to_node(bus) ((void)(bus), -1) +diff --git a/kernel/cpuset.c b/kernel/cpuset.c +index 8ccd66a97c8b..2924b6faa469 100644 +--- a/kernel/cpuset.c ++++ b/kernel/cpuset.c +@@ -1910,6 +1910,7 @@ static struct cftype files[] = { + { + .name = "memory_pressure", + .read_u64 = cpuset_read_u64, ++ .private = FILE_MEMORY_PRESSURE, + }, + + { +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 77055a362041..0e01250f2072 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -3275,9 +3275,15 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, + struct xfrm_state *x_new[XFRM_MAX_DEPTH]; + struct xfrm_migrate *mp; + ++ /* Stage 0 - sanity checks */ + if ((err = xfrm_migrate_check(m, num_migrate)) < 0) + goto out; + ++ if (dir >= XFRM_POLICY_MAX) { ++ err = -EINVAL; ++ goto out; ++ } ++ + /* Stage 1 - find policy */ + if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) { + err = -ENOENT; diff --git a/patch/kernel/udoo-next/03-patch-4.4.86-87.patch b/patch/kernel/udoo-next/03-patch-4.4.86-87.patch new file mode 100644 index 000000000..5292853d3 --- /dev/null +++ b/patch/kernel/udoo-next/03-patch-4.4.86-87.patch @@ -0,0 +1,408 @@ +diff --git a/Makefile b/Makefile +index 1207bf6a0e7a..f6838187b568 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 86 ++SUBLEVEL = 87 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h +index 4cb4b6d3452c..0bc66e1d3a7e 100644 +--- a/arch/alpha/include/asm/types.h ++++ b/arch/alpha/include/asm/types.h +@@ -1,6 +1,6 @@ + #ifndef _ALPHA_TYPES_H + #define _ALPHA_TYPES_H + +-#include ++#include + + #endif /* _ALPHA_TYPES_H */ +diff --git a/arch/alpha/include/uapi/asm/types.h b/arch/alpha/include/uapi/asm/types.h +index 9fd3cd459777..8d1024d7be05 100644 +--- a/arch/alpha/include/uapi/asm/types.h ++++ b/arch/alpha/include/uapi/asm/types.h +@@ -9,8 +9,18 @@ + * need to be careful to avoid a name clashes. + */ + +-#ifndef __KERNEL__ ++/* ++ * This is here because we used to use l64 for alpha ++ * and we don't want to impact user mode with our change to ll64 ++ * in the kernel. ++ * ++ * However, some user programs are fine with this. They can ++ * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here. ++ */ ++#if !defined(__SANE_USERSPACE_TYPES__) && !defined(__KERNEL__) + #include ++#else ++#include + #endif + + #endif /* _UAPI_ALPHA_TYPES_H */ +diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c +index ba079e279b58..e8835d4e173c 100644 +--- a/arch/arm/kvm/mmu.c ++++ b/arch/arm/kvm/mmu.c +@@ -824,24 +824,25 @@ void stage2_unmap_vm(struct kvm *kvm) + * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all + * underlying level-2 and level-3 tables before freeing the actual level-1 table + * and setting the struct pointer to NULL. +- * +- * Note we don't need locking here as this is only called when the VM is +- * destroyed, which can only be done once. 
+ */ + void kvm_free_stage2_pgd(struct kvm *kvm) + { +- if (kvm->arch.pgd == NULL) +- return; ++ void *pgd = NULL; ++ void *hwpgd = NULL; + + spin_lock(&kvm->mmu_lock); +- unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); ++ if (kvm->arch.pgd) { ++ unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); ++ pgd = READ_ONCE(kvm->arch.pgd); ++ hwpgd = kvm_get_hwpgd(kvm); ++ kvm->arch.pgd = NULL; ++ } + spin_unlock(&kvm->mmu_lock); + +- kvm_free_hwpgd(kvm_get_hwpgd(kvm)); +- if (KVM_PREALLOC_LEVEL > 0) +- kfree(kvm->arch.pgd); +- +- kvm->arch.pgd = NULL; ++ if (hwpgd) ++ kvm_free_hwpgd(hwpgd); ++ if (KVM_PREALLOC_LEVEL > 0 && pgd) ++ kfree(pgd); + } + + static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, +diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c +index f5e9f9310b48..b3b0004ea8ac 100644 +--- a/crypto/algif_skcipher.c ++++ b/crypto/algif_skcipher.c +@@ -86,8 +86,13 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq) + } + sgl = sreq->tsg; + n = sg_nents(sgl); +- for_each_sg(sgl, sg, n, i) +- put_page(sg_page(sg)); ++ for_each_sg(sgl, sg, n, i) { ++ struct page *page = sg_page(sg); ++ ++ /* some SGs may not have a page mapped */ ++ if (page && atomic_read(&page->_count)) ++ put_page(page); ++ } + + kfree(sreq->tsg); + } +diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c +index 025c429050c0..5d8dfe027b30 100644 +--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c ++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c +@@ -612,7 +612,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, + } else { + pr_err("Failed to fill pool (%p)\n", pool); + /* If we have any pages left put them to the pool. */ +- list_for_each_entry(p, &pool->list, lru) { ++ list_for_each_entry(p, &new_pages, lru) { + ++cpages; + } + list_splice(&new_pages, &pool->list); +diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c +index 7ba795b24e75..639d1a9c8793 100644 +--- a/drivers/i2c/busses/i2c-ismt.c ++++ b/drivers/i2c/busses/i2c-ismt.c +@@ -339,8 +339,10 @@ static int ismt_process_desc(const struct ismt_desc *desc, + break; + case I2C_SMBUS_BLOCK_DATA: + case I2C_SMBUS_I2C_BLOCK_DATA: +- memcpy(&data->block[1], dma_buffer, desc->rxbytes); +- data->block[0] = desc->rxbytes; ++ if (desc->rxbytes != dma_buffer[0] + 1) ++ return -EMSGSIZE; ++ ++ memcpy(data->block, dma_buffer, desc->rxbytes); + break; + } + return 0; +diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c +index 9e17ef27a183..6f1dbd52ec91 100644 +--- a/drivers/irqchip/irq-mips-gic.c ++++ b/drivers/irqchip/irq-mips-gic.c +@@ -915,8 +915,11 @@ static int __init gic_of_init(struct device_node *node, + gic_len = resource_size(&res); + } + +- if (mips_cm_present()) ++ if (mips_cm_present()) { + write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK); ++ /* Ensure GIC region is enabled before trying to access it */ ++ __sync(); ++ } + gic_present = true; + + __gic_init(gic_base, gic_len, cpu_vec, 0, node); +diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c +index cd4777954f87..9bee3f11898a 100644 +--- a/drivers/net/wireless/ti/wl1251/main.c ++++ b/drivers/net/wireless/ti/wl1251/main.c +@@ -1567,6 +1567,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void) + + wl->state = WL1251_STATE_OFF; + mutex_init(&wl->mutex); ++ spin_lock_init(&wl->wl_lock); + + wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE; + wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE; +diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c +index 
c6a1ec110c01..22bae2b434e2 100644 +--- a/fs/ceph/addr.c ++++ b/fs/ceph/addr.c +@@ -189,7 +189,7 @@ static int ceph_releasepage(struct page *page, gfp_t g) + /* + * read a single page, without unlocking it. + */ +-static int readpage_nounlock(struct file *filp, struct page *page) ++static int ceph_do_readpage(struct file *filp, struct page *page) + { + struct inode *inode = file_inode(filp); + struct ceph_inode_info *ci = ceph_inode(inode); +@@ -219,7 +219,7 @@ static int readpage_nounlock(struct file *filp, struct page *page) + + err = ceph_readpage_from_fscache(inode, page); + if (err == 0) +- goto out; ++ return -EINPROGRESS; + + dout("readpage inode %p file %p page %p index %lu\n", + inode, filp, page, page->index); +@@ -249,8 +249,11 @@ out: + + static int ceph_readpage(struct file *filp, struct page *page) + { +- int r = readpage_nounlock(filp, page); +- unlock_page(page); ++ int r = ceph_do_readpage(filp, page); ++ if (r != -EINPROGRESS) ++ unlock_page(page); ++ else ++ r = 0; + return r; + } + +@@ -1094,7 +1097,7 @@ retry_locked: + goto retry_locked; + r = writepage_nounlock(page, NULL); + if (r < 0) +- goto fail_nosnap; ++ goto fail_unlock; + goto retry_locked; + } + +@@ -1122,11 +1125,14 @@ retry_locked: + } + + /* we need to read it. */ +- r = readpage_nounlock(file, page); +- if (r < 0) +- goto fail_nosnap; ++ r = ceph_do_readpage(file, page); ++ if (r < 0) { ++ if (r == -EINPROGRESS) ++ return -EAGAIN; ++ goto fail_unlock; ++ } + goto retry_locked; +-fail_nosnap: ++fail_unlock: + unlock_page(page); + return r; + } +diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c +index a4766ded1ba7..ff1cfd7b1083 100644 +--- a/fs/ceph/cache.c ++++ b/fs/ceph/cache.c +@@ -224,13 +224,7 @@ void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) + fscache_relinquish_cookie(cookie, 0); + } + +-static void ceph_vfs_readpage_complete(struct page *page, void *data, int error) +-{ +- if (!error) +- SetPageUptodate(page); +-} +- +-static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int error) ++static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error) + { + if (!error) + SetPageUptodate(page); +@@ -259,7 +253,7 @@ int ceph_readpage_from_fscache(struct inode *inode, struct page *page) + return -ENOBUFS; + + ret = fscache_read_or_alloc_page(ci->fscache, page, +- ceph_vfs_readpage_complete, NULL, ++ ceph_readpage_from_fscache_complete, NULL, + GFP_KERNEL); + + switch (ret) { +@@ -288,7 +282,7 @@ int ceph_readpages_from_fscache(struct inode *inode, + return -ENOBUFS; + + ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages, +- ceph_vfs_readpage_complete_unlock, ++ ceph_readpage_from_fscache_complete, + NULL, mapping_gfp_mask(mapping)); + + switch (ret) { +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c +index fa8df3fef6fc..297e05c9e2b0 100644 +--- a/fs/cifs/dir.c ++++ b/fs/cifs/dir.c +@@ -194,7 +194,7 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon) + int i; + + if (unlikely(direntry->d_name.len > +- tcon->fsAttrInfo.MaxPathNameComponentLength)) ++ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength))) + return -ENAMETOOLONG; + + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h +index b8f553b32dda..aacb15bd56fe 100644 +--- a/fs/cifs/smb2pdu.h ++++ b/fs/cifs/smb2pdu.h +@@ -82,8 +82,8 @@ + + #define NUMBER_OF_SMB2_COMMANDS 0x0013 + +-/* BB FIXME - analyze following length BB */ +-#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 
2 pad */ ++/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */ ++#define MAX_SMB2_HDR_SIZE 0x00b0 + + #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe) + +diff --git a/fs/eventpoll.c b/fs/eventpoll.c +index 1e009cad8d5c..1b08556776ce 100644 +--- a/fs/eventpoll.c ++++ b/fs/eventpoll.c +@@ -518,8 +518,13 @@ static void ep_remove_wait_queue(struct eppoll_entry *pwq) + wait_queue_head_t *whead; + + rcu_read_lock(); +- /* If it is cleared by POLLFREE, it should be rcu-safe */ +- whead = rcu_dereference(pwq->whead); ++ /* ++ * If it is cleared by POLLFREE, it should be rcu-safe. ++ * If we read NULL we need a barrier paired with ++ * smp_store_release() in ep_poll_callback(), otherwise ++ * we rely on whead->lock. ++ */ ++ whead = smp_load_acquire(&pwq->whead); + if (whead) + remove_wait_queue(whead, &pwq->wait); + rcu_read_unlock(); +@@ -1003,17 +1008,6 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k + struct epitem *epi = ep_item_from_wait(wait); + struct eventpoll *ep = epi->ep; + +- if ((unsigned long)key & POLLFREE) { +- ep_pwq_from_wait(wait)->whead = NULL; +- /* +- * whead = NULL above can race with ep_remove_wait_queue() +- * which can do another remove_wait_queue() after us, so we +- * can't use __remove_wait_queue(). whead->lock is held by +- * the caller. +- */ +- list_del_init(&wait->task_list); +- } +- + spin_lock_irqsave(&ep->lock, flags); + + /* +@@ -1078,6 +1072,23 @@ out_unlock: + if (pwake) + ep_poll_safewake(&ep->poll_wait); + ++ ++ if ((unsigned long)key & POLLFREE) { ++ /* ++ * If we race with ep_remove_wait_queue() it can miss ++ * ->whead = NULL and do another remove_wait_queue() after ++ * us, so we can't use __remove_wait_queue(). ++ */ ++ list_del_init(&wait->task_list); ++ /* ++ * ->whead != NULL protects us from the race with ep_free() ++ * or ep_remove(), ep_remove_wait_queue() takes whead->lock ++ * held by the caller. Once we nullify it, nothing protects ++ * ep/epi or even wait. ++ */ ++ smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL); ++ } ++ + return 1; + } + +diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h +index fc824e2828f3..5d2add1a6c96 100644 +--- a/include/asm-generic/topology.h ++++ b/include/asm-generic/topology.h +@@ -48,7 +48,11 @@ + #define parent_node(node) ((void)(node),0) + #endif + #ifndef cpumask_of_node +-#define cpumask_of_node(node) ((void)node, cpu_online_mask) ++ #ifdef CONFIG_NEED_MULTIPLE_NODES ++ #define cpumask_of_node(node) ((node) == 0 ? 
cpu_online_mask : cpu_none_mask) ++ #else ++ #define cpumask_of_node(node) ((void)node, cpu_online_mask) ++ #endif + #endif + #ifndef pcibus_to_node + #define pcibus_to_node(bus) ((void)(bus), -1) +diff --git a/kernel/cpuset.c b/kernel/cpuset.c +index 8ccd66a97c8b..2924b6faa469 100644 +--- a/kernel/cpuset.c ++++ b/kernel/cpuset.c +@@ -1910,6 +1910,7 @@ static struct cftype files[] = { + { + .name = "memory_pressure", + .read_u64 = cpuset_read_u64, ++ .private = FILE_MEMORY_PRESSURE, + }, + + { +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 77055a362041..0e01250f2072 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -3275,9 +3275,15 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, + struct xfrm_state *x_new[XFRM_MAX_DEPTH]; + struct xfrm_migrate *mp; + ++ /* Stage 0 - sanity checks */ + if ((err = xfrm_migrate_check(m, num_migrate)) < 0) + goto out; + ++ if (dir >= XFRM_POLICY_MAX) { ++ err = -EINVAL; ++ goto out; ++ } ++ + /* Stage 1 - find policy */ + if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) { + err = -ENOENT;
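
Note on the eventpoll hunk above (illustrative only, not part of any patch in this series): the fix moves the POLLFREE teardown out of the locked section and pairs an smp_store_release() of ->whead with the smp_load_acquire() in ep_remove_wait_queue(), so a reader that still sees a non-NULL head is guaranteed to see a fully valid wait queue, and a reader that sees NULL knows the list_del_init() has already completed. The standalone C11 sketch below shows the same release/acquire publication idiom in userspace; the names (publish, consume, struct whead) are made up for illustration and do not correspond to kernel symbols.

	/* Minimal sketch of the release/acquire publication idiom used by the
	 * eventpoll POLLFREE fix, expressed with C11 atomics. Single-threaded
	 * driver for brevity; in the kernel the two sides run on different CPUs. */
	#include <stdatomic.h>
	#include <stdio.h>

	struct whead {
		int initialized;	/* stands in for the wait-queue state */
	};

	static struct whead real_head;
	static _Atomic(struct whead *) shared_head;	/* starts out NULL */

	/* Writer side: finish all initialisation, then publish the pointer with
	 * release semantics so any reader that observes it also observes the
	 * initialised contents (mirrors smp_store_release() in ep_poll_callback). */
	static void publish(void)
	{
		real_head.initialized = 1;
		atomic_store_explicit(&shared_head, &real_head, memory_order_release);
	}

	/* Reader side: load with acquire semantics (mirrors smp_load_acquire() in
	 * ep_remove_wait_queue). Non-NULL means the pointed-to data is safe to use;
	 * NULL means the other side has already completed its prior work. */
	static void consume(void)
	{
		struct whead *h = atomic_load_explicit(&shared_head, memory_order_acquire);

		if (h)
			printf("head published, initialized = %d\n", h->initialized);
		else
			printf("head not published yet\n");
	}

	int main(void)
	{
		consume();	/* prints "head not published yet" */
		publish();
		consume();	/* guaranteed to see initialized = 1 */
		return 0;
	}

The kernel patch applies the same idiom in the opposite direction (publishing NULL after list_del_init()), but the ordering guarantee being relied on is identical: the acquire load never observes the pointer value without also observing the stores that preceded the paired release store.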