From 21946971fa13f9be2fba319ebf1d47e596b7aa7f Mon Sep 17 00:00:00 2001 From: Igor Pecovnik Date: Wed, 16 Mar 2016 19:42:28 +0100 Subject: [PATCH 1/2] Upstream patches for Udoo / Neo / Cubox / Clearfog / Odroid XU4 legacy kernels: 3.14.65 / 3.10.101 --- .../cubox-default/patch-3.14.61-62.patch | 2180 +++++++++ .../cubox-default/patch-3.14.62-63.patch | 4346 +++++++++++++++++ .../cubox-default/patch-3.14.63-64.patch | 2749 +++++++++++ .../cubox-default/patch-3.14.64-65.patch | 1518 ++++++ .../marvell-default/patch-3.10.96-97.patch | 2341 +++++++++ .../marvell-default/patch-3.10.97-98.patch | 1792 +++++++ .../marvell-default/patch-3.10.98-99.patch | 2573 ++++++++++ .../marvell-default/patch-3.10.99-100.patch | 1156 +++++ .../marvell-default/rpatch-3.10.100-101.patch | 1493 ++++++ .../kernel/neo-default/patch-3.14.63-64.patch | 2749 +++++++++++ .../kernel/neo-default/patch-3.14.64-65.patch | 1518 ++++++ .../odroidxu4-default/patch-3.10.98-99.patch | 2573 ++++++++++ .../odroidxu4-default/patch-3.10.99-100.patch | 1156 +++++ .../rpatch-3.10.100-101.patch | 1493 ++++++ .../udoo-default/patch-3.14.63-64.patch | 2749 +++++++++++ .../udoo-default/patch-3.14.64-65.patch | 1518 ++++++ 16 files changed, 33904 insertions(+) create mode 100644 patch/kernel/cubox-default/patch-3.14.61-62.patch create mode 100644 patch/kernel/cubox-default/patch-3.14.62-63.patch create mode 100644 patch/kernel/cubox-default/patch-3.14.63-64.patch create mode 100644 patch/kernel/cubox-default/patch-3.14.64-65.patch create mode 100644 patch/kernel/marvell-default/patch-3.10.96-97.patch create mode 100644 patch/kernel/marvell-default/patch-3.10.97-98.patch create mode 100644 patch/kernel/marvell-default/patch-3.10.98-99.patch create mode 100644 patch/kernel/marvell-default/patch-3.10.99-100.patch create mode 100644 patch/kernel/marvell-default/rpatch-3.10.100-101.patch create mode 100644 patch/kernel/neo-default/patch-3.14.63-64.patch create mode 100644 patch/kernel/neo-default/patch-3.14.64-65.patch create mode 100644 patch/kernel/odroidxu4-default/patch-3.10.98-99.patch create mode 100644 patch/kernel/odroidxu4-default/patch-3.10.99-100.patch create mode 100644 patch/kernel/odroidxu4-default/rpatch-3.10.100-101.patch create mode 100644 patch/kernel/udoo-default/patch-3.14.63-64.patch create mode 100644 patch/kernel/udoo-default/patch-3.14.64-65.patch diff --git a/patch/kernel/cubox-default/patch-3.14.61-62.patch b/patch/kernel/cubox-default/patch-3.14.61-62.patch new file mode 100644 index 000000000..0ff567447 --- /dev/null +++ b/patch/kernel/cubox-default/patch-3.14.61-62.patch @@ -0,0 +1,2180 @@ +diff --git a/Makefile b/Makefile +index fbf4ec689957..b738f644c71e 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 61 ++SUBLEVEL = 62 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/arm/boot/dts/kirkwood-ts219.dtsi b/arch/arm/boot/dts/kirkwood-ts219.dtsi +index 911f3a8cee23..f4227da77265 100644 +--- a/arch/arm/boot/dts/kirkwood-ts219.dtsi ++++ b/arch/arm/boot/dts/kirkwood-ts219.dtsi +@@ -47,7 +47,7 @@ + }; + poweroff@12100 { + compatible = "qnap,power-off"; +- reg = <0x12000 0x100>; ++ reg = <0x12100 0x100>; + clocks = <&gate_clk 7>; + }; + spi@10600 { +diff --git a/arch/arm/common/icst.c b/arch/arm/common/icst.c +index 2dc6da70ae59..d7ed252708c5 100644 +--- a/arch/arm/common/icst.c ++++ b/arch/arm/common/icst.c +@@ -16,7 +16,7 @@ + */ + #include + #include +- ++#include + #include + + /* +@@ -29,7 +29,11 @@ EXPORT_SYMBOL(icst525_s2div); + + unsigned long 
icst_hz(const struct icst_params *p, struct icst_vco vco) + { +- return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]); ++ u64 dividend = p->ref * 2 * (u64)(vco.v + 8); ++ u32 divisor = (vco.r + 2) * p->s2div[vco.s]; ++ ++ do_div(dividend, divisor); ++ return (unsigned long)dividend; + } + + EXPORT_SYMBOL(icst_hz); +@@ -58,6 +62,7 @@ icst_hz_to_vco(const struct icst_params *p, unsigned long freq) + + if (f > p->vco_min && f <= p->vco_max) + break; ++ i++; + } while (i < 8); + + if (i >= 8) +diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S +index fdedc31e0f40..093eff182d63 100644 +--- a/arch/arm/mm/proc-v7.S ++++ b/arch/arm/mm/proc-v7.S +@@ -95,7 +95,7 @@ ENDPROC(cpu_v7_dcache_clean_area) + .equ cpu_v7_suspend_size, 4 * 9 + #ifdef CONFIG_ARM_CPU_SUSPEND + ENTRY(cpu_v7_do_suspend) +- stmfd sp!, {r4 - r10, lr} ++ stmfd sp!, {r4 - r11, lr} + mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID + mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID + stmia r0!, {r4 - r5} +@@ -112,7 +112,7 @@ ENTRY(cpu_v7_do_suspend) + mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register + mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control + stmia r0, {r5 - r11} +- ldmfd sp!, {r4 - r10, pc} ++ ldmfd sp!, {r4 - r11, pc} + ENDPROC(cpu_v7_do_suspend) + + ENTRY(cpu_v7_do_resume) +diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c +index 0392112a5d70..a5ecef7188ba 100644 +--- a/arch/m32r/kernel/setup.c ++++ b/arch/m32r/kernel/setup.c +@@ -81,7 +81,10 @@ static struct resource code_resource = { + }; + + unsigned long memory_start; ++EXPORT_SYMBOL(memory_start); ++ + unsigned long memory_end; ++EXPORT_SYMBOL(memory_end); + + void __init setup_arch(char **); + int get_cpuinfo(char *); +diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S +index 818dce344e82..b3bad672e5d9 100644 +--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S ++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S +@@ -1805,7 +1805,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + + /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */ + 2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW +- rlwimi r5, r4, 1, DAWRX_WT ++ rlwimi r5, r4, 2, DAWRX_WT + clrrdi r4, r4, 3 + std r4, VCPU_DAWR(r3) + std r5, VCPU_DAWRX(r3) +diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c +index fed892de9baf..bdd2a44fbb1a 100644 +--- a/arch/x86/mm/pageattr.c ++++ b/arch/x86/mm/pageattr.c +@@ -33,7 +33,7 @@ struct cpa_data { + pgd_t *pgd; + pgprot_t mask_set; + pgprot_t mask_clr; +- int numpages; ++ unsigned long numpages; + int flags; + unsigned long pfn; + unsigned force_split : 1; +@@ -1289,7 +1289,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) + * CPA operation. Either a large page has been + * preserved or a single page update happened. 
+ */ +- BUG_ON(cpa->numpages > numpages); ++ BUG_ON(cpa->numpages > numpages || !cpa->numpages); + numpages -= cpa->numpages; + if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) + cpa->curpage++; +diff --git a/block/blk-core.c b/block/blk-core.c +index e45b321cf6a0..46be0cddf819 100644 +--- a/block/blk-core.c ++++ b/block/blk-core.c +@@ -3164,6 +3164,9 @@ int blk_pre_runtime_suspend(struct request_queue *q) + { + int ret = 0; + ++ if (!q->dev) ++ return ret; ++ + spin_lock_irq(q->queue_lock); + if (q->nr_pending) { + ret = -EBUSY; +@@ -3191,6 +3194,9 @@ EXPORT_SYMBOL(blk_pre_runtime_suspend); + */ + void blk_post_runtime_suspend(struct request_queue *q, int err) + { ++ if (!q->dev) ++ return; ++ + spin_lock_irq(q->queue_lock); + if (!err) { + q->rpm_status = RPM_SUSPENDED; +@@ -3215,6 +3221,9 @@ EXPORT_SYMBOL(blk_post_runtime_suspend); + */ + void blk_pre_runtime_resume(struct request_queue *q) + { ++ if (!q->dev) ++ return; ++ + spin_lock_irq(q->queue_lock); + q->rpm_status = RPM_RESUMING; + spin_unlock_irq(q->queue_lock); +@@ -3237,6 +3246,9 @@ EXPORT_SYMBOL(blk_pre_runtime_resume); + */ + void blk_post_runtime_resume(struct request_queue *q, int err) + { ++ if (!q->dev) ++ return; ++ + spin_lock_irq(q->queue_lock); + if (!err) { + q->rpm_status = RPM_ACTIVE; +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index de5ab4876a89..951ecd52226c 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -3655,6 +3655,7 @@ unlock: + int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) + { ++ struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_caching *args = data; + struct drm_i915_gem_object *obj; + enum i915_cache_level level; +@@ -3674,9 +3675,11 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, + return -EINVAL; + } + ++ intel_runtime_pm_get(dev_priv); ++ + ret = i915_mutex_lock_interruptible(dev); + if (ret) +- return ret; ++ goto rpm_put; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); + if (&obj->base == NULL) { +@@ -3689,6 +3692,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, + drm_gem_object_unreference(&obj->base); + unlock: + mutex_unlock(&dev->struct_mutex); ++rpm_put: ++ intel_runtime_pm_put(dev_priv); ++ + return ret; + } + +diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c +index 4dddeabdfbb0..5da07546e182 100644 +--- a/drivers/iio/adc/ad7793.c ++++ b/drivers/iio/adc/ad7793.c +@@ -101,7 +101,7 @@ + #define AD7795_CH_AIN1M_AIN1M 8 /* AIN1(-) - AIN1(-) */ + + /* ID Register Bit Designations (AD7793_REG_ID) */ +-#define AD7785_ID 0xB ++#define AD7785_ID 0x3 + #define AD7792_ID 0xA + #define AD7793_ID 0xB + #define AD7794_ID 0xF +diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c +index f03b92fd3803..aac16fecdfa8 100644 +--- a/drivers/iio/dac/ad5064.c ++++ b/drivers/iio/dac/ad5064.c +@@ -113,12 +113,16 @@ enum ad5064_type { + ID_AD5065, + ID_AD5628_1, + ID_AD5628_2, ++ ID_AD5629_1, ++ ID_AD5629_2, + ID_AD5648_1, + ID_AD5648_2, + ID_AD5666_1, + ID_AD5666_2, + ID_AD5668_1, + ID_AD5668_2, ++ ID_AD5669_1, ++ ID_AD5669_2, + }; + + static int ad5064_write(struct ad5064_state *st, unsigned int cmd, +@@ -291,7 +295,7 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = { + { }, + }; + +-#define AD5064_CHANNEL(chan, addr, bits) { \ ++#define AD5064_CHANNEL(chan, addr, bits, _shift) { \ + .type = IIO_VOLTAGE, \ + .indexed = 1, \ + .output = 1, \ +@@ -303,36 +307,39 
@@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = { + .sign = 'u', \ + .realbits = (bits), \ + .storagebits = 16, \ +- .shift = 20 - bits, \ ++ .shift = (_shift), \ + }, \ + .ext_info = ad5064_ext_info, \ + } + +-#define DECLARE_AD5064_CHANNELS(name, bits) \ ++#define DECLARE_AD5064_CHANNELS(name, bits, shift) \ + const struct iio_chan_spec name[] = { \ +- AD5064_CHANNEL(0, 0, bits), \ +- AD5064_CHANNEL(1, 1, bits), \ +- AD5064_CHANNEL(2, 2, bits), \ +- AD5064_CHANNEL(3, 3, bits), \ +- AD5064_CHANNEL(4, 4, bits), \ +- AD5064_CHANNEL(5, 5, bits), \ +- AD5064_CHANNEL(6, 6, bits), \ +- AD5064_CHANNEL(7, 7, bits), \ ++ AD5064_CHANNEL(0, 0, bits, shift), \ ++ AD5064_CHANNEL(1, 1, bits, shift), \ ++ AD5064_CHANNEL(2, 2, bits, shift), \ ++ AD5064_CHANNEL(3, 3, bits, shift), \ ++ AD5064_CHANNEL(4, 4, bits, shift), \ ++ AD5064_CHANNEL(5, 5, bits, shift), \ ++ AD5064_CHANNEL(6, 6, bits, shift), \ ++ AD5064_CHANNEL(7, 7, bits, shift), \ + } + +-#define DECLARE_AD5065_CHANNELS(name, bits) \ ++#define DECLARE_AD5065_CHANNELS(name, bits, shift) \ + const struct iio_chan_spec name[] = { \ +- AD5064_CHANNEL(0, 0, bits), \ +- AD5064_CHANNEL(1, 3, bits), \ ++ AD5064_CHANNEL(0, 0, bits, shift), \ ++ AD5064_CHANNEL(1, 3, bits, shift), \ + } + +-static DECLARE_AD5064_CHANNELS(ad5024_channels, 12); +-static DECLARE_AD5064_CHANNELS(ad5044_channels, 14); +-static DECLARE_AD5064_CHANNELS(ad5064_channels, 16); ++static DECLARE_AD5064_CHANNELS(ad5024_channels, 12, 8); ++static DECLARE_AD5064_CHANNELS(ad5044_channels, 14, 6); ++static DECLARE_AD5064_CHANNELS(ad5064_channels, 16, 4); + +-static DECLARE_AD5065_CHANNELS(ad5025_channels, 12); +-static DECLARE_AD5065_CHANNELS(ad5045_channels, 14); +-static DECLARE_AD5065_CHANNELS(ad5065_channels, 16); ++static DECLARE_AD5065_CHANNELS(ad5025_channels, 12, 8); ++static DECLARE_AD5065_CHANNELS(ad5045_channels, 14, 6); ++static DECLARE_AD5065_CHANNELS(ad5065_channels, 16, 4); ++ ++static DECLARE_AD5064_CHANNELS(ad5629_channels, 12, 4); ++static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0); + + static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { + [ID_AD5024] = { +@@ -382,6 +389,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { + .channels = ad5024_channels, + .num_channels = 8, + }, ++ [ID_AD5629_1] = { ++ .shared_vref = true, ++ .internal_vref = 2500000, ++ .channels = ad5629_channels, ++ .num_channels = 8, ++ }, ++ [ID_AD5629_2] = { ++ .shared_vref = true, ++ .internal_vref = 5000000, ++ .channels = ad5629_channels, ++ .num_channels = 8, ++ }, + [ID_AD5648_1] = { + .shared_vref = true, + .internal_vref = 2500000, +@@ -418,6 +437,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { + .channels = ad5064_channels, + .num_channels = 8, + }, ++ [ID_AD5669_1] = { ++ .shared_vref = true, ++ .internal_vref = 2500000, ++ .channels = ad5669_channels, ++ .num_channels = 8, ++ }, ++ [ID_AD5669_2] = { ++ .shared_vref = true, ++ .internal_vref = 5000000, ++ .channels = ad5669_channels, ++ .num_channels = 8, ++ }, + }; + + static inline unsigned int ad5064_num_vref(struct ad5064_state *st) +@@ -598,10 +629,16 @@ static int ad5064_i2c_write(struct ad5064_state *st, unsigned int cmd, + unsigned int addr, unsigned int val) + { + struct i2c_client *i2c = to_i2c_client(st->dev); ++ int ret; + + st->data.i2c[0] = (cmd << 4) | addr; + put_unaligned_be16(val, &st->data.i2c[1]); +- return i2c_master_send(i2c, st->data.i2c, 3); ++ ++ ret = i2c_master_send(i2c, st->data.i2c, 3); ++ if (ret < 0) ++ return ret; ++ ++ return 0; + } + + 
static int ad5064_i2c_probe(struct i2c_client *i2c, +@@ -617,12 +654,12 @@ static int ad5064_i2c_remove(struct i2c_client *i2c) + } + + static const struct i2c_device_id ad5064_i2c_ids[] = { +- {"ad5629-1", ID_AD5628_1}, +- {"ad5629-2", ID_AD5628_2}, +- {"ad5629-3", ID_AD5628_2}, /* similar enough to ad5629-2 */ +- {"ad5669-1", ID_AD5668_1}, +- {"ad5669-2", ID_AD5668_2}, +- {"ad5669-3", ID_AD5668_2}, /* similar enough to ad5669-2 */ ++ {"ad5629-1", ID_AD5629_1}, ++ {"ad5629-2", ID_AD5629_2}, ++ {"ad5629-3", ID_AD5629_2}, /* similar enough to ad5629-2 */ ++ {"ad5669-1", ID_AD5669_1}, ++ {"ad5669-2", ID_AD5669_2}, ++ {"ad5669-3", ID_AD5669_2}, /* similar enough to ad5669-2 */ + {} + }; + MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids); +diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c +index 7d9f5c31d2fc..0cedc81be312 100644 +--- a/drivers/iio/dac/mcp4725.c ++++ b/drivers/iio/dac/mcp4725.c +@@ -301,6 +301,7 @@ static int mcp4725_probe(struct i2c_client *client, + data->client = client; + + indio_dev->dev.parent = &client->dev; ++ indio_dev->name = id->name; + indio_dev->info = &mcp4725_info; + indio_dev->channels = &mcp4725_channel; + indio_dev->num_channels = 1; +diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c +index cb32b593f1c5..36607d52fee0 100644 +--- a/drivers/iio/imu/adis_buffer.c ++++ b/drivers/iio/imu/adis_buffer.c +@@ -43,7 +43,7 @@ int adis_update_scan_mode(struct iio_dev *indio_dev, + return -ENOMEM; + + rx = adis->buffer; +- tx = rx + indio_dev->scan_bytes; ++ tx = rx + scan_count; + + spi_message_init(&adis->msg); + +diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c +index 37b52bd44f86..a9164b51b5dc 100644 +--- a/drivers/iio/industrialio-buffer.c ++++ b/drivers/iio/industrialio-buffer.c +@@ -830,7 +830,7 @@ int iio_scan_mask_set(struct iio_dev *indio_dev, + if (trialmask == NULL) + return -ENOMEM; + if (!indio_dev->masklength) { +- WARN_ON("Trying to set scanmask prior to registering buffer\n"); ++ WARN(1, "Trying to set scanmask prior to registering buffer\n"); + goto err_invalid_mask; + } + bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength); +diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c +index acc911a836ca..a7b073b5e263 100644 +--- a/drivers/iio/industrialio-core.c ++++ b/drivers/iio/industrialio-core.c +@@ -589,7 +589,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr, + break; + case IIO_SEPARATE: + if (!chan->indexed) { +- WARN_ON("Differential channels must be indexed\n"); ++ WARN(1, "Differential channels must be indexed\n"); + ret = -EINVAL; + goto error_free_full_postfix; + } +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c +index 94eaaf0c49b3..9e497e4041a6 100644 +--- a/drivers/input/mouse/elantech.c ++++ b/drivers/input/mouse/elantech.c +@@ -1111,7 +1111,7 @@ static int elantech_set_input_params(struct psmouse *psmouse) + input_set_abs_params(dev, ABS_TOOL_WIDTH, ETP_WMIN_V2, + ETP_WMAX_V2, 0, 0); + } +- input_mt_init_slots(dev, 2, 0); ++ input_mt_init_slots(dev, 2, INPUT_MT_SEMI_MT); + input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0); + input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0); + break; +@@ -1387,6 +1387,13 @@ static const struct dmi_system_id no_hw_res_dmi_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "U2442"), + }, + }, ++ { ++ /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), ++ 
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"), ++ }, ++ }, + #endif + { } + }; +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index dd6d14d2337f..a4baf9677a45 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -258,6 +258,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { + }, + }, + { ++ /* Fujitsu Lifebook U745 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"), ++ }, ++ }, ++ { + /* Fujitsu T70H */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), +diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c +index 158156543410..2d7bb594626c 100644 +--- a/drivers/iommu/dmar.c ++++ b/drivers/iommu/dmar.c +@@ -986,7 +986,7 @@ void dmar_disable_qi(struct intel_iommu *iommu) + + raw_spin_lock_irqsave(&iommu->register_lock, flags); + +- sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); ++ sts = readl(iommu->reg + DMAR_GSTS_REG); + if (!(sts & DMA_GSTS_QIES)) + goto end; + +diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c +index ef5f65dbafe9..ffad080db013 100644 +--- a/drivers/iommu/intel_irq_remapping.c ++++ b/drivers/iommu/intel_irq_remapping.c +@@ -489,7 +489,7 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu) + + raw_spin_lock_irqsave(&iommu->register_lock, flags); + +- sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); ++ sts = readl(iommu->reg + DMAR_GSTS_REG); + if (!(sts & DMA_GSTS_IRES)) + goto end; + +diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c +index 5895f1978691..e98de425f8e0 100644 +--- a/drivers/net/wan/x25_asy.c ++++ b/drivers/net/wan/x25_asy.c +@@ -545,16 +545,12 @@ static void x25_asy_receive_buf(struct tty_struct *tty, + + static int x25_asy_open_tty(struct tty_struct *tty) + { +- struct x25_asy *sl = tty->disc_data; ++ struct x25_asy *sl; + int err; + + if (tty->ops->write == NULL) + return -EOPNOTSUPP; + +- /* First make sure we're not already connected. */ +- if (sl && sl->magic == X25_ASY_MAGIC) +- return -EEXIST; +- + /* OK. Find a free X.25 channel to use. 
*/ + sl = x25_asy_alloc(); + if (sl == NULL) +diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c +index aaac3594f83b..452b637be676 100644 +--- a/drivers/phy/phy-twl4030-usb.c ++++ b/drivers/phy/phy-twl4030-usb.c +@@ -777,6 +777,7 @@ static int twl4030_usb_remove(struct platform_device *pdev) + struct twl4030_usb *twl = platform_get_drvdata(pdev); + int val; + ++ usb_remove_phy(&twl->phy); + pm_runtime_get_sync(twl->dev); + cancel_delayed_work(&twl->id_workaround_work); + device_remove_file(twl->dev, &dev_attr_vbus); +diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c +index 02bc5a6343c3..aa454241489c 100644 +--- a/drivers/platform/x86/intel_scu_ipcutil.c ++++ b/drivers/platform/x86/intel_scu_ipcutil.c +@@ -49,7 +49,7 @@ struct scu_ipc_data { + + static int scu_reg_access(u32 cmd, struct scu_ipc_data *data) + { +- int count = data->count; ++ unsigned int count = data->count; + + if (count == 0 || count == 3 || count > 4) + return -EINVAL; +diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c +index 4b9cf93f3fb6..d233170cd439 100644 +--- a/drivers/scsi/device_handler/scsi_dh_rdac.c ++++ b/drivers/scsi/device_handler/scsi_dh_rdac.c +@@ -569,7 +569,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev, + /* + * Command Lock contention + */ +- err = SCSI_DH_RETRY; ++ err = SCSI_DH_IMM_RETRY; + break; + default: + break; +@@ -619,6 +619,8 @@ retry: + err = mode_select_handle_sense(sdev, h->sense); + if (err == SCSI_DH_RETRY && retry_cnt--) + goto retry; ++ if (err == SCSI_DH_IMM_RETRY) ++ goto retry; + } + if (err == SCSI_DH_OK) { + h->state = RDAC_STATE_ACTIVE; +diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c +index f28ea070d3df..bd5fd3b7e178 100644 +--- a/drivers/scsi/hosts.c ++++ b/drivers/scsi/hosts.c +@@ -308,6 +308,17 @@ static void scsi_host_dev_release(struct device *dev) + kfree(queuedata); + } + ++ if (shost->shost_state == SHOST_CREATED) { ++ /* ++ * Free the shost_dev device name here if scsi_host_alloc() ++ * and scsi_host_put() have been called but neither ++ * scsi_host_add() nor scsi_host_remove() has been called. ++ * This avoids that the memory allocated for the shost_dev ++ * name is leaked. 
++ */ ++ kfree(dev_name(&shost->shost_dev)); ++ } ++ + scsi_destroy_command_freelist(shost); + if (shost->bqt) + blk_free_tags(shost->bqt); +diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c +index 262ab837a704..8790e8640acd 100644 +--- a/drivers/scsi/scsi_devinfo.c ++++ b/drivers/scsi/scsi_devinfo.c +@@ -205,6 +205,7 @@ static struct { + {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC}, + {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, + {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, ++ {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES}, + {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, + {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, + {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, +diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c +index a59be67b92d5..88e136c716e9 100644 +--- a/drivers/scsi/scsi_pm.c ++++ b/drivers/scsi/scsi_pm.c +@@ -151,13 +151,13 @@ static int sdev_runtime_suspend(struct device *dev) + struct scsi_device *sdev = to_scsi_device(dev); + int err = 0; + +- if (pm && pm->runtime_suspend) { +- err = blk_pre_runtime_suspend(sdev->request_queue); +- if (err) +- return err; ++ err = blk_pre_runtime_suspend(sdev->request_queue); ++ if (err) ++ return err; ++ if (pm && pm->runtime_suspend) + err = pm->runtime_suspend(dev); +- blk_post_runtime_suspend(sdev->request_queue, err); +- } ++ blk_post_runtime_suspend(sdev->request_queue, err); ++ + return err; + } + +@@ -180,11 +180,11 @@ static int sdev_runtime_resume(struct device *dev) + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + int err = 0; + +- if (pm && pm->runtime_resume) { +- blk_pre_runtime_resume(sdev->request_queue); ++ blk_pre_runtime_resume(sdev->request_queue); ++ if (pm && pm->runtime_resume) + err = pm->runtime_resume(dev); +- blk_post_runtime_resume(sdev->request_queue, err); +- } ++ blk_post_runtime_resume(sdev->request_queue, err); ++ + return err; + } + +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c +index 665acbf83693..5706bb89f6a0 100644 +--- a/drivers/scsi/scsi_sysfs.c ++++ b/drivers/scsi/scsi_sysfs.c +@@ -882,7 +882,7 @@ sdev_store_queue_ramp_up_period(struct device *dev, + return -EINVAL; + + sdev->queue_ramp_up_period = msecs_to_jiffies(period); +- return period; ++ return count; + } + + static struct device_attribute sdev_attr_queue_ramp_up_period = +@@ -1123,31 +1123,25 @@ static void __scsi_remove_target(struct scsi_target *starget) + void scsi_remove_target(struct device *dev) + { + struct Scsi_Host *shost = dev_to_shost(dev->parent); +- struct scsi_target *starget, *last = NULL; ++ struct scsi_target *starget, *last_target = NULL; + unsigned long flags; + +- /* remove targets being careful to lookup next entry before +- * deleting the last +- */ ++restart: + spin_lock_irqsave(shost->host_lock, flags); + list_for_each_entry(starget, &shost->__targets, siblings) { +- if (starget->state == STARGET_DEL) ++ if (starget->state == STARGET_DEL || ++ starget == last_target) + continue; + if (starget->dev.parent == dev || &starget->dev == dev) { +- /* assuming new targets arrive at the end */ + kref_get(&starget->reap_ref); ++ last_target = starget; + spin_unlock_irqrestore(shost->host_lock, flags); +- if (last) +- scsi_target_reap(last); +- last = starget; + __scsi_remove_target(starget); +- spin_lock_irqsave(shost->host_lock, flags); ++ scsi_target_reap(starget); ++ goto restart; + } + } + spin_unlock_irqrestore(shost->host_lock, 
flags); +- +- if (last) +- scsi_target_reap(last); + } + EXPORT_SYMBOL(scsi_remove_target); + +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c +index a10706409927..614531c14777 100644 +--- a/drivers/scsi/sd.c ++++ b/drivers/scsi/sd.c +@@ -3129,8 +3129,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) + struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); + int ret = 0; + +- if (!sdkp) +- return 0; /* this can happen */ ++ if (!sdkp) /* E.g.: runtime suspend following sd_remove() */ ++ return 0; + + if (sdkp->WCE && sdkp->media_present) { + sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); +@@ -3171,6 +3171,9 @@ static int sd_resume(struct device *dev) + struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); + int ret = 0; + ++ if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ ++ return 0; ++ + if (!sdkp->device->manage_start_stop) + goto done; + +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index 721d839d6c54..0be16bf5f0cd 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -1258,7 +1258,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma) + } + + sfp->mmap_called = 1; +- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; ++ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_private_data = sfp; + vma->vm_ops = &sg_mmap_vm_ops; + return 0; +diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c +index 40d85929aefe..2d3d11021bb6 100644 +--- a/drivers/scsi/sr.c ++++ b/drivers/scsi/sr.c +@@ -142,6 +142,9 @@ static int sr_runtime_suspend(struct device *dev) + { + struct scsi_cd *cd = dev_get_drvdata(dev); + ++ if (!cd) /* E.g.: runtime suspend following sr_remove() */ ++ return 0; ++ + if (cd->media_present) + return -EBUSY; + else +@@ -995,6 +998,7 @@ static int sr_remove(struct device *dev) + + blk_queue_prep_rq(cd->device->request_queue, scsi_prep_fn); + del_gendisk(cd->disk); ++ dev_set_drvdata(dev, NULL); + + mutex_lock(&sr_ref_mutex); + kref_put(&cd->kref, sr_kref_release); +diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c +index a876ce755351..22818550efd3 100644 +--- a/drivers/staging/iio/adc/lpc32xx_adc.c ++++ b/drivers/staging/iio/adc/lpc32xx_adc.c +@@ -76,7 +76,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev, + + if (mask == IIO_CHAN_INFO_RAW) { + mutex_lock(&indio_dev->mlock); +- clk_enable(info->clk); ++ clk_prepare_enable(info->clk); + /* Measurement setup */ + __raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm, + LPC32XX_ADC_SELECT(info->adc_base)); +@@ -84,7 +84,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev, + __raw_writel(AD_PDN_CTRL | AD_STROBE, + LPC32XX_ADC_CTRL(info->adc_base)); + wait_for_completion(&info->completion); /* set by ISR */ +- clk_disable(info->clk); ++ clk_disable_unprepare(info->clk); + *val = info->value; + mutex_unlock(&indio_dev->mlock); + +diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c +index ca04d3669acc..34a6deef1d6c 100644 +--- a/drivers/staging/speakup/selection.c ++++ b/drivers/staging/speakup/selection.c +@@ -140,7 +140,9 @@ static void __speakup_paste_selection(struct work_struct *work) + struct tty_ldisc *ld; + DECLARE_WAITQUEUE(wait, current); + +- ld = tty_ldisc_ref_wait(tty); ++ ld = tty_ldisc_ref(tty); ++ if (!ld) ++ goto tty_unref; + tty_buffer_lock_exclusive(&vc->port); + + add_wait_queue(&vc->paste_wait, &wait); +@@ -160,6 +162,7 @@ static void __speakup_paste_selection(struct work_struct *work) + + tty_buffer_unlock_exclusive(&vc->port); + 
tty_ldisc_deref(ld); ++tty_unref: + tty_kref_put(tty); + } + +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index 9dbf17671439..c066e6e298c3 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -4027,6 +4027,17 @@ reject: + return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); + } + ++static bool iscsi_target_check_conn_state(struct iscsi_conn *conn) ++{ ++ bool ret; ++ ++ spin_lock_bh(&conn->state_lock); ++ ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN); ++ spin_unlock_bh(&conn->state_lock); ++ ++ return ret; ++} ++ + int iscsi_target_rx_thread(void *arg) + { + int ret, rc; +@@ -4044,7 +4055,7 @@ int iscsi_target_rx_thread(void *arg) + * incoming iscsi/tcp socket I/O, and/or failing the connection. + */ + rc = wait_for_completion_interruptible(&conn->rx_login_comp); +- if (rc < 0) ++ if (rc < 0 || iscsi_target_check_conn_state(conn)) + return 0; + + if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { +diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c +index 1c0088fe9e99..83465617ad6d 100644 +--- a/drivers/target/iscsi/iscsi_target_configfs.c ++++ b/drivers/target/iscsi/iscsi_target_configfs.c +@@ -1876,7 +1876,8 @@ static void lio_tpg_release_fabric_acl( + } + + /* +- * Called with spin_lock_bh(struct se_portal_group->session_lock) held.. ++ * Called with spin_lock_irq(struct se_portal_group->session_lock) held ++ * or not held. + * + * Also, this function calls iscsit_inc_session_usage_count() on the + * struct iscsi_session in question. +@@ -1884,19 +1885,32 @@ static void lio_tpg_release_fabric_acl( + static int lio_tpg_shutdown_session(struct se_session *se_sess) + { + struct iscsi_session *sess = se_sess->fabric_sess_ptr; ++ struct se_portal_group *se_tpg = se_sess->se_tpg; ++ bool local_lock = false; ++ ++ if (!spin_is_locked(&se_tpg->session_lock)) { ++ spin_lock_irq(&se_tpg->session_lock); ++ local_lock = true; ++ } + + spin_lock(&sess->conn_lock); + if (atomic_read(&sess->session_fall_back_to_erl0) || + atomic_read(&sess->session_logout) || + (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { + spin_unlock(&sess->conn_lock); ++ if (local_lock) ++ spin_unlock_irq(&sess->conn_lock); + return 0; + } + atomic_set(&sess->session_reinstatement, 1); + spin_unlock(&sess->conn_lock); + + iscsit_stop_time2retain_timer(sess); ++ spin_unlock_irq(&se_tpg->session_lock); ++ + iscsit_stop_session(sess, 1, 1); ++ if (!local_lock) ++ spin_lock_irq(&se_tpg->session_lock); + + return 1; + } +diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c +index 25ad113c5978..abbac7fe64e2 100644 +--- a/drivers/target/iscsi/iscsi_target_nego.c ++++ b/drivers/target/iscsi/iscsi_target_nego.c +@@ -393,6 +393,7 @@ err: + if (login->login_complete) { + if (conn->rx_thread && conn->rx_thread_active) { + send_sig(SIGINT, conn->rx_thread, 1); ++ complete(&conn->rx_login_comp); + kthread_stop(conn->rx_thread); + } + if (conn->tx_thread && conn->tx_thread_active) { +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c +index e49616eeb1cc..c3f9b9920d8d 100644 +--- a/drivers/tty/pty.c ++++ b/drivers/tty/pty.c +@@ -617,7 +617,14 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) + /* this is called once with whichever end is closed last */ + static void pty_unix98_shutdown(struct tty_struct *tty) + { +- devpts_kill_index(tty->driver_data, tty->index); ++ struct inode 
*ptmx_inode; ++ ++ if (tty->driver->subtype == PTY_TYPE_MASTER) ++ ptmx_inode = tty->driver_data; ++ else ++ ptmx_inode = tty->link->driver_data; ++ devpts_kill_index(ptmx_inode, tty->index); ++ devpts_del_ref(ptmx_inode); + } + + static const struct tty_operations ptm_unix98_ops = { +@@ -708,6 +715,18 @@ static int ptmx_open(struct inode *inode, struct file *filp) + set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ + tty->driver_data = inode; + ++ /* ++ * In the case where all references to ptmx inode are dropped and we ++ * still have /dev/tty opened pointing to the master/slave pair (ptmx ++ * is closed/released before /dev/tty), we must make sure that the inode ++ * is still valid when we call the final pty_unix98_shutdown, thus we ++ * hold an additional reference to the ptmx inode. For the same /dev/tty ++ * last close case, we also need to make sure the super_block isn't ++ * destroyed (devpts instance unmounted), before /dev/tty is closed and ++ * on its release devpts_kill_index is called. ++ */ ++ devpts_add_ref(inode); ++ + tty_add_file(tty, filp); + + slave_inode = devpts_pty_new(inode, +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c +index 996f8f3fd423..1e1fed47eabc 100644 +--- a/drivers/tty/serial/8250/8250_pci.c ++++ b/drivers/tty/serial/8250/8250_pci.c +@@ -1613,6 +1613,9 @@ static int pci_eg20t_init(struct pci_dev *dev) + #endif + } + ++#define PCI_DEVICE_ID_EXAR_XR17V4358 0x4358 ++#define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358 ++ + static int + pci_xr17c154_setup(struct serial_private *priv, + const struct pciserial_board *board, +@@ -1622,6 +1625,15 @@ pci_xr17c154_setup(struct serial_private *priv, + return pci_default_setup(priv, board, port, idx); + } + ++static inline int ++xr17v35x_has_slave(struct serial_private *priv) ++{ ++ const int dev_id = priv->dev->device; ++ ++ return ((dev_id == PCI_DEVICE_ID_EXAR_XR17V4358) || ++ (dev_id == PCI_DEVICE_ID_EXAR_XR17V8358)); ++} ++ + static int + pci_xr17v35x_setup(struct serial_private *priv, + const struct pciserial_board *board, +@@ -1636,6 +1648,13 @@ pci_xr17v35x_setup(struct serial_private *priv, + port->port.flags |= UPF_EXAR_EFR; + + /* ++ * Setup the uart clock for the devices on expansion slot to ++ * half the clock speed of the main chip (which is 125MHz) ++ */ ++ if (xr17v35x_has_slave(priv) && idx >= 8) ++ port->port.uartclk = (7812500 * 16 / 2); ++ ++ /* + * Setup Multipurpose Input/Output pins. 
+ */ + if (idx == 0) { +@@ -1772,9 +1791,6 @@ pci_wch_ch353_setup(struct serial_private *priv, + #define PCI_DEVICE_ID_SUNIX_1999 0x1999 + + +-#define PCI_DEVICE_ID_EXAR_XR17V4358 0x4358 +-#define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358 +- + /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ + #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 + #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index 27688e3b0a4c..b63507bbf74b 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -1523,7 +1523,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "HW died, freeing TD."); + urb_priv = urb->hcpriv; +- for (i = urb_priv->td_cnt; i < urb_priv->length; i++) { ++ for (i = urb_priv->td_cnt; ++ i < urb_priv->length && xhci->devs[urb->dev->slot_id]; ++ i++) { + td = urb_priv->td[i]; + if (!list_empty(&td->td_list)) + list_del_init(&td->td_list); +diff --git a/fs/aio.c b/fs/aio.c +index 3241659491b1..0612022162ad 100644 +--- a/fs/aio.c ++++ b/fs/aio.c +@@ -1375,11 +1375,16 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb, + unsigned long *nr_segs, + struct iovec *iovec) + { +- if (unlikely(!access_ok(!rw, buf, kiocb->ki_nbytes))) ++ size_t len = kiocb->ki_nbytes; ++ ++ if (len > MAX_RW_COUNT) ++ len = MAX_RW_COUNT; ++ ++ if (unlikely(!access_ok(!rw, buf, len))) + return -EFAULT; + + iovec->iov_base = buf; +- iovec->iov_len = kiocb->ki_nbytes; ++ iovec->iov_len = len; + *nr_segs = 1; + return 0; + } +diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c +index 0529defbdf73..9b8447e4ac3f 100644 +--- a/fs/btrfs/backref.c ++++ b/fs/btrfs/backref.c +@@ -1264,7 +1264,8 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, + read_extent_buffer(eb, dest + bytes_left, + name_off, name_len); + if (eb != eb_in) { +- btrfs_tree_read_unlock_blocking(eb); ++ if (!path->skip_locking) ++ btrfs_tree_read_unlock_blocking(eb); + free_extent_buffer(eb); + } + ret = inode_ref_info(parent, 0, fs_root, path, &found_key); +@@ -1283,9 +1284,10 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, + eb = path->nodes[0]; + /* make sure we can use eb after releasing the path */ + if (eb != eb_in) { +- atomic_inc(&eb->refs); +- btrfs_tree_read_lock(eb); +- btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); ++ if (!path->skip_locking) ++ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); ++ path->nodes[0] = NULL; ++ path->locks[0] = 0; + } + btrfs_release_path(path); + iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); +diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c +index 12e35566d2fc..cc4790f11795 100644 +--- a/fs/btrfs/delayed-inode.c ++++ b/fs/btrfs/delayed-inode.c +@@ -1686,7 +1686,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list, + * + */ + int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, +- struct list_head *ins_list) ++ struct list_head *ins_list, bool *emitted) + { + struct btrfs_dir_item *di; + struct btrfs_delayed_item *curr, *next; +@@ -1730,6 +1730,7 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, + + if (over) + return 1; ++ *emitted = true; + } + return 0; + } +diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h +index f70119f25421..0167853c84ae 100644 +--- a/fs/btrfs/delayed-inode.h ++++ b/fs/btrfs/delayed-inode.h +@@ -144,7 +144,7 @@ void btrfs_put_delayed_items(struct list_head *ins_list, + int 
btrfs_should_delete_dir_index(struct list_head *del_list, + u64 index); + int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, +- struct list_head *ins_list); ++ struct list_head *ins_list, bool *emitted); + + /* for init */ + int __init btrfs_delayed_inode_init(void); +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 7f0dcfc58cbf..08824fe6ef44 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -5205,6 +5205,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) + char *name_ptr; + int name_len; + int is_curr = 0; /* ctx->pos points to the current index? */ ++ bool emitted; + + /* FIXME, use a real flag for deciding about the key type */ + if (root->fs_info->tree_root == root) +@@ -5233,6 +5234,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) + if (ret < 0) + goto err; + ++ emitted = false; + while (1) { + leaf = path->nodes[0]; + slot = path->slots[0]; +@@ -5312,6 +5314,7 @@ skip: + + if (over) + goto nopos; ++ emitted = true; + di_len = btrfs_dir_name_len(leaf, di) + + btrfs_dir_data_len(leaf, di) + sizeof(*di); + di_cur += di_len; +@@ -5324,11 +5327,20 @@ next: + if (key_type == BTRFS_DIR_INDEX_KEY) { + if (is_curr) + ctx->pos++; +- ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); ++ ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted); + if (ret) + goto nopos; + } + ++ /* ++ * If we haven't emitted any dir entry, we must not touch ctx->pos as ++ * it was was set to the termination value in previous call. We assume ++ * that "." and ".." were emitted if we reach this point and set the ++ * termination value as well for an empty directory. ++ */ ++ if (ctx->pos > 2 && !emitted) ++ goto nopos; ++ + /* Reached end of directory/root. Bump pos past the last item. */ + ctx->pos++; + +diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c +index 3299778391fd..0bd335a393f8 100644 +--- a/fs/cifs/cifsencrypt.c ++++ b/fs/cifs/cifsencrypt.c +@@ -710,7 +710,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) + + ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL); + if (!ses->auth_key.response) { +- rc = ENOMEM; ++ rc = -ENOMEM; + ses->auth_key.len = 0; + goto setup_ntlmv2_rsp_ret; + } +diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c +index b334a89d6a66..1320d1ecc630 100644 +--- a/fs/cifs/readdir.c ++++ b/fs/cifs/readdir.c +@@ -849,6 +849,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx) + * if buggy server returns . and .. late do we want to + * check for that here? 
+ */ ++ *tmp_buf = 0; + rc = cifs_filldir(current_entry, file, ctx, + tmp_buf, max_len); + if (rc) { +diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c +index a726b9f29cb7..61af24e379ad 100644 +--- a/fs/devpts/inode.c ++++ b/fs/devpts/inode.c +@@ -564,6 +564,26 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx) + mutex_unlock(&allocated_ptys_lock); + } + ++/* ++ * pty code needs to hold extra references in case of last /dev/tty close ++ */ ++ ++void devpts_add_ref(struct inode *ptmx_inode) ++{ ++ struct super_block *sb = pts_sb_from_inode(ptmx_inode); ++ ++ atomic_inc(&sb->s_active); ++ ihold(ptmx_inode); ++} ++ ++void devpts_del_ref(struct inode *ptmx_inode) ++{ ++ struct super_block *sb = pts_sb_from_inode(ptmx_inode); ++ ++ iput(ptmx_inode); ++ deactivate_super(sb); ++} ++ + /** + * devpts_pty_new -- create a new inode in /dev/pts/ + * @ptmx_inode: inode of the master +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c +index 831cb305c63f..ae8ce49c0437 100644 +--- a/fs/ext4/resize.c ++++ b/fs/ext4/resize.c +@@ -186,7 +186,7 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size) + if (flex_gd == NULL) + goto out3; + +- if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data)) ++ if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data)) + goto out2; + flex_gd->count = flexbg_size; + +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index d8a60270581c..f9785e3c63ac 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -1006,6 +1006,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, + tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes); + flush_dcache_page(page); + ++ iov_iter_advance(ii, tmp); + if (!tmp) { + unlock_page(page); + page_cache_release(page); +@@ -1018,7 +1019,6 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, + req->page_descs[req->num_pages].length = tmp; + req->num_pages++; + +- iov_iter_advance(ii, tmp); + count += tmp; + pos += tmp; + offset += tmp; +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 7b945957e230..45a7dd36b4a6 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -1169,6 +1169,7 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s + * Protect the call to nfs4_state_set_mode_locked and + * serialise the stateid update + */ ++ spin_lock(&state->owner->so_lock); + write_seqlock(&state->seqlock); + if (deleg_stateid != NULL) { + nfs4_stateid_copy(&state->stateid, deleg_stateid); +@@ -1177,7 +1178,6 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s + if (open_stateid != NULL) + nfs_set_open_stateid_locked(state, open_stateid, fmode); + write_sequnlock(&state->seqlock); +- spin_lock(&state->owner->so_lock); + update_open_stateflags(state, fmode); + spin_unlock(&state->owner->so_lock); + } +diff --git a/fs/proc/array.c b/fs/proc/array.c +index baf3464bbce0..ca4a64b4b119 100644 +--- a/fs/proc/array.c ++++ b/fs/proc/array.c +@@ -391,7 +391,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + + state = *get_task_state(task); + vsize = eip = esp = 0; +- permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT); ++ permitted = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT); + mm = get_task_mm(task); + if (mm) { + vsize = task_vsize(mm); +diff --git a/fs/proc/base.c b/fs/proc/base.c +index 489ba8caafc0..be78847782c4 100644 +--- a/fs/proc/base.c ++++ b/fs/proc/base.c +@@ -239,7 +239,7 @@ out: + + static int proc_pid_auxv(struct 
task_struct *task, char *buffer) + { +- struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ); ++ struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); + int res = PTR_ERR(mm); + if (mm && !IS_ERR(mm)) { + unsigned int nwords = 0; +@@ -269,7 +269,7 @@ static int proc_pid_wchan(struct task_struct *task, char *buffer) + wchan = get_wchan(task); + + if (lookup_symbol_name(wchan, symname) < 0) +- if (!ptrace_may_access(task, PTRACE_MODE_READ)) ++ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) + return 0; + else + return sprintf(buffer, "%lu", wchan); +@@ -283,7 +283,7 @@ static int lock_trace(struct task_struct *task) + int err = mutex_lock_killable(&task->signal->cred_guard_mutex); + if (err) + return err; +- if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) { ++ if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) { + mutex_unlock(&task->signal->cred_guard_mutex); + return -EPERM; + } +@@ -557,7 +557,7 @@ static int proc_fd_access_allowed(struct inode *inode) + */ + task = get_proc_task(inode); + if (task) { +- allowed = ptrace_may_access(task, PTRACE_MODE_READ); ++ allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS); + put_task_struct(task); + } + return allowed; +@@ -592,7 +592,7 @@ static bool has_pid_permissions(struct pid_namespace *pid, + return true; + if (in_group_p(pid->pid_gid)) + return true; +- return ptrace_may_access(task, PTRACE_MODE_READ); ++ return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS); + } + + +@@ -707,7 +707,7 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode) + if (!task) + return -ESRCH; + +- mm = mm_access(task, mode); ++ mm = mm_access(task, mode | PTRACE_MODE_FSCREDS); + put_task_struct(task); + + if (IS_ERR(mm)) +@@ -1760,7 +1760,7 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags) + if (!task) + goto out_notask; + +- mm = mm_access(task, PTRACE_MODE_READ); ++ mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); + if (IS_ERR_OR_NULL(mm)) + goto out; + +@@ -1895,7 +1895,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir, + goto out; + + result = -EACCES; +- if (!ptrace_may_access(task, PTRACE_MODE_READ)) ++ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) + goto out_put_task; + + result = -ENOENT; +@@ -1952,7 +1952,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx) + goto out; + + ret = -EACCES; +- if (!ptrace_may_access(task, PTRACE_MODE_READ)) ++ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) + goto out_put_task; + + ret = 0; +@@ -2431,7 +2431,7 @@ static int do_io_accounting(struct task_struct *task, char *buffer, int whole) + if (result) + return result; + +- if (!ptrace_may_access(task, PTRACE_MODE_READ)) { ++ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) { + result = -EACCES; + goto out_unlock; + } +diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c +index 9ae46b87470d..a05ed26075e3 100644 +--- a/fs/proc/namespaces.c ++++ b/fs/proc/namespaces.c +@@ -119,7 +119,7 @@ static void *proc_ns_follow_link(struct dentry *dentry, struct nameidata *nd) + if (!task) + goto out; + +- if (!ptrace_may_access(task, PTRACE_MODE_READ)) ++ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) + goto out_put_task; + + ns_path.dentry = proc_ns_get_dentry(sb, task, ei->ns.ns_ops); +@@ -152,7 +152,7 @@ static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int bufl + if (!task) + goto out; + +- if (!ptrace_may_access(task, PTRACE_MODE_READ)) ++ if (!ptrace_may_access(task, 
PTRACE_MODE_READ_FSCREDS)) + goto out_put_task; + + len = -ENOENT; +diff --git a/fs/udf/inode.c b/fs/udf/inode.c +index 142d29e3ccdf..5808bfe95126 100644 +--- a/fs/udf/inode.c ++++ b/fs/udf/inode.c +@@ -2062,14 +2062,29 @@ void udf_write_aext(struct inode *inode, struct extent_position *epos, + epos->offset += adsize; + } + ++/* ++ * Only 1 indirect extent in a row really makes sense but allow upto 16 in case ++ * someone does some weird stuff. ++ */ ++#define UDF_MAX_INDIR_EXTS 16 ++ + int8_t udf_next_aext(struct inode *inode, struct extent_position *epos, + struct kernel_lb_addr *eloc, uint32_t *elen, int inc) + { + int8_t etype; ++ unsigned int indirections = 0; + + while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) == + (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) { + int block; ++ ++ if (++indirections > UDF_MAX_INDIR_EXTS) { ++ udf_err(inode->i_sb, ++ "too many indirect extents in inode %lu\n", ++ inode->i_ino); ++ return -1; ++ } ++ + epos->block = *eloc; + epos->offset = sizeof(struct allocExtDesc); + brelse(epos->bh); +diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c +index 44b815e57f94..685fbd8a2937 100644 +--- a/fs/udf/unicode.c ++++ b/fs/udf/unicode.c +@@ -132,11 +132,15 @@ int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i) + if (c < 0x80U) + utf_o->u_name[utf_o->u_len++] = (uint8_t)c; + else if (c < 0x800U) { ++ if (utf_o->u_len > (UDF_NAME_LEN - 4)) ++ break; + utf_o->u_name[utf_o->u_len++] = + (uint8_t)(0xc0 | (c >> 6)); + utf_o->u_name[utf_o->u_len++] = + (uint8_t)(0x80 | (c & 0x3f)); + } else { ++ if (utf_o->u_len > (UDF_NAME_LEN - 5)) ++ break; + utf_o->u_name[utf_o->u_len++] = + (uint8_t)(0xe0 | (c >> 12)); + utf_o->u_name[utf_o->u_len++] = +@@ -177,17 +181,22 @@ int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i) + static int udf_UTF8toCS0(dstring *ocu, struct ustr *utf, int length) + { + unsigned c, i, max_val, utf_char; +- int utf_cnt, u_len; ++ int utf_cnt, u_len, u_ch; + + memset(ocu, 0, sizeof(dstring) * length); + ocu[0] = 8; + max_val = 0xffU; ++ u_ch = 1; + + try_again: + u_len = 0U; + utf_char = 0U; + utf_cnt = 0U; + for (i = 0U; i < utf->u_len; i++) { ++ /* Name didn't fit? */ ++ if (u_len + 1 + u_ch >= length) ++ return 0; ++ + c = (uint8_t)utf->u_name[i]; + + /* Complete a multi-byte UTF-8 character */ +@@ -229,6 +238,7 @@ try_again: + if (max_val == 0xffU) { + max_val = 0xffffU; + ocu[0] = (uint8_t)0x10U; ++ u_ch = 2; + goto try_again; + } + goto error_out; +@@ -281,7 +291,7 @@ static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o, + c = (c << 8) | ocu[i++]; + + len = nls->uni2char(c, &utf_o->u_name[utf_o->u_len], +- UDF_NAME_LEN - utf_o->u_len); ++ UDF_NAME_LEN - 2 - utf_o->u_len); + /* Valid character? */ + if (len >= 0) + utf_o->u_len += len; +@@ -299,15 +309,19 @@ static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni, + int len; + unsigned i, max_val; + uint16_t uni_char; +- int u_len; ++ int u_len, u_ch; + + memset(ocu, 0, sizeof(dstring) * length); + ocu[0] = 8; + max_val = 0xffU; ++ u_ch = 1; + + try_again: + u_len = 0U; + for (i = 0U; i < uni->u_len; i++) { ++ /* Name didn't fit? 
*/ ++ if (u_len + 1 + u_ch >= length) ++ return 0; + len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char); + if (!len) + continue; +@@ -320,6 +334,7 @@ try_again: + if (uni_char > max_val) { + max_val = 0xffffU; + ocu[0] = (uint8_t)0x10U; ++ u_ch = 2; + goto try_again; + } + +diff --git a/include/linux/compiler.h b/include/linux/compiler.h +index 2472740d7ab2..6977192bdb59 100644 +--- a/include/linux/compiler.h ++++ b/include/linux/compiler.h +@@ -131,7 +131,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); + */ + #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) + #define __trace_if(cond) \ +- if (__builtin_constant_p((cond)) ? !!(cond) : \ ++ if (__builtin_constant_p(!!(cond)) ? !!(cond) : \ + ({ \ + int ______r; \ + static struct ftrace_branch_data \ +diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h +index 251a2090a554..e0ee0b3000b2 100644 +--- a/include/linux/devpts_fs.h ++++ b/include/linux/devpts_fs.h +@@ -19,6 +19,8 @@ + + int devpts_new_index(struct inode *ptmx_inode); + void devpts_kill_index(struct inode *ptmx_inode, int idx); ++void devpts_add_ref(struct inode *ptmx_inode); ++void devpts_del_ref(struct inode *ptmx_inode); + /* mknod in devpts */ + struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index, + void *priv); +@@ -32,6 +34,8 @@ void devpts_pty_kill(struct inode *inode); + /* Dummy stubs in the no-pty case */ + static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; } + static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { } ++static inline void devpts_add_ref(struct inode *ptmx_inode) { } ++static inline void devpts_del_ref(struct inode *ptmx_inode) { } + static inline struct inode *devpts_pty_new(struct inode *ptmx_inode, + dev_t device, int index, void *priv) + { +diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h +index cc79eff4a1ad..608d90444b6f 100644 +--- a/include/linux/ptrace.h ++++ b/include/linux/ptrace.h +@@ -56,7 +56,29 @@ extern void exit_ptrace(struct task_struct *tracer); + #define PTRACE_MODE_READ 0x01 + #define PTRACE_MODE_ATTACH 0x02 + #define PTRACE_MODE_NOAUDIT 0x04 +-/* Returns true on success, false on denial. */ ++#define PTRACE_MODE_FSCREDS 0x08 ++#define PTRACE_MODE_REALCREDS 0x10 ++ ++/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */ ++#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS) ++#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS) ++#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS) ++#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS) ++ ++/** ++ * ptrace_may_access - check whether the caller is permitted to access ++ * a target task. ++ * @task: target task ++ * @mode: selects type of access and caller credentials ++ * ++ * Returns true on success, false on denial. ++ * ++ * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must ++ * be set in @mode to specify whether the access was requested through ++ * a filesystem syscall (should use effective capabilities and fsuid ++ * of the caller) or through an explicit syscall such as ++ * process_vm_writev or ptrace (and should use the real credentials). 
++ */ + extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); + + static inline int ptrace_reparented(struct task_struct *child) +diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h +index e8be53ecfc45..16604454e95f 100644 +--- a/include/linux/radix-tree.h ++++ b/include/linux/radix-tree.h +@@ -320,12 +320,28 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, + struct radix_tree_iter *iter, unsigned flags); + + /** ++ * radix_tree_iter_retry - retry this chunk of the iteration ++ * @iter: iterator state ++ * ++ * If we iterate over a tree protected only by the RCU lock, a race ++ * against deletion or creation may result in seeing a slot for which ++ * radix_tree_deref_retry() returns true. If so, call this function ++ * and continue the iteration. ++ */ ++static inline __must_check ++void **radix_tree_iter_retry(struct radix_tree_iter *iter) ++{ ++ iter->next_index = iter->index; ++ return NULL; ++} ++ ++/** + * radix_tree_chunk_size - get current chunk size + * + * @iter: pointer to radix tree iterator + * Returns: current chunk size + */ +-static __always_inline unsigned ++static __always_inline long + radix_tree_chunk_size(struct radix_tree_iter *iter) + { + return iter->next_index - iter->index; +@@ -359,9 +375,9 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) + return slot + offset + 1; + } + } else { +- unsigned size = radix_tree_chunk_size(iter) - 1; ++ long size = radix_tree_chunk_size(iter); + +- while (size--) { ++ while (--size > 0) { + slot++; + iter->index++; + if (likely(*slot)) +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 3bf20e36a8e7..bfd91336e888 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -3096,7 +3096,7 @@ find_lively_task_by_vpid(pid_t vpid) + + /* Reuse ptrace permission checks for now. */ + err = -EACCES; +- if (!ptrace_may_access(task, PTRACE_MODE_READ)) ++ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) + goto errout; + + return task; +@@ -5945,6 +5945,10 @@ static int perf_tp_filter_match(struct perf_event *event, + { + void *record = data->raw->data; + ++ /* only top level events have filters set */ ++ if (event->parent) ++ event = event->parent; ++ + if (likely(!event->filter) || filter_match_preds(event->filter, record)) + return 1; + return 0; +diff --git a/kernel/futex.c b/kernel/futex.c +index fda2950f2ce4..b125c385a257 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -2648,6 +2648,11 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + if (q.pi_state && (q.pi_state->owner != current)) { + spin_lock(q.lock_ptr); + ret = fixup_pi_state_owner(uaddr2, &q, current); ++ /* ++ * Drop the reference to the pi state which ++ * the requeue_pi() code acquired for us. 
++ */ ++ free_pi_state(q.pi_state); + spin_unlock(q.lock_ptr); + } + } else { +@@ -2774,7 +2779,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pid, + } + + ret = -EPERM; +- if (!ptrace_may_access(p, PTRACE_MODE_READ)) ++ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) + goto err_unlock; + + head = p->robust_list; +diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c +index f9f44fd4d34d..3888617a1f9e 100644 +--- a/kernel/futex_compat.c ++++ b/kernel/futex_compat.c +@@ -155,7 +155,7 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, + } + + ret = -EPERM; +- if (!ptrace_may_access(p, PTRACE_MODE_READ)) ++ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) + goto err_unlock; + + head = p->compat_robust_list; +diff --git a/kernel/kcmp.c b/kernel/kcmp.c +index 0aa69ea1d8fd..3a47fa998fe0 100644 +--- a/kernel/kcmp.c ++++ b/kernel/kcmp.c +@@ -122,8 +122,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type, + &task2->signal->cred_guard_mutex); + if (ret) + goto err; +- if (!ptrace_may_access(task1, PTRACE_MODE_READ) || +- !ptrace_may_access(task2, PTRACE_MODE_READ)) { ++ if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) || ++ !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) { + ret = -EPERM; + goto err_unlock; + } +diff --git a/kernel/module.c b/kernel/module.c +index 49f17c27bf11..e40617fac8ad 100644 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -3381,6 +3381,11 @@ static inline int is_arm_mapping_symbol(const char *str) + && (str[2] == '\0' || str[2] == '.'); + } + ++static const char *symname(struct module *mod, unsigned int symnum) ++{ ++ return mod->strtab + mod->symtab[symnum].st_name; ++} ++ + static const char *get_ksymbol(struct module *mod, + unsigned long addr, + unsigned long *size, +@@ -3403,15 +3408,15 @@ static const char *get_ksymbol(struct module *mod, + + /* We ignore unnamed symbols: they're uninformative + * and inserted at a whim. */ ++ if (*symname(mod, i) == '\0' ++ || is_arm_mapping_symbol(symname(mod, i))) ++ continue; ++ + if (mod->symtab[i].st_value <= addr +- && mod->symtab[i].st_value > mod->symtab[best].st_value +- && *(mod->strtab + mod->symtab[i].st_name) != '\0' +- && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name)) ++ && mod->symtab[i].st_value > mod->symtab[best].st_value) + best = i; + if (mod->symtab[i].st_value > addr +- && mod->symtab[i].st_value < nextval +- && *(mod->strtab + mod->symtab[i].st_name) != '\0' +- && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name)) ++ && mod->symtab[i].st_value < nextval) + nextval = mod->symtab[i].st_value; + } + +@@ -3422,7 +3427,7 @@ static const char *get_ksymbol(struct module *mod, + *size = nextval - mod->symtab[best].st_value; + if (offset) + *offset = addr - mod->symtab[best].st_value; +- return mod->strtab + mod->symtab[best].st_name; ++ return symname(mod, best); + } + + /* For kallsyms to ask for address resolution. NULL means not found. 
Careful +@@ -3523,8 +3528,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + if (symnum < mod->num_symtab) { + *value = mod->symtab[symnum].st_value; + *type = mod->symtab[symnum].st_info; +- strlcpy(name, mod->strtab + mod->symtab[symnum].st_name, +- KSYM_NAME_LEN); ++ strlcpy(name, symname(mod, symnum), KSYM_NAME_LEN); + strlcpy(module_name, mod->name, MODULE_NAME_LEN); + *exported = is_exported(name, *value, mod); + preempt_enable(); +@@ -3541,7 +3545,7 @@ static unsigned long mod_find_symname(struct module *mod, const char *name) + unsigned int i; + + for (i = 0; i < mod->num_symtab; i++) +- if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 && ++ if (strcmp(name, symname(mod, i)) == 0 && + mod->symtab[i].st_info != 'U') + return mod->symtab[i].st_value; + return 0; +@@ -3583,7 +3587,7 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + if (mod->state == MODULE_STATE_UNFORMED) + continue; + for (i = 0; i < mod->num_symtab; i++) { +- ret = fn(data, mod->strtab + mod->symtab[i].st_name, ++ ret = fn(data, symname(mod, i), + mod, mod->symtab[i].st_value); + if (ret != 0) + return ret; +diff --git a/kernel/ptrace.c b/kernel/ptrace.c +index be9760f8284a..4524314ecbb4 100644 +--- a/kernel/ptrace.c ++++ b/kernel/ptrace.c +@@ -225,6 +225,14 @@ static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode) + static int __ptrace_may_access(struct task_struct *task, unsigned int mode) + { + const struct cred *cred = current_cred(), *tcred; ++ int dumpable = 0; ++ kuid_t caller_uid; ++ kgid_t caller_gid; ++ ++ if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) { ++ WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n"); ++ return -EPERM; ++ } + + /* May we inspect the given task? + * This check is used both for attaching with ptrace +@@ -234,18 +242,33 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode) + * because setting up the necessary parent/child relationship + * or halting the specified task is impossible. + */ +- int dumpable = 0; ++ + /* Don't let security modules deny introspection */ + if (same_thread_group(task, current)) + return 0; + rcu_read_lock(); ++ if (mode & PTRACE_MODE_FSCREDS) { ++ caller_uid = cred->fsuid; ++ caller_gid = cred->fsgid; ++ } else { ++ /* ++ * Using the euid would make more sense here, but something ++ * in userland might rely on the old behavior, and this ++ * shouldn't be a security problem since ++ * PTRACE_MODE_REALCREDS implies that the caller explicitly ++ * used a syscall that requests access to another process ++ * (and not a filesystem syscall to procfs). 
++ */ ++ caller_uid = cred->uid; ++ caller_gid = cred->gid; ++ } + tcred = __task_cred(task); +- if (uid_eq(cred->uid, tcred->euid) && +- uid_eq(cred->uid, tcred->suid) && +- uid_eq(cred->uid, tcred->uid) && +- gid_eq(cred->gid, tcred->egid) && +- gid_eq(cred->gid, tcred->sgid) && +- gid_eq(cred->gid, tcred->gid)) ++ if (uid_eq(caller_uid, tcred->euid) && ++ uid_eq(caller_uid, tcred->suid) && ++ uid_eq(caller_uid, tcred->uid) && ++ gid_eq(caller_gid, tcred->egid) && ++ gid_eq(caller_gid, tcred->sgid) && ++ gid_eq(caller_gid, tcred->gid)) + goto ok; + if (ptrace_has_cap(tcred->user_ns, mode)) + goto ok; +@@ -312,7 +335,7 @@ static int ptrace_attach(struct task_struct *task, long request, + goto out; + + task_lock(task); +- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH); ++ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS); + task_unlock(task); + if (retval) + goto unlock_creds; +diff --git a/lib/dma-debug.c b/lib/dma-debug.c +index 98f2d7e91a91..1cbfc16d0b37 100644 +--- a/lib/dma-debug.c ++++ b/lib/dma-debug.c +@@ -1165,7 +1165,7 @@ static inline bool overlap(void *addr, unsigned long len, void *start, void *end + + static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len) + { +- if (overlap(addr, len, _text, _etext) || ++ if (overlap(addr, len, _stext, _etext) || + overlap(addr, len, __start_rodata, __end_rodata)) + err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len); + } +@@ -1440,7 +1440,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size, + entry->type = dma_debug_coherent; + entry->dev = dev; + entry->pfn = page_to_pfn(virt_to_page(virt)); +- entry->offset = (size_t) virt & PAGE_MASK; ++ entry->offset = (size_t) virt & ~PAGE_MASK; + entry->size = size; + entry->dev_addr = dma_addr; + entry->direction = DMA_BIDIRECTIONAL; +@@ -1456,7 +1456,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size, + .type = dma_debug_coherent, + .dev = dev, + .pfn = page_to_pfn(virt_to_page(virt)), +- .offset = (size_t) virt & PAGE_MASK, ++ .offset = (size_t) virt & ~PAGE_MASK, + .dev_addr = addr, + .size = size, + .direction = DMA_BIDIRECTIONAL, +diff --git a/lib/dump_stack.c b/lib/dump_stack.c +index f23b63f0a1c3..1e21b4682666 100644 +--- a/lib/dump_stack.c ++++ b/lib/dump_stack.c +@@ -25,6 +25,7 @@ static atomic_t dump_lock = ATOMIC_INIT(-1); + + asmlinkage void dump_stack(void) + { ++ unsigned long flags; + int was_locked; + int old; + int cpu; +@@ -33,9 +34,8 @@ asmlinkage void dump_stack(void) + * Permit this cpu to perform nested stack dumps while serialising + * against other CPUs + */ +- preempt_disable(); +- + retry: ++ local_irq_save(flags); + cpu = smp_processor_id(); + old = atomic_cmpxchg(&dump_lock, -1, cpu); + if (old == -1) { +@@ -43,6 +43,7 @@ retry: + } else if (old == cpu) { + was_locked = 1; + } else { ++ local_irq_restore(flags); + cpu_relax(); + goto retry; + } +@@ -52,7 +53,7 @@ retry: + if (!was_locked) + atomic_set(&dump_lock, -1); + +- preempt_enable(); ++ local_irq_restore(flags); + } + #else + asmlinkage void dump_stack(void) +diff --git a/lib/klist.c b/lib/klist.c +index 358a368a2947..2e59aecbec0d 100644 +--- a/lib/klist.c ++++ b/lib/klist.c +@@ -282,9 +282,9 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i, + struct klist_node *n) + { + i->i_klist = k; +- i->i_cur = n; +- if (n) +- kref_get(&n->n_ref); ++ i->i_cur = NULL; ++ if (n && kref_get_unless_zero(&n->n_ref)) ++ i->i_cur = n; + } + 
EXPORT_SYMBOL_GPL(klist_iter_init_node);
+
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index 7e30d2a7f346..7fdc4121e44c 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -977,9 +977,13 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
+ return 0;
+
+ radix_tree_for_each_slot(slot, root, &iter, first_index) {
+- results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
++ results[ret] = rcu_dereference_raw(*slot);
+ if (!results[ret])
+ continue;
++ if (radix_tree_is_indirect_ptr(results[ret])) {
++ slot = radix_tree_iter_retry(&iter);
++ continue;
++ }
+ if (++ret == max_items)
+ break;
+ }
+@@ -1056,9 +1060,13 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
+ return 0;
+
+ radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
+- results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
++ results[ret] = rcu_dereference_raw(*slot);
+ if (!results[ret])
+ continue;
++ if (radix_tree_is_indirect_ptr(results[ret])) {
++ slot = radix_tree_iter_retry(&iter);
++ continue;
++ }
+ if (++ret == max_items)
+ break;
+ }
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index b58d4fbe6c48..51d7cc8bb5f1 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -5858,16 +5858,17 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
+ swap_buffers:
+ /* Swap primary and spare array */
+ thresholds->spare = thresholds->primary;
+- /* If all events are unregistered, free the spare array */
+- if (!new) {
+- kfree(thresholds->spare);
+- thresholds->spare = NULL;
+- }
+
+ rcu_assign_pointer(thresholds->primary, new);
+
+ /* To be sure that nobody uses thresholds */
+ synchronize_rcu();
++
++ /* If all events are unregistered, free the spare array */
++ if (!new) {
++ kfree(thresholds->spare);
++ thresholds->spare = NULL;
++ }
+ unlock:
+ mutex_unlock(&memcg->thresholds_lock);
+ }
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 42aeb848b8e9..580dbc22ef74 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1509,7 +1509,7 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
+ * Did it turn free?
+ */ + ret = __get_any_page(page, pfn, 0); +- if (!PageLRU(page)) { ++ if (ret == 1 && !PageLRU(page)) { + /* Drop page reference which is from __get_any_page() */ + put_page(page); + pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n", +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c +index 5bba3b35bec8..35e14c784bef 100644 +--- a/mm/memory_hotplug.c ++++ b/mm/memory_hotplug.c +@@ -1220,23 +1220,30 @@ int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) + */ + static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) + { +- unsigned long pfn; ++ unsigned long pfn, sec_end_pfn; + struct zone *zone = NULL; + struct page *page; + int i; +- for (pfn = start_pfn; ++ for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn); + pfn < end_pfn; +- pfn += MAX_ORDER_NR_PAGES) { +- i = 0; +- /* This is just a CONFIG_HOLES_IN_ZONE check.*/ +- while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i)) +- i++; +- if (i == MAX_ORDER_NR_PAGES) ++ pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) { ++ /* Make sure the memory section is present first */ ++ if (!present_section_nr(pfn_to_section_nr(pfn))) + continue; +- page = pfn_to_page(pfn + i); +- if (zone && page_zone(page) != zone) +- return 0; +- zone = page_zone(page); ++ for (; pfn < sec_end_pfn && pfn < end_pfn; ++ pfn += MAX_ORDER_NR_PAGES) { ++ i = 0; ++ /* This is just a CONFIG_HOLES_IN_ZONE check.*/ ++ while ((i < MAX_ORDER_NR_PAGES) && ++ !pfn_valid_within(pfn + i)) ++ i++; ++ if (i == MAX_ORDER_NR_PAGES) ++ continue; ++ page = pfn_to_page(pfn + i); ++ if (zone && page_zone(page) != zone) ++ return 0; ++ zone = page_zone(page); ++ } + } + return 1; + } +diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c +index fd26d0433509..e739825be8b3 100644 +--- a/mm/process_vm_access.c ++++ b/mm/process_vm_access.c +@@ -298,7 +298,7 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec, + goto free_proc_pages; + } + +- mm = mm_access(task, PTRACE_MODE_ATTACH); ++ mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS); + if (!mm || IS_ERR(mm)) { + rc = IS_ERR(mm) ? 
PTR_ERR(mm) : -ESRCH; + /* +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c +index 9ad561152eb6..8b61288e5746 100644 +--- a/net/ipv6/ip6mr.c ++++ b/net/ipv6/ip6mr.c +@@ -336,7 +336,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id) + + static void ip6mr_free_table(struct mr6_table *mrt) + { +- del_timer(&mrt->ipmr_expire_timer); ++ del_timer_sync(&mrt->ipmr_expire_timer); + mroute_clean_tables(mrt, true); + kfree(mrt); + } +diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter +index 549d0ab8c662..dabd2a4cb4e5 100755 +--- a/scripts/bloat-o-meter ++++ b/scripts/bloat-o-meter +@@ -56,8 +56,8 @@ for name in common: + delta.sort() + delta.reverse() + +-print "add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \ +- (add, remove, grow, shrink, up, -down, up-down) +-print "%-40s %7s %7s %+7s" % ("function", "old", "new", "delta") ++print("add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \ ++ (add, remove, grow, shrink, up, -down, up-down)) ++print("%-40s %7s %7s %+7s" % ("function", "old", "new", "delta")) + for d, n in delta: +- if d: print "%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d) ++ if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d)) +diff --git a/security/commoncap.c b/security/commoncap.c +index 963dc5981661..a484506445d7 100644 +--- a/security/commoncap.c ++++ b/security/commoncap.c +@@ -142,12 +142,17 @@ int cap_ptrace_access_check(struct task_struct *child, unsigned int mode) + { + int ret = 0; + const struct cred *cred, *child_cred; ++ const kernel_cap_t *caller_caps; + + rcu_read_lock(); + cred = current_cred(); + child_cred = __task_cred(child); ++ if (mode & PTRACE_MODE_FSCREDS) ++ caller_caps = &cred->cap_effective; ++ else ++ caller_caps = &cred->cap_permitted; + if (cred->user_ns == child_cred->user_ns && +- cap_issubset(child_cred->cap_permitted, cred->cap_permitted)) ++ cap_issubset(child_cred->cap_permitted, *caller_caps)) + goto out; + if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE)) + goto out; +diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c +index 67c91d226552..ee0522a8f730 100644 +--- a/sound/core/seq/seq_ports.c ++++ b/sound/core/seq/seq_ports.c +@@ -540,19 +540,22 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client, + bool is_src, bool ack) + { + struct snd_seq_port_subs_info *grp; ++ struct list_head *list; ++ bool empty; + + grp = is_src ? &port->c_src : &port->c_dest; ++ list = is_src ? 
&subs->src_list : &subs->dest_list; + down_write(&grp->list_mutex); + write_lock_irq(&grp->list_lock); +- if (is_src) +- list_del(&subs->src_list); +- else +- list_del(&subs->dest_list); ++ empty = list_empty(list); ++ if (!empty) ++ list_del_init(list); + grp->exclusive = 0; + write_unlock_irq(&grp->list_lock); + up_write(&grp->list_mutex); + +- unsubscribe_port(client, port, grp, &subs->info, ack); ++ if (!empty) ++ unsubscribe_port(client, port, grp, &subs->info, ack); + } + + /* connect two ports */ +diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c +index 30e8e0c3f117..56b8a3e00522 100644 +--- a/tools/lib/traceevent/event-parse.c ++++ b/tools/lib/traceevent/event-parse.c +@@ -4249,13 +4249,12 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event + sizeof(long) != 8) { + char *p; + +- ls = 2; + /* make %l into %ll */ +- p = strchr(format, 'l'); +- if (p) ++ if (ls == 1 && (p = strchr(format, 'l'))) + memmove(p+1, p, strlen(p)+1); + else if (strcmp(format, "%p") == 0) + strcpy(format, "0x%llx"); ++ ls = 2; + } + switch (ls) { + case -2: +diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt +index fae38d9a44a4..65d6a7a88c53 100644 +--- a/tools/perf/Documentation/perf-trace.txt ++++ b/tools/perf/Documentation/perf-trace.txt +@@ -59,7 +59,6 @@ OPTIONS + --verbose=:: + Verbosity level. + +--i:: + --no-inherit:: + Child tasks do not inherit counters. + diff --git a/patch/kernel/cubox-default/patch-3.14.62-63.patch b/patch/kernel/cubox-default/patch-3.14.62-63.patch new file mode 100644 index 000000000..5dcb8ea0c --- /dev/null +++ b/patch/kernel/cubox-default/patch-3.14.62-63.patch @@ -0,0 +1,4346 @@ +diff --git a/Makefile b/Makefile +index b738f644c71e..0843ef4cc0a4 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 62 ++SUBLEVEL = 63 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c +index e550b117ec4f..2d6a36ea8aaf 100644 +--- a/arch/arc/kernel/unwind.c ++++ b/arch/arc/kernel/unwind.c +@@ -986,42 +986,13 @@ int arc_unwind(struct unwind_frame_info *frame) + (const u8 *)(fde + + 1) + + *fde, ptrType); +- if (pc >= endLoc) ++ if (pc >= endLoc) { + fde = NULL; +- } else +- fde = NULL; +- } +- if (fde == NULL) { +- for (fde = table->address, tableSize = table->size; +- cie = NULL, tableSize > sizeof(*fde) +- && tableSize - sizeof(*fde) >= *fde; +- tableSize -= sizeof(*fde) + *fde, +- fde += 1 + *fde / sizeof(*fde)) { +- cie = cie_for_fde(fde, table); +- if (cie == &bad_cie) { + cie = NULL; +- break; + } +- if (cie == NULL +- || cie == ¬_fde +- || (ptrType = fde_pointer_type(cie)) < 0) +- continue; +- ptr = (const u8 *)(fde + 2); +- startLoc = read_pointer(&ptr, +- (const u8 *)(fde + 1) + +- *fde, ptrType); +- if (!startLoc) +- continue; +- if (!(ptrType & DW_EH_PE_indirect)) +- ptrType &= +- DW_EH_PE_FORM | DW_EH_PE_signed; +- endLoc = +- startLoc + read_pointer(&ptr, +- (const u8 *)(fde + +- 1) + +- *fde, ptrType); +- if (pc >= startLoc && pc < endLoc) +- break; ++ } else { ++ fde = NULL; ++ cie = NULL; + } + } + } +diff --git a/arch/arm/boot/dts/wm8650.dtsi b/arch/arm/boot/dts/wm8650.dtsi +index 7525982262ac..2897c1ac47d8 100644 +--- a/arch/arm/boot/dts/wm8650.dtsi ++++ b/arch/arm/boot/dts/wm8650.dtsi +@@ -187,6 +187,15 @@ + interrupts = <43>; + }; + ++ sdhc@d800a000 { ++ compatible = "wm,wm8505-sdhc"; ++ reg = <0xd800a000 0x400>; ++ interrupts = <20>, <21>; ++ clocks = 
<&clksdhc>; ++ bus-width = <4>; ++ sdon-inverted; ++ }; ++ + fb: fb@d8050800 { + compatible = "wm,wm8505-fb"; + reg = <0xd8050800 0x200>; +diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S +index 03a2db58b22d..ba5ce99c021d 100644 +--- a/arch/mips/kvm/kvm_locore.S ++++ b/arch/mips/kvm/kvm_locore.S +@@ -159,9 +159,11 @@ FEXPORT(__kvm_mips_vcpu_run) + + FEXPORT(__kvm_mips_load_asid) + /* Set the ASID for the Guest Kernel */ +- INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ +- /* addresses shift to 0x80000000 */ +- bltz t0, 1f /* If kernel */ ++ PTR_L t0, VCPU_COP0(k1) ++ LONG_L t0, COP0_STATUS(t0) ++ andi t0, KSU_USER | ST0_ERL | ST0_EXL ++ xori t0, KSU_USER ++ bnez t0, 1f /* If kernel */ + INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ + INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ + 1: +@@ -438,9 +440,11 @@ __kvm_mips_return_to_guest: + mtc0 t0, CP0_EPC + + /* Set the ASID for the Guest Kernel */ +- INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ +- /* addresses shift to 0x80000000 */ +- bltz t0, 1f /* If kernel */ ++ PTR_L t0, VCPU_COP0(k1) ++ LONG_L t0, COP0_STATUS(t0) ++ andi t0, KSU_USER | ST0_ERL | ST0_EXL ++ xori t0, KSU_USER ++ bnez t0, 1f /* If kernel */ + INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ + INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ + 1: +diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c +index 897c605263f2..12d850b68763 100644 +--- a/arch/mips/kvm/kvm_mips.c ++++ b/arch/mips/kvm/kvm_mips.c +@@ -313,7 +313,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) + + if (!gebase) { + err = -ENOMEM; +- goto out_free_cpu; ++ goto out_uninit_cpu; + } + kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n", + ALIGN(size, PAGE_SIZE), gebase); +@@ -373,6 +373,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) + out_free_gebase: + kfree(gebase); + ++out_uninit_cpu: ++ kvm_vcpu_uninit(vcpu); ++ + out_free_cpu: + kfree(vcpu); + +diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c +index c76f297b7149..33085819cd89 100644 +--- a/arch/mips/kvm/kvm_mips_emul.c ++++ b/arch/mips/kvm/kvm_mips_emul.c +@@ -935,7 +935,7 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, + + base = (inst >> 21) & 0x1f; + op_inst = (inst >> 16) & 0x1f; +- offset = inst & 0xffff; ++ offset = (int16_t)inst; + cache = (inst >> 16) & 0x3; + op = (inst >> 18) & 0x7; + +diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c +index 4d1ee88864e8..18c8b819b0aa 100644 +--- a/arch/s390/mm/extable.c ++++ b/arch/s390/mm/extable.c +@@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start, + int i; + + /* Normalize entries to being relative to the start of the section */ +- for (p = start, i = 0; p < finish; p++, i += 8) ++ for (p = start, i = 0; p < finish; p++, i += 8) { + p->insn += i; ++ p->fixup += i + 4; ++ } + sort(start, finish - start, sizeof(*start), cmp_ex, NULL); + /* Denormalize all entries */ +- for (p = start, i = 0; p < finish; p++, i += 8) ++ for (p = start, i = 0; p < finish; p++, i += 8) { + p->insn -= i; ++ p->fixup -= i + 4; ++ } + } + + #ifdef CONFIG_MODULES +diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c +index 25db14a33d03..47ae8d757773 100644 +--- a/arch/sparc/kernel/sys_sparc_64.c ++++ b/arch/sparc/kernel/sys_sparc_64.c +@@ -412,7 +412,7 @@ out: + + SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality) + { +- int ret; ++ long ret; + + if 
(personality(current->personality) == PER_LINUX32 && + personality(personality) == PER_LINUX) +diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c +index 337518c5042a..b412c62486f0 100644 +--- a/arch/um/os-Linux/start_up.c ++++ b/arch/um/os-Linux/start_up.c +@@ -95,6 +95,8 @@ static int start_ptraced_child(void) + { + int pid, n, status; + ++ fflush(stdout); ++ + pid = fork(); + if (pid == 0) + ptrace_child(); +diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c +index ae7d543f23ed..8894f5bc4620 100644 +--- a/arch/x86/platform/efi/efi.c ++++ b/arch/x86/platform/efi/efi.c +@@ -248,12 +248,19 @@ static efi_status_t __init phys_efi_set_virtual_address_map( + efi_memory_desc_t *virtual_map) + { + efi_status_t status; ++ unsigned long flags; + + efi_call_phys_prelog(); ++ ++ /* Disable interrupts around EFI calls: */ ++ local_irq_save(flags); + status = efi_call_phys4(efi_phys.set_virtual_address_map, + memory_map_size, descriptor_size, + descriptor_version, virtual_map); ++ local_irq_restore(flags); ++ + efi_call_phys_epilog(); ++ + return status; + } + +diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c +index 9ee3491e31fb..be4e7eb41674 100644 +--- a/arch/x86/platform/efi/efi_32.c ++++ b/arch/x86/platform/efi/efi_32.c +@@ -33,11 +33,10 @@ + + /* + * To make EFI call EFI runtime service in physical addressing mode we need +- * prelog/epilog before/after the invocation to disable interrupt, to +- * claim EFI runtime service handler exclusively and to duplicate a memory in +- * low memory space say 0 - 3G. ++ * prolog/epilog before/after the invocation to claim the EFI runtime service ++ * handler exclusively and to duplicate a memory mapping in low memory space, ++ * say 0 - 3G. + */ +-static unsigned long efi_rt_eflags; + + void efi_sync_low_kernel_mappings(void) {} + void __init efi_dump_pagetable(void) {} +@@ -59,8 +58,6 @@ void efi_call_phys_prelog(void) + { + struct desc_ptr gdt_descr; + +- local_irq_save(efi_rt_eflags); +- + load_cr3(initial_page_table); + __flush_tlb_all(); + +@@ -79,8 +76,6 @@ void efi_call_phys_epilog(void) + + load_cr3(swapper_pg_dir); + __flush_tlb_all(); +- +- local_irq_restore(efi_rt_eflags); + } + + void __init efi_runtime_mkexec(void) +diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c +index 666b74a09092..b1be0425c686 100644 +--- a/arch/x86/platform/efi/efi_64.c ++++ b/arch/x86/platform/efi/efi_64.c +@@ -41,7 +41,6 @@ + #include + + static pgd_t *save_pgd __initdata; +-static unsigned long efi_flags __initdata; + + /* + * We allocate runtime services regions bottom-up, starting from -4G, i.e. 
+@@ -87,7 +86,6 @@ void __init efi_call_phys_prelog(void) + return; + + early_code_mapping_set_exec(1); +- local_irq_save(efi_flags); + + n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE); + save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL); +@@ -115,7 +113,6 @@ void __init efi_call_phys_epilog(void) + set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]); + kfree(save_pgd); + __flush_tlb_all(); +- local_irq_restore(efi_flags); + early_code_mapping_set_exec(0); + } + +diff --git a/block/partitions/mac.c b/block/partitions/mac.c +index 76d8ba6379a9..bd5b91465230 100644 +--- a/block/partitions/mac.c ++++ b/block/partitions/mac.c +@@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state) + Sector sect; + unsigned char *data; + int slot, blocks_in_map; +- unsigned secsize; ++ unsigned secsize, datasize, partoffset; + #ifdef CONFIG_PPC_PMAC + int found_root = 0; + int found_root_goodness = 0; +@@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state) + } + secsize = be16_to_cpu(md->block_size); + put_dev_sector(sect); +- data = read_part_sector(state, secsize/512, §); ++ datasize = round_down(secsize, 512); ++ data = read_part_sector(state, datasize / 512, §); + if (!data) + return -1; +- part = (struct mac_partition *) (data + secsize%512); ++ partoffset = secsize % 512; ++ if (partoffset + sizeof(*part) > datasize) ++ return -1; ++ part = (struct mac_partition *) (data + partoffset); + if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) { + put_dev_sector(sect); + return 0; /* not a MacOS disk */ +diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c +index f8c0b8dbeb75..88bc8e6b2a54 100644 +--- a/crypto/async_tx/async_memcpy.c ++++ b/crypto/async_tx/async_memcpy.c +@@ -53,7 +53,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, + struct dmaengine_unmap_data *unmap = NULL; + + if (device) +- unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO); ++ unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT); + + if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { + unsigned long dma_prep_flags = 0; +diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c +index d05327caf69d..7eb264e65267 100644 +--- a/crypto/async_tx/async_pq.c ++++ b/crypto/async_tx/async_pq.c +@@ -176,7 +176,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, + BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks))); + + if (device) +- unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO); ++ unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT); + + if (unmap && + (src_cnt <= dma_maxpq(device, 0) || +@@ -294,7 +294,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, + BUG_ON(disks < 4); + + if (device) +- unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO); ++ unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT); + + if (unmap && disks <= dma_maxpq(device, 0) && + is_dma_pq_aligned(device, offset, 0, len)) { +diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c +index 934a84981495..8fab6275ea1f 100644 +--- a/crypto/async_tx/async_raid6_recov.c ++++ b/crypto/async_tx/async_raid6_recov.c +@@ -41,7 +41,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, + u8 *a, *b, *c; + + if (dma) +- unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); ++ unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT); + + if (unmap) { + 
struct device *dev = dma->dev; +@@ -105,7 +105,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len, + u8 *d, *s; + + if (dma) +- unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); ++ unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT); + + if (unmap) { + dma_addr_t dma_dest[2]; +diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c +index e1bce26cd4f9..da75777f2b3f 100644 +--- a/crypto/async_tx/async_xor.c ++++ b/crypto/async_tx/async_xor.c +@@ -182,7 +182,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, + BUG_ON(src_cnt <= 1); + + if (device) +- unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO); ++ unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT); + + if (unmap && is_dma_xor_aligned(device, offset, 0, len)) { + struct dma_async_tx_descriptor *tx; +@@ -278,7 +278,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, + BUG_ON(src_cnt <= 1); + + if (device) +- unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO); ++ unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT); + + if (unmap && src_cnt <= device->max_xor && + is_dma_xor_aligned(device, offset, 0, len)) { +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c +index 136803c47cdb..96e5ed188636 100644 +--- a/drivers/ata/libata-sff.c ++++ b/drivers/ata/libata-sff.c +@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap, + static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) + { + struct ata_port *ap = qc->ap; +- unsigned long flags; + + if (ap->ops->error_handler) { + if (in_wq) { +- spin_lock_irqsave(ap->lock, flags); +- + /* EH might have kicked in while host lock is + * released. + */ +@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) + } else + ata_port_freeze(ap); + } +- +- spin_unlock_irqrestore(ap->lock, flags); + } else { + if (likely(!(qc->err_mask & AC_ERR_HSM))) + ata_qc_complete(qc); +@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) + } + } else { + if (in_wq) { +- spin_lock_irqsave(ap->lock, flags); + ata_sff_irq_on(ap); + ata_qc_complete(qc); +- spin_unlock_irqrestore(ap->lock, flags); + } else + ata_qc_complete(qc); + } +@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, + { + struct ata_link *link = qc->dev->link; + struct ata_eh_info *ehi = &link->eh_info; +- unsigned long flags = 0; + int poll_next; + ++ lockdep_assert_held(ap->lock); ++ + WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); + + /* Make sure ata_sff_qc_issue() does not throw things +@@ -1112,14 +1106,6 @@ fsm_start: + } + } + +- /* Send the CDB (atapi) or the first data block (ata pio out). +- * During the state transition, interrupt handler shouldn't +- * be invoked before the data transfer is complete and +- * hsm_task_state is changed. Hence, the following locking. +- */ +- if (in_wq) +- spin_lock_irqsave(ap->lock, flags); +- + if (qc->tf.protocol == ATA_PROT_PIO) { + /* PIO data out protocol. + * send first data block. +@@ -1135,9 +1121,6 @@ fsm_start: + /* send CDB */ + atapi_send_cdb(ap, qc); + +- if (in_wq) +- spin_unlock_irqrestore(ap->lock, flags); +- + /* if polling, ata_sff_pio_task() handles the rest. + * otherwise, interrupt handler takes over from here. 
+ */ +@@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work) + u8 status; + int poll_next; + ++ spin_lock_irq(ap->lock); ++ + BUG_ON(ap->sff_pio_task_link == NULL); + /* qc can be NULL if timeout occurred */ + qc = ata_qc_from_tag(ap, link->active_tag); + if (!qc) { + ap->sff_pio_task_link = NULL; +- return; ++ goto out_unlock; + } + + fsm_start: +@@ -1381,11 +1366,14 @@ fsm_start: + */ + status = ata_sff_busy_wait(ap, ATA_BUSY, 5); + if (status & ATA_BUSY) { ++ spin_unlock_irq(ap->lock); + ata_msleep(ap, 2); ++ spin_lock_irq(ap->lock); ++ + status = ata_sff_busy_wait(ap, ATA_BUSY, 10); + if (status & ATA_BUSY) { + ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); +- return; ++ goto out_unlock; + } + } + +@@ -1402,6 +1390,8 @@ fsm_start: + */ + if (poll_next) + goto fsm_start; ++out_unlock: ++ spin_unlock_irq(ap->lock); + } + + /** +diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c +index b7695e804635..fa94fba8fa21 100644 +--- a/drivers/ata/sata_sil.c ++++ b/drivers/ata/sata_sil.c +@@ -631,6 +631,9 @@ static void sil_dev_config(struct ata_device *dev) + unsigned int n, quirks = 0; + unsigned char model_num[ATA_ID_PROD_LEN + 1]; + ++ /* This controller doesn't support trim */ ++ dev->horkage |= ATA_HORKAGE_NOTRIM; ++ + ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); + + for (n = 0; sil_blacklist[n].product; n++) +diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c +index 1098ed3b9b89..dc45ddb36117 100644 +--- a/drivers/clocksource/vt8500_timer.c ++++ b/drivers/clocksource/vt8500_timer.c +@@ -50,6 +50,8 @@ + + #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) + ++#define MIN_OSCR_DELTA 16 ++ + static void __iomem *regbase; + + static cycle_t vt8500_timer_read(struct clocksource *cs) +@@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles, + cpu_relax(); + writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL); + +- if ((signed)(alarm - clocksource.read(&clocksource)) <= 16) ++ if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA) + return -ETIME; + + writel(1, regbase + TIMER_IER_VAL); +@@ -160,7 +162,7 @@ static void __init vt8500_timer_init(struct device_node *np) + pr_err("%s: setup_irq failed for %s\n", __func__, + clockevent.name); + clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ, +- 4, 0xf0000000); ++ MIN_OSCR_DELTA * 2, 0xf0000000); + } + + CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init); +diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c +index 3ae48ee2f488..df79cb0bf04e 100644 +--- a/drivers/dma/dw/core.c ++++ b/drivers/dma/dw/core.c +@@ -176,7 +176,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc) + + /*----------------------------------------------------------------------*/ + +-static inline unsigned int dwc_fast_fls(unsigned long long v) ++static inline unsigned int dwc_fast_ffs(unsigned long long v) + { + /* + * We can be a lot more clever here, but this should take care +@@ -720,7 +720,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + dw->data_width[dwc->dst_master]); + + src_width = dst_width = min_t(unsigned int, data_width, +- dwc_fast_fls(src | dest | len)); ++ dwc_fast_ffs(src | dest | len)); + + ctllo = DWC_DEFAULT_CTLLO(chan) + | DWC_CTLL_DST_WIDTH(dst_width) +@@ -799,7 +799,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + + switch (direction) { + case DMA_MEM_TO_DEV: +- reg_width = __fls(sconfig->dst_addr_width); ++ reg_width = 
__ffs(sconfig->dst_addr_width); + reg = sconfig->dst_addr; + ctllo = (DWC_DEFAULT_CTLLO(chan) + | DWC_CTLL_DST_WIDTH(reg_width) +@@ -819,7 +819,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + len = sg_dma_len(sg); + + mem_width = min_t(unsigned int, +- data_width, dwc_fast_fls(mem | len)); ++ data_width, dwc_fast_ffs(mem | len)); + + slave_sg_todev_fill_desc: + desc = dwc_desc_get(dwc); +@@ -859,7 +859,7 @@ slave_sg_todev_fill_desc: + } + break; + case DMA_DEV_TO_MEM: +- reg_width = __fls(sconfig->src_addr_width); ++ reg_width = __ffs(sconfig->src_addr_width); + reg = sconfig->src_addr; + ctllo = (DWC_DEFAULT_CTLLO(chan) + | DWC_CTLL_SRC_WIDTH(reg_width) +@@ -879,7 +879,7 @@ slave_sg_todev_fill_desc: + len = sg_dma_len(sg); + + mem_width = min_t(unsigned int, +- data_width, dwc_fast_fls(mem | len)); ++ data_width, dwc_fast_ffs(mem | len)); + + slave_sg_fromdev_fill_desc: + desc = dwc_desc_get(dwc); +diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c +index 592af5f0cf39..53587377e672 100644 +--- a/drivers/edac/edac_device.c ++++ b/drivers/edac/edac_device.c +@@ -435,16 +435,13 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, + */ + void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev) + { +- int status; +- + if (!edac_dev->edac_check) + return; + +- status = cancel_delayed_work(&edac_dev->work); +- if (status == 0) { +- /* workq instance might be running, wait for it */ +- flush_workqueue(edac_workqueue); +- } ++ edac_dev->op_state = OP_OFFLINE; ++ ++ cancel_delayed_work_sync(&edac_dev->work); ++ flush_workqueue(edac_workqueue); + } + + /* +diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c +index 33edd6766344..19dc0bc9b136 100644 +--- a/drivers/edac/edac_mc.c ++++ b/drivers/edac/edac_mc.c +@@ -584,18 +584,10 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec, + */ + static void edac_mc_workq_teardown(struct mem_ctl_info *mci) + { +- int status; +- +- if (mci->op_state != OP_RUNNING_POLL) +- return; +- +- status = cancel_delayed_work(&mci->work); +- if (status == 0) { +- edac_dbg(0, "not canceled, flush the queue\n"); ++ mci->op_state = OP_OFFLINE; + +- /* workq instance might be running, wait for it */ +- flush_workqueue(edac_workqueue); +- } ++ cancel_delayed_work_sync(&mci->work); ++ flush_workqueue(edac_workqueue); + } + + /* +diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c +index 2cf44b4db80c..b4b38603b804 100644 +--- a/drivers/edac/edac_pci.c ++++ b/drivers/edac/edac_pci.c +@@ -274,13 +274,12 @@ static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci, + */ + static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci) + { +- int status; +- + edac_dbg(0, "\n"); + +- status = cancel_delayed_work(&pci->work); +- if (status == 0) +- flush_workqueue(edac_workqueue); ++ pci->op_state = OP_OFFLINE; ++ ++ cancel_delayed_work_sync(&pci->work); ++ flush_workqueue(edac_workqueue); + } + + /* +diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h +index 9833a1b1acc1..3fc122306f1f 100644 +--- a/drivers/gpu/drm/ast/ast_drv.h ++++ b/drivers/gpu/drm/ast/ast_drv.h +@@ -296,6 +296,7 @@ int ast_framebuffer_init(struct drm_device *dev, + int ast_fbdev_init(struct drm_device *dev); + void ast_fbdev_fini(struct drm_device *dev); + void ast_fbdev_set_suspend(struct drm_device *dev, int state); ++void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr); + + struct ast_bo { + struct ttm_buffer_object bo; +diff --git 
a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c +index a28640f47c27..b55b6b1c9fe2 100644 +--- a/drivers/gpu/drm/ast/ast_fb.c ++++ b/drivers/gpu/drm/ast/ast_fb.c +@@ -367,3 +367,10 @@ void ast_fbdev_set_suspend(struct drm_device *dev, int state) + + fb_set_suspend(ast->fbdev->helper.fbdev, state); + } ++ ++void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr) ++{ ++ ast->fbdev->helper.fbdev->fix.smem_start = ++ ast->fbdev->helper.fbdev->apertures->ranges[0].base + gpu_addr; ++ ast->fbdev->helper.fbdev->fix.smem_len = ast->vram_size - gpu_addr; ++} +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c +index d830b38e54f6..c0f284230a39 100644 +--- a/drivers/gpu/drm/ast/ast_main.c ++++ b/drivers/gpu/drm/ast/ast_main.c +@@ -312,6 +312,7 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags) + dev->mode_config.min_height = 0; + dev->mode_config.preferred_depth = 24; + dev->mode_config.prefer_shadow = 1; ++ dev->mode_config.fb_base = pci_resource_start(ast->dev->pdev, 0); + + if (ast->chip == AST2100 || + ast->chip == AST2200 || +diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c +index d2e56e95d886..cea916fa164b 100644 +--- a/drivers/gpu/drm/ast/ast_mode.c ++++ b/drivers/gpu/drm/ast/ast_mode.c +@@ -509,6 +509,8 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc, + ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); + if (ret) + DRM_ERROR("failed to kmap fbcon\n"); ++ else ++ ast_fbdev_set_base(ast, gpu_addr); + } + ast_bo_unreserve(bo); + +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c +index 958b26dcac8a..0a9d1fd32994 100644 +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -8821,11 +8821,21 @@ connected_sink_compute_bpp(struct intel_connector * connector, + pipe_config->pipe_bpp = connector->base.display_info.bpc*3; + } + +- /* Clamp bpp to 8 on screens without EDID 1.4 */ +- if (connector->base.display_info.bpc == 0 && bpp > 24) { +- DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", +- bpp); +- pipe_config->pipe_bpp = 24; ++ /* Clamp bpp to default limit on screens without EDID 1.4 */ ++ if (connector->base.display_info.bpc == 0) { ++ int type = connector->base.connector_type; ++ int clamp_bpp = 24; ++ ++ /* Fall back to 18 bpp when DP sink capability is unknown. 
*/ ++ if (type == DRM_MODE_CONNECTOR_DisplayPort || ++ type == DRM_MODE_CONNECTOR_eDP) ++ clamp_bpp = 18; ++ ++ if (bpp > clamp_bpp) { ++ DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n", ++ bpp, clamp_bpp); ++ pipe_config->pipe_bpp = clamp_bpp; ++ } + } + } + +diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c +index 56a13a915155..0928c5e2bafd 100644 +--- a/drivers/gpu/drm/qxl/qxl_ioctl.c ++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c +@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev, + cmd->command_size)) + return -EFAULT; + +- reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL); ++ reloc_info = kmalloc_array(cmd->relocs_num, ++ sizeof(struct qxl_reloc_info), GFP_KERNEL); + if (!reloc_info) + return -ENOMEM; + +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c +index 2fa3cf615a67..6a3b5f92219f 100644 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c +@@ -436,7 +436,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, + } + + /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */ +- if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) && ++ if (((dev->pdev->device == 0x9802) || ++ (dev->pdev->device == 0x9805) || ++ (dev->pdev->device == 0x9806)) && + (dev->pdev->subsystem_vendor == 0x1734) && + (dev->pdev->subsystem_device == 0x11bd)) { + if (*connector_type == DRM_MODE_CONNECTOR_VGA) { +@@ -447,14 +449,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, + } + } + +- /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */ +- if ((dev->pdev->device == 0x9805) && +- (dev->pdev->subsystem_vendor == 0x1734) && +- (dev->pdev->subsystem_device == 0x11bd)) { +- if (*connector_type == DRM_MODE_CONNECTOR_VGA) +- return false; +- } +- + return true; + } + +diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c +index f8b20e1c0820..614144d34aea 100644 +--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c ++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c +@@ -79,6 +79,11 @@ static void radeon_hotplug_work_func(struct work_struct *work) + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *connector; + ++ /* we can race here at startup, some boards seem to trigger ++ * hotplug irqs when they shouldn't. 
*/ ++ if (!rdev->mode_info.mode_config_initialized) ++ return; ++ + mutex_lock(&mode_config->mutex); + if (mode_config->num_connector) { + list_for_each_entry(connector, &mode_config->connector_list, head) +diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c +index 0b00de55b2a4..9a559140e4a3 100644 +--- a/drivers/gpu/drm/radeon/radeon_pm.c ++++ b/drivers/gpu/drm/radeon/radeon_pm.c +@@ -915,8 +915,6 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) + + /* update display watermarks based on new power state */ + radeon_bandwidth_update(rdev); +- /* update displays */ +- radeon_dpm_display_configuration_changed(rdev); + + rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; + rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; +@@ -936,6 +934,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) + + radeon_dpm_post_set_power_state(rdev); + ++ /* update displays */ ++ radeon_dpm_display_configuration_changed(rdev); ++ + if (rdev->asic->dpm.force_performance_level) { + if (rdev->pm.dpm.thermal_active) { + enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; +@@ -1364,8 +1365,7 @@ int radeon_pm_late_init(struct radeon_device *rdev) + ret = device_create_file(rdev->dev, &dev_attr_power_method); + if (ret) + DRM_ERROR("failed to create device file for power method\n"); +- if (!ret) +- rdev->pm.sysfs_initialized = true; ++ rdev->pm.sysfs_initialized = true; + } + + mutex_lock(&rdev->pm.mutex); +diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c +index c0625805cdd7..a1d684266549 100644 +--- a/drivers/gpu/drm/radeon/radeon_sa.c ++++ b/drivers/gpu/drm/radeon/radeon_sa.c +@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev, + /* see if we can skip over some allocations */ + } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); + ++ for (i = 0; i < RADEON_NUM_RINGS; ++i) ++ radeon_fence_ref(fences[i]); ++ + spin_unlock(&sa_manager->wq.lock); + r = radeon_fence_wait_any(rdev, fences, false); ++ for (i = 0; i < RADEON_NUM_RINGS; ++i) ++ radeon_fence_unref(&fences[i]); + spin_lock(&sa_manager->wq.lock); + /* if we have nothing to wait for block */ + if (r == -ENOENT && block) { +diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c +index 45a9a03efc06..2d50433b8c72 100644 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c +@@ -623,7 +623,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm) + 0, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) { +- while (--i) { ++ while (i--) { + pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + gtt->ttm.dma_address[i] = 0; +diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c +index 8fcb932a3a55..aaefb10aa09e 100644 +--- a/drivers/gpu/drm/radeon/rv770_dpm.c ++++ b/drivers/gpu/drm/radeon/rv770_dpm.c +@@ -1415,7 +1415,7 @@ int rv770_resume_smc(struct radeon_device *rdev) + int rv770_set_sw_state(struct radeon_device *rdev) + { + if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK) +- return -EINVAL; ++ DRM_ERROR("rv770_set_sw_state failed\n"); + return 0; + } + +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +index 0771dcbf9ed0..7c48070cf9d8 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c ++++ 
b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +@@ -25,6 +25,7 @@ + * + **************************************************************************/ + #include ++#include + + #include + #include "vmwgfx_drv.h" +@@ -1383,6 +1384,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + static int __init vmwgfx_init(void) + { + int ret; ++ ++#ifdef CONFIG_VGA_CONSOLE ++ if (vgacon_text_force()) ++ return -EINVAL; ++#endif ++ + ret = drm_pci_init(&driver, &vmw_pci_driver); + if (ret) + DRM_ERROR("Failed initializing DRM.\n"); +diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c +index af0259708358..bbb554d586d4 100644 +--- a/drivers/gpu/vga/vgaarb.c ++++ b/drivers/gpu/vga/vgaarb.c +@@ -392,8 +392,10 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) + set_current_state(interruptible ? + TASK_INTERRUPTIBLE : + TASK_UNINTERRUPTIBLE); +- if (signal_pending(current)) { +- rc = -EINTR; ++ if (interruptible && signal_pending(current)) { ++ __set_current_state(TASK_RUNNING); ++ remove_wait_queue(&vga_wait_queue, &wait); ++ rc = -ERESTARTSYS; + break; + } + schedule(); +diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c +index 126516414c11..44223f5d92d8 100644 +--- a/drivers/hwmon/ads1015.c ++++ b/drivers/hwmon/ads1015.c +@@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel, + struct ads1015_data *data = i2c_get_clientdata(client); + unsigned int pga = data->channel_data[channel].pga; + int fullscale = fullscale_table[pga]; +- const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0; ++ const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0; + + return DIV_ROUND_CLOSEST(reg * fullscale, mask); + } +diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c +index 095bb046e2c8..875348699e6e 100644 +--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c ++++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c +@@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en + error = l2t_send(tdev, skb, l2e); + if (error < 0) + kfree_skb(skb); +- return error; ++ return error < 0 ? error : 0; + } + + int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb) +@@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb) + error = cxgb3_ofld_send(tdev, skb); + if (error < 0) + kfree_skb(skb); +- return error; ++ return error < 0 ? 
error : 0; + } + + static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb) +diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c +index dabb697b1c2a..48ba1c3e945a 100644 +--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c ++++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c +@@ -286,15 +286,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) + struct qib_ibdev *dev = to_idev(ibqp->device); + struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num); + struct qib_mcast *mcast = NULL; +- struct qib_mcast_qp *p, *tmp; ++ struct qib_mcast_qp *p, *tmp, *delp = NULL; + struct rb_node *n; + int last = 0; + int ret; + +- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) { +- ret = -EINVAL; +- goto bail; +- } ++ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) ++ return -EINVAL; + + spin_lock_irq(&ibp->lock); + +@@ -303,8 +301,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) + while (1) { + if (n == NULL) { + spin_unlock_irq(&ibp->lock); +- ret = -EINVAL; +- goto bail; ++ return -EINVAL; + } + + mcast = rb_entry(n, struct qib_mcast, rb_node); +@@ -328,6 +325,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) + */ + list_del_rcu(&p->list); + mcast->n_attached--; ++ delp = p; + + /* If this was the last attached QP, remove the GID too. */ + if (list_empty(&mcast->qp_list)) { +@@ -338,15 +336,16 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) + } + + spin_unlock_irq(&ibp->lock); ++ /* QP not attached */ ++ if (!delp) ++ return -EINVAL; ++ /* ++ * Wait for any list walkers to finish before freeing the ++ * list element. ++ */ ++ wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1); ++ qib_mcast_qp_free(delp); + +- if (p) { +- /* +- * Wait for any list walkers to finish before freeing the +- * list element. 
+- */ +- wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1); +- qib_mcast_qp_free(p); +- } + if (last) { + atomic_dec(&mcast->refcount); + wait_event(mcast->wait, !atomic_read(&mcast->refcount)); +@@ -355,11 +354,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) + dev->n_mcast_grps_allocated--; + spin_unlock_irq(&dev->n_mcast_grps_lock); + } +- +- ret = 0; +- +-bail: +- return ret; ++ return 0; + } + + int qib_mcast_tree_empty(struct qib_ibport *ibp) +diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c +index 3ae2bb8d9cf2..21a44b168d46 100644 +--- a/drivers/irqchip/irq-versatile-fpga.c ++++ b/drivers/irqchip/irq-versatile-fpga.c +@@ -204,7 +204,12 @@ int __init fpga_irq_of_init(struct device_node *node, + if (!parent_irq) + parent_irq = -1; + ++#ifdef CONFIG_ARCH_VERSATILE ++ fpga_irq_init(base, node->name, IRQ_SIC_START, parent_irq, valid_mask, ++ node); ++#else + fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node); ++#endif + + writel(clear_mask, base + IRQ_ENABLE_CLEAR); + writel(clear_mask, base + FIQ_ENABLE_CLEAR); +diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c +index fbcb6225f794..74a5786ddcce 100644 +--- a/drivers/md/bcache/btree.c ++++ b/drivers/md/bcache/btree.c +@@ -1641,6 +1641,7 @@ static void bch_btree_gc(struct cache_set *c) + do { + ret = btree_root(gc_root, c, &op, &writes, &stats); + closure_sync(&writes); ++ cond_resched(); + + if (ret && ret != -EAGAIN) + pr_warn("gc failed!"); +@@ -2037,8 +2038,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, + rw_lock(true, b, b->level); + + if (b->key.ptr[0] != btree_ptr || +- b->seq != seq + 1) ++ b->seq != seq + 1) { ++ op->lock = b->level; + goto out; ++ } + } + + SET_KEY_PTRS(check_key, 1); +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c +index 24a3a1546caa..1b6beb1e3142 100644 +--- a/drivers/md/bcache/super.c ++++ b/drivers/md/bcache/super.c +@@ -712,6 +712,8 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c, + WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") || + sysfs_create_link(&c->kobj, &d->kobj, d->name), + "Couldn't create device <-> cache set symlinks"); ++ ++ clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags); + } + + static void bcache_device_detach(struct bcache_device *d) +@@ -882,8 +884,11 @@ void bch_cached_dev_run(struct cached_dev *dc) + buf[SB_LABEL_SIZE] = '\0'; + env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf); + +- if (atomic_xchg(&dc->running, 1)) ++ if (atomic_xchg(&dc->running, 1)) { ++ kfree(env[1]); ++ kfree(env[2]); + return; ++ } + + if (!d->c && + BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) { +@@ -2081,8 +2086,10 @@ static int __init bcache_init(void) + closure_debug_init(); + + bcache_major = register_blkdev(0, "bcache"); +- if (bcache_major < 0) ++ if (bcache_major < 0) { ++ unregister_reboot_notifier(&reboot); + return bcache_major; ++ } + + if (!(bcache_wq = create_workqueue("bcache")) || + !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || +diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c +index f4300e4c0114..d6525c12c8d8 100644 +--- a/drivers/md/bcache/writeback.c ++++ b/drivers/md/bcache/writeback.c +@@ -323,6 +323,10 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode, + + static bool dirty_pred(struct keybuf *buf, struct bkey *k) + { ++ struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys); ++ ++ BUG_ON(KEY_INODE(k) != dc->disk.id); 
++ + return KEY_DIRTY(k); + } + +@@ -372,11 +376,24 @@ next: + } + } + ++/* ++ * Returns true if we scanned the entire disk ++ */ + static bool refill_dirty(struct cached_dev *dc) + { + struct keybuf *buf = &dc->writeback_keys; ++ struct bkey start = KEY(dc->disk.id, 0, 0); + struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0); +- bool searched_from_start = false; ++ struct bkey start_pos; ++ ++ /* ++ * make sure keybuf pos is inside the range for this disk - at bringup ++ * we might not be attached yet so this disk's inode nr isn't ++ * initialized then ++ */ ++ if (bkey_cmp(&buf->last_scanned, &start) < 0 || ++ bkey_cmp(&buf->last_scanned, &end) > 0) ++ buf->last_scanned = start; + + if (dc->partial_stripes_expensive) { + refill_full_stripes(dc); +@@ -384,14 +401,20 @@ static bool refill_dirty(struct cached_dev *dc) + return false; + } + +- if (bkey_cmp(&buf->last_scanned, &end) >= 0) { +- buf->last_scanned = KEY(dc->disk.id, 0, 0); +- searched_from_start = true; +- } +- ++ start_pos = buf->last_scanned; + bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred); + +- return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start; ++ if (bkey_cmp(&buf->last_scanned, &end) < 0) ++ return false; ++ ++ /* ++ * If we get to the end start scanning again from the beginning, and ++ * only scan up to where we initially started scanning from: ++ */ ++ buf->last_scanned = start; ++ bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred); ++ ++ return bkey_cmp(&buf->last_scanned, &start_pos) >= 0; + } + + static int bch_writeback_thread(void *arg) +diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h +index e2f8598937ac..afe7ecada503 100644 +--- a/drivers/md/bcache/writeback.h ++++ b/drivers/md/bcache/writeback.h +@@ -63,7 +63,8 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio, + + static inline void bch_writeback_queue(struct cached_dev *dc) + { +- wake_up_process(dc->writeback_thread); ++ if (!IS_ERR_OR_NULL(dc->writeback_thread)) ++ wake_up_process(dc->writeback_thread); + } + + static inline void bch_writeback_add(struct cached_dev *dc) +diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h +index 0b2536247cf5..84e27708ad97 100644 +--- a/drivers/md/dm-exception-store.h ++++ b/drivers/md/dm-exception-store.h +@@ -70,7 +70,7 @@ struct dm_exception_store_type { + * Update the metadata with this exception. 
+ */ + void (*commit_exception) (struct dm_exception_store *store, +- struct dm_exception *e, ++ struct dm_exception *e, int valid, + void (*callback) (void *, int success), + void *callback_context); + +diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c +index d6e88178d22c..d3272acc0f0e 100644 +--- a/drivers/md/dm-snap-persistent.c ++++ b/drivers/md/dm-snap-persistent.c +@@ -700,7 +700,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store, + } + + static void persistent_commit_exception(struct dm_exception_store *store, +- struct dm_exception *e, ++ struct dm_exception *e, int valid, + void (*callback) (void *, int success), + void *callback_context) + { +@@ -709,6 +709,9 @@ static void persistent_commit_exception(struct dm_exception_store *store, + struct core_exception ce; + struct commit_callback *cb; + ++ if (!valid) ++ ps->valid = 0; ++ + ce.old_chunk = e->old_chunk; + ce.new_chunk = e->new_chunk; + write_exception(ps, ps->current_committed++, &ce); +diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c +index 1ce9a2586e41..31439d53cf7e 100644 +--- a/drivers/md/dm-snap-transient.c ++++ b/drivers/md/dm-snap-transient.c +@@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store, + } + + static void transient_commit_exception(struct dm_exception_store *store, +- struct dm_exception *e, ++ struct dm_exception *e, int valid, + void (*callback) (void *, int success), + void *callback_context) + { + /* Just succeed */ +- callback(callback_context, 1); ++ callback(callback_context, valid); + } + + static void transient_usage(struct dm_exception_store *store, +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c +index c356a10b9ba5..2e9117630dbe 100644 +--- a/drivers/md/dm-snap.c ++++ b/drivers/md/dm-snap.c +@@ -1388,8 +1388,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err) + dm_table_event(s->ti->table); + } + +-static void pending_complete(struct dm_snap_pending_exception *pe, int success) ++static void pending_complete(void *context, int success) + { ++ struct dm_snap_pending_exception *pe = context; + struct dm_exception *e; + struct dm_snapshot *s = pe->snap; + struct bio *origin_bios = NULL; +@@ -1460,24 +1461,13 @@ out: + free_pending_exception(pe); + } + +-static void commit_callback(void *context, int success) +-{ +- struct dm_snap_pending_exception *pe = context; +- +- pending_complete(pe, success); +-} +- + static void complete_exception(struct dm_snap_pending_exception *pe) + { + struct dm_snapshot *s = pe->snap; + +- if (unlikely(pe->copy_error)) +- pending_complete(pe, 0); +- +- else +- /* Update the metadata if we are persistent */ +- s->store->type->commit_exception(s->store, &pe->e, +- commit_callback, pe); ++ /* Update the metadata if we are persistent */ ++ s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error, ++ pending_complete, pe); + } + + /* +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c +index 3412b86e79fd..7768de60f699 100644 +--- a/drivers/md/dm-thin-metadata.c ++++ b/drivers/md/dm-thin-metadata.c +@@ -1205,6 +1205,12 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd) + dm_block_t held_root; + + /* ++ * We commit to ensure the btree roots which we increment in a ++ * moment are up to date. ++ */ ++ __commit_transaction(pmd); ++ ++ /* + * Copy the superblock. 
+ */ + dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION); +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c +index b94e4648c199..d633a3921b3c 100644 +--- a/drivers/md/dm-thin.c ++++ b/drivers/md/dm-thin.c +@@ -1619,6 +1619,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) + case PM_WRITE: + if (old_mode != new_mode) + notify_of_pool_mode_change(pool, "write"); ++ pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space; + dm_pool_metadata_read_write(pool->pmd); + pool->process_bio = process_bio; + pool->process_discard = process_discard; +@@ -2567,8 +2568,8 @@ static void pool_postsuspend(struct dm_target *ti) + struct pool_c *pt = ti->private; + struct pool *pool = pt->pool; + +- cancel_delayed_work(&pool->waker); +- cancel_delayed_work(&pool->no_space_timeout); ++ cancel_delayed_work_sync(&pool->waker); ++ cancel_delayed_work_sync(&pool->no_space_timeout); + flush_workqueue(pool->wq); + (void) commit(pool); + } +diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c +index 7ba85e2b146b..7b4bb1f09b01 100644 +--- a/drivers/md/persistent-data/dm-btree.c ++++ b/drivers/md/persistent-data/dm-btree.c +@@ -250,6 +250,16 @@ static void pop_frame(struct del_stack *s) + dm_tm_unlock(s->tm, f->b); + } + ++static void unlock_all_frames(struct del_stack *s) ++{ ++ struct frame *f; ++ ++ while (unprocessed_frames(s)) { ++ f = s->spine + s->top--; ++ dm_tm_unlock(s->tm, f->b); ++ } ++} ++ + int dm_btree_del(struct dm_btree_info *info, dm_block_t root) + { + int r; +@@ -306,9 +316,13 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root) + pop_frame(s); + } + } +- + out: ++ if (r) { ++ /* cleanup all frames of del_stack */ ++ unlock_all_frames(s); ++ } + kfree(s); ++ + return r; + } + EXPORT_SYMBOL_GPL(dm_btree_del); +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c +index 199c9ccd1f5d..032ee39a0e9b 100644 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c +@@ -136,7 +136,7 @@ static int brb_push(struct bop_ring_buffer *brb, + return 0; + } + +-static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result) ++static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result) + { + struct block_op *bop; + +@@ -147,6 +147,14 @@ static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result) + result->type = bop->type; + result->block = bop->block; + ++ return 0; ++} ++ ++static int brb_pop(struct bop_ring_buffer *brb) ++{ ++ if (brb_empty(brb)) ++ return -ENODATA; ++ + brb->begin = brb_next(brb, brb->begin); + + return 0; +@@ -211,7 +219,7 @@ static int apply_bops(struct sm_metadata *smm) + while (!brb_empty(&smm->uncommitted)) { + struct block_op bop; + +- r = brb_pop(&smm->uncommitted, &bop); ++ r = brb_peek(&smm->uncommitted, &bop); + if (r) { + DMERR("bug in bop ring buffer"); + break; +@@ -220,6 +228,8 @@ static int apply_bops(struct sm_metadata *smm) + r = commit_bop(smm, &bop); + if (r) + break; ++ ++ brb_pop(&smm->uncommitted); + } + + return r; +@@ -681,7 +691,6 @@ static struct dm_space_map bootstrap_ops = { + static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks) + { + int r, i; +- enum allocation_event ev; + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); + dm_block_t old_len = smm->ll.nr_blocks; + +@@ -703,11 +712,12 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks) 
+ * allocate any new blocks. + */ + do { +- for (i = old_len; !r && i < smm->begin; i++) { +- r = sm_ll_inc(&smm->ll, i, &ev); +- if (r) +- goto out; +- } ++ for (i = old_len; !r && i < smm->begin; i++) ++ r = add_bop(smm, BOP_INC, i); ++ ++ if (r) ++ goto out; ++ + old_len = smm->begin; + + r = apply_bops(smm); +@@ -752,7 +762,6 @@ int dm_sm_metadata_create(struct dm_space_map *sm, + { + int r; + dm_block_t i; +- enum allocation_event ev; + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); + + smm->begin = superblock + 1; +@@ -780,7 +789,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm, + * allocated blocks that they were built from. + */ + for (i = superblock; !r && i < smm->begin; i++) +- r = sm_ll_inc(&smm->ll, i, &ev); ++ r = add_bop(smm, BOP_INC, i); + + if (r) + return r; +diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c +index 1f925e856974..46a984291b7d 100644 +--- a/drivers/media/dvb-core/dvb_frontend.c ++++ b/drivers/media/dvb-core/dvb_frontend.c +@@ -2195,9 +2195,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file, + dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n", + __func__, c->delivery_system, fe->ops.info.type); + +- /* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't +- * do it, it is done for it. */ +- info->caps |= FE_CAN_INVERSION_AUTO; ++ /* Set CAN_INVERSION_AUTO bit on in other than oneshot mode */ ++ if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT)) ++ info->caps |= FE_CAN_INVERSION_AUTO; + err = 0; + break; + } +diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c +index a2631be7ffac..08e0f0dd8728 100644 +--- a/drivers/media/dvb-frontends/tda1004x.c ++++ b/drivers/media/dvb-frontends/tda1004x.c +@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe) + { + struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache; + struct tda1004x_state* state = fe->demodulator_priv; ++ int status; + + dprintk("%s\n", __func__); + ++ status = tda1004x_read_byte(state, TDA1004X_STATUS_CD); ++ if (status == -1) ++ return -EIO; ++ ++ /* Only update the properties cache if device is locked */ ++ if (!(status & 8)) ++ return 0; ++ + // inversion status + fe_params->inversion = INVERSION_OFF; + if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20) +diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c +index 90f0d637cd9d..cd05840abc91 100644 +--- a/drivers/media/usb/gspca/ov534.c ++++ b/drivers/media/usb/gspca/ov534.c +@@ -1490,8 +1490,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev, + struct v4l2_fract *tpf = &cp->timeperframe; + struct sd *sd = (struct sd *) gspca_dev; + +- /* Set requested framerate */ +- sd->frame_rate = tpf->denominator / tpf->numerator; ++ if (tpf->numerator == 0 || tpf->denominator == 0) ++ /* Set default framerate */ ++ sd->frame_rate = 30; ++ else ++ /* Set requested framerate */ ++ sd->frame_rate = tpf->denominator / tpf->numerator; ++ + if (gspca_dev->streaming) + set_frame_rate(gspca_dev); + +diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c +index 640c2fe760b3..a6fbb2a07979 100644 +--- a/drivers/media/usb/gspca/topro.c ++++ b/drivers/media/usb/gspca/topro.c +@@ -4792,7 +4792,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev, + struct v4l2_fract *tpf = &cp->timeperframe; + int fr, i; + +- sd->framerate = tpf->denominator / tpf->numerator; ++ if (tpf->numerator == 0 || tpf->denominator 
== 0) ++ sd->framerate = 30; ++ else ++ sd->framerate = tpf->denominator / tpf->numerator; ++ + if (gspca_dev->streaming) + setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure)); + +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c +index d71f5ef036e0..92aeb1d2b41b 100644 +--- a/drivers/mmc/card/block.c ++++ b/drivers/mmc/card/block.c +@@ -62,8 +62,7 @@ MODULE_ALIAS("mmc:block"); + #define MMC_SANITIZE_REQ_TIMEOUT 240000 + #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) + +-#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \ +- (req->cmd_flags & REQ_META)) && \ ++#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \ + (rq_data_dir(req) == WRITE)) + #define PACKED_CMD_VER 0x01 + #define PACKED_CMD_WR 0x02 +@@ -1328,13 +1327,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, + + /* + * Reliable writes are used to implement Forced Unit Access and +- * REQ_META accesses, and are supported only on MMCs. +- * +- * XXX: this really needs a good explanation of why REQ_META +- * is treated special. ++ * are supported only on MMCs. + */ +- bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || +- (req->cmd_flags & REQ_META)) && ++ bool do_rel_wr = (req->cmd_flags & REQ_FUA) && + (rq_data_dir(req) == WRITE) && + (md->flags & MMC_BLK_REL_WR); + +diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c +index 4d721c6e2af0..ae360b3b4fda 100644 +--- a/drivers/mmc/core/sdio.c ++++ b/drivers/mmc/core/sdio.c +@@ -670,7 +670,7 @@ try_again: + */ + if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) { + err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, +- ocr); ++ ocr_card); + if (err == -EAGAIN) { + sdio_reset(host); + mmc_go_idle(host); +diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c +index b93122636531..8103db25db69 100644 +--- a/drivers/mmc/host/mmci.c ++++ b/drivers/mmc/host/mmci.c +@@ -1860,7 +1860,7 @@ static struct amba_id mmci_ids[] = { + { + .id = 0x00280180, + .mask = 0x00ffffff, +- .data = &variant_u300, ++ .data = &variant_nomadik, + }, + { + .id = 0x00480180, +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c +index 881bf89acfcc..75d3c28940f1 100644 +--- a/drivers/mmc/host/sdhci.c ++++ b/drivers/mmc/host/sdhci.c +@@ -2663,7 +2663,7 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host) + + static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) + { +- if (host->runtime_suspended || host->bus_on) ++ if (host->bus_on) + return; + host->bus_on = true; + pm_runtime_get_noresume(host->mmc->parent); +@@ -2671,7 +2671,7 @@ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) + + static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) + { +- if (host->runtime_suspended || !host->bus_on) ++ if (!host->bus_on) + return; + host->bus_on = false; + pm_runtime_put_noidle(host->mmc->parent); +diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c +index c2d0559115d3..732a8ed571c2 100644 +--- a/drivers/net/can/sja1000/sja1000.c ++++ b/drivers/net/can/sja1000/sja1000.c +@@ -187,6 +187,9 @@ static void sja1000_start(struct net_device *dev) + /* clear interrupt flags */ + priv->read_reg(priv, SJA1000_IR); + ++ /* clear interrupt flags */ ++ priv->read_reg(priv, SJA1000_IR); ++ + /* leave reset mode */ + set_normal_mode(dev); + } +diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c +index 52c42fd49510..a5735a7797f8 100644 +--- a/drivers/net/can/usb/ems_usb.c ++++ b/drivers/net/can/usb/ems_usb.c +@@ -117,6 +117,9 @@ 
MODULE_LICENSE("GPL v2"); + */ + #define EMS_USB_ARM7_CLOCK 8000000 + ++#define CPC_TX_QUEUE_TRIGGER_LOW 25 ++#define CPC_TX_QUEUE_TRIGGER_HIGH 35 ++ + /* + * CAN-Message representation in a CPC_MSG. Message object type is + * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or +@@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb) + switch (urb->status) { + case 0: + dev->free_slots = dev->intr_in_buffer[1]; ++ if(dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH){ ++ if (netif_queue_stopped(netdev)){ ++ netif_wake_queue(netdev); ++ } ++ } + break; + + case -ECONNRESET: /* unlink */ +@@ -529,8 +537,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb) + /* Release context */ + context->echo_index = MAX_TX_URBS; + +- if (netif_queue_stopped(netdev)) +- netif_wake_queue(netdev); + } + + /* +@@ -590,7 +596,7 @@ static int ems_usb_start(struct ems_usb *dev) + int err, i; + + dev->intr_in_buffer[0] = 0; +- dev->free_slots = 15; /* initial size */ ++ dev->free_slots = 50; /* initial size */ + + for (i = 0; i < MAX_RX_URBS; i++) { + struct urb *urb = NULL; +@@ -841,7 +847,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne + + /* Slow down tx path */ + if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS || +- dev->free_slots < 5) { ++ dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) { + netif_stop_queue(netdev); + } + } +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +index 1fbeaa9dd202..6ef93562c6b7 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +@@ -2401,10 +2401,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, + AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) + +-#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ +- AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ +- AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ +- AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) ++#define HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD \ ++ (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ ++ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ ++ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY) ++ ++#define HW_PRTY_ASSERT_SET_3 (HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD | \ ++ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) + + #define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR) +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +index 242874041ba4..e157adb85b2a 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +@@ -4631,9 +4631,7 @@ static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig, + res |= true; + break; + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: +- if (print) +- _print_next_block((*par_num)++, +- "MCP SCPAD"); ++ (*par_num)++; + /* clear latched SCPAD PATIRY from MCP */ + REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, + 1UL << 10); +@@ -4695,6 +4693,7 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, + (sig[3] & HW_PRTY_ASSERT_SET_3) || + (sig[4] & HW_PRTY_ASSERT_SET_4)) { + int par_num = 0; ++ + DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n" + "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", + sig[0] & HW_PRTY_ASSERT_SET_0, +@@ -4702,9 +4701,18 @@ static bool bnx2x_parity_attn(struct 
bnx2x *bp, bool *global, bool print, + sig[2] & HW_PRTY_ASSERT_SET_2, + sig[3] & HW_PRTY_ASSERT_SET_3, + sig[4] & HW_PRTY_ASSERT_SET_4); +- if (print) +- netdev_err(bp->dev, +- "Parity errors detected in blocks: "); ++ if (print) { ++ if (((sig[0] & HW_PRTY_ASSERT_SET_0) || ++ (sig[1] & HW_PRTY_ASSERT_SET_1) || ++ (sig[2] & HW_PRTY_ASSERT_SET_2) || ++ (sig[4] & HW_PRTY_ASSERT_SET_4)) || ++ (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) { ++ netdev_err(bp->dev, ++ "Parity errors detected in blocks: "); ++ } else { ++ print = false; ++ } ++ } + res |= bnx2x_check_blocks_with_parity0(bp, + sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print); + res |= bnx2x_check_blocks_with_parity1(bp, +diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c +index 988f9fec0bff..d8c1b69d0f66 100644 +--- a/drivers/net/wireless/iwlwifi/pcie/drv.c ++++ b/drivers/net/wireless/iwlwifi/pcie/drv.c +@@ -367,6 +367,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { + {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, ++ {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, +@@ -383,10 +384,10 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { + {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, +- {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, ++ {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, +- {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, ++ {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, +diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c +index 7c7a388c85ab..126f641a9582 100644 +--- a/drivers/pci/hotplug/acpiphp_glue.c ++++ b/drivers/pci/hotplug/acpiphp_glue.c +@@ -1133,8 +1133,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot) + { + pci_lock_rescan_remove(); + +- if (slot->flags & SLOT_IS_GOING_AWAY) ++ if (slot->flags & SLOT_IS_GOING_AWAY) { ++ pci_unlock_rescan_remove(); + return -ENODEV; ++ } + + mutex_lock(&slot->crit_sect); + /* configure all functions */ +diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c +index 0bf82a20a0fb..48d21e0edd56 100644 +--- a/drivers/pci/pcie/aer/aerdrv.c ++++ b/drivers/pci/pcie/aer/aerdrv.c +@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev) + rpc->rpd = dev; + INIT_WORK(&rpc->dpc_handler, aer_isr); + mutex_init(&rpc->rpc_mutex); +- init_waitqueue_head(&rpc->wait_release); + + /* Use PCIe bus function to store rpc into PCIe device */ + set_service_data(dev, rpc); +@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev) + if (rpc->isr) + free_irq(dev->irq, dev); + +- wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx); +- ++ flush_work(&rpc->dpc_handler); + aer_disable_rootport(rpc); + kfree(rpc); + set_service_data(dev, NULL); +diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h +index 84420b7c9456..945c939a86c5 100644 +--- a/drivers/pci/pcie/aer/aerdrv.h ++++ 
b/drivers/pci/pcie/aer/aerdrv.h +@@ -72,7 +72,6 @@ struct aer_rpc { + * recovery on the same + * root port hierarchy + */ +- wait_queue_head_t wait_release; + }; + + struct aer_broadcast_data { +diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c +index b2c8881da764..777edcc4aab6 100644 +--- a/drivers/pci/pcie/aer/aerdrv_core.c ++++ b/drivers/pci/pcie/aer/aerdrv_core.c +@@ -785,8 +785,6 @@ void aer_isr(struct work_struct *work) + while (get_e_source(rpc, &e_src)) + aer_isr_one_error(p_device, &e_src); + mutex_unlock(&rpc->rpc_mutex); +- +- wake_up(&rpc->wait_release); + } + + /** +diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c +index 179b8edc2262..318d535e337d 100644 +--- a/drivers/pci/xen-pcifront.c ++++ b/drivers/pci/xen-pcifront.c +@@ -52,7 +52,7 @@ struct pcifront_device { + }; + + struct pcifront_sd { +- int domain; ++ struct pci_sysdata sd; + struct pcifront_device *pdev; + }; + +@@ -66,7 +66,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd, + unsigned int domain, unsigned int bus, + struct pcifront_device *pdev) + { +- sd->domain = domain; ++ /* Because we do not expose that information via XenBus. */ ++ sd->sd.node = first_online_node; ++ sd->sd.domain = domain; + sd->pdev = pdev; + } + +@@ -464,8 +466,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev, + dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n", + domain, bus); + +- bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL); +- sd = kmalloc(sizeof(*sd), GFP_KERNEL); ++ bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL); ++ sd = kzalloc(sizeof(*sd), GFP_KERNEL); + if (!bus_entry || !sd) { + err = -ENOMEM; + goto err_out; +diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c +index 3bed2f55cf7d..3ccadf631d45 100644 +--- a/drivers/power/wm831x_power.c ++++ b/drivers/power/wm831x_power.c +@@ -567,7 +567,7 @@ static int wm831x_power_probe(struct platform_device *pdev) + + irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO")); + ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq, +- IRQF_TRIGGER_RISING, "System power low", ++ IRQF_TRIGGER_RISING | IRQF_ONESHOT, "System power low", + power); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n", +@@ -577,7 +577,7 @@ static int wm831x_power_probe(struct platform_device *pdev) + + irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC")); + ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq, +- IRQF_TRIGGER_RISING, "Power source", ++ IRQF_TRIGGER_RISING | IRQF_ONESHOT, "Power source", + power); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n", +@@ -590,7 +590,7 @@ static int wm831x_power_probe(struct platform_device *pdev) + platform_get_irq_byname(pdev, + wm831x_bat_irqs[i])); + ret = request_threaded_irq(irq, NULL, wm831x_bat_irq, +- IRQF_TRIGGER_RISING, ++ IRQF_TRIGGER_RISING | IRQF_ONESHOT, + wm831x_bat_irqs[i], + power); + if (ret != 0) { +diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c +index 3c6768378a94..4048d7f5babd 100644 +--- a/drivers/powercap/intel_rapl.c ++++ b/drivers/powercap/intel_rapl.c +@@ -1194,10 +1194,13 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu) + + for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { + /* check if the domain is locked by BIOS */ +- if (rapl_read_data_raw(rd, FW_LOCK, false, &locked)) { ++ ret = rapl_read_data_raw(rd, FW_LOCK, false, &locked); ++ if (ret) ++ return ret; ++ if 
(locked) { + pr_info("RAPL package %d domain %s locked by BIOS\n", + rp->id, rd->name); +- rd->state |= DOMAIN_STATE_BIOS_LOCKED; ++ rd->state |= DOMAIN_STATE_BIOS_LOCKED; + } + } + +diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c +index a2597e683e79..6a64e86e8ccd 100644 +--- a/drivers/s390/block/dasd_alias.c ++++ b/drivers/s390/block/dasd_alias.c +@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) + spin_unlock_irqrestore(&lcu->lock, flags); + cancel_work_sync(&lcu->suc_data.worker); + spin_lock_irqsave(&lcu->lock, flags); +- if (device == lcu->suc_data.device) ++ if (device == lcu->suc_data.device) { ++ dasd_put_device(device); + lcu->suc_data.device = NULL; ++ } + } + was_pending = 0; + if (device == lcu->ruac_data.device) { +@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) + was_pending = 1; + cancel_delayed_work_sync(&lcu->ruac_data.dwork); + spin_lock_irqsave(&lcu->lock, flags); +- if (device == lcu->ruac_data.device) ++ if (device == lcu->ruac_data.device) { ++ dasd_put_device(device); + lcu->ruac_data.device = NULL; ++ } + } + private->lcu = NULL; + spin_unlock_irqrestore(&lcu->lock, flags); +@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work) + if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) { + DBF_DEV_EVENT(DBF_WARNING, device, "could not update" + " alias data in lcu (rc = %d), retry later", rc); +- schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ); ++ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ)) ++ dasd_put_device(device); + } else { ++ dasd_put_device(device); + lcu->ruac_data.device = NULL; + lcu->flags &= ~UPDATE_PENDING; + } +@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu, + */ + if (!usedev) + return -EINVAL; ++ dasd_get_device(usedev); + lcu->ruac_data.device = usedev; +- schedule_delayed_work(&lcu->ruac_data.dwork, 0); ++ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0)) ++ dasd_put_device(usedev); + return 0; + } + +@@ -722,7 +730,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu, + ASCEBC((char *) &cqr->magic, 4); + ccw = cqr->cpaddr; + ccw->cmd_code = DASD_ECKD_CCW_RSCK; +- ccw->flags = 0 ; ++ ccw->flags = CCW_FLAG_SLI; + ccw->count = 16; + ccw->cda = (__u32)(addr_t) cqr->data; + ((char *)cqr->data)[0] = reason; +@@ -926,6 +934,7 @@ static void summary_unit_check_handling_work(struct work_struct *work) + /* 3. 
read new alias configuration */ + _schedule_lcu_update(lcu, device); + lcu->suc_data.device = NULL; ++ dasd_put_device(device); + spin_unlock_irqrestore(&lcu->lock, flags); + } + +@@ -985,6 +994,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device, + } + lcu->suc_data.reason = reason; + lcu->suc_data.device = device; ++ dasd_get_device(device); + spin_unlock(&lcu->lock); +- schedule_work(&lcu->suc_data.worker); ++ if (!schedule_work(&lcu->suc_data.worker)) ++ dasd_put_device(device); + }; +diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h +index 34452ea386ac..52636cfbab8f 100644 +--- a/drivers/scsi/megaraid/megaraid_sas.h ++++ b/drivers/scsi/megaraid/megaraid_sas.h +@@ -334,6 +334,8 @@ enum MR_EVT_ARGS { + MR_EVT_ARGS_GENERIC, + }; + ++ ++#define SGE_BUFFER_SIZE 4096 + /* + * define constants for device list query options + */ +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c +index c80afde97e96..9f833f1504cc 100644 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c +@@ -3821,7 +3821,7 @@ static int megasas_init_fw(struct megasas_instance *instance) + } + } + instance->max_sectors_per_req = instance->max_num_sge * +- PAGE_SIZE / 512; ++ SGE_BUFFER_SIZE / 512; + if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) + instance->max_sectors_per_req = tmp_sectors; + +@@ -5281,6 +5281,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) + int i; + int error = 0; + compat_uptr_t ptr; ++ unsigned long local_raw_ptr; ++ u32 local_sense_off; ++ u32 local_sense_len; + + if (clear_user(ioc, sizeof(*ioc))) + return -EFAULT; +@@ -5298,9 +5301,15 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) + * sense_len is not null, so prepare the 64bit value under + * the same condition. + */ +- if (ioc->sense_len) { ++ if (get_user(local_raw_ptr, ioc->frame.raw) || ++ get_user(local_sense_off, &ioc->sense_off) || ++ get_user(local_sense_len, &ioc->sense_len)) ++ return -EFAULT; ++ ++ ++ if (local_sense_len) { + void __user **sense_ioc_ptr = +- (void __user **)(ioc->frame.raw + ioc->sense_off); ++ (void __user **)((u8*)local_raw_ptr + local_sense_off); + compat_uptr_t *sense_cioc_ptr = + (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off); + if (get_user(ptr, sense_cioc_ptr) || +diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c +index eba183c428cf..3643bbf5456d 100644 +--- a/drivers/scsi/ses.c ++++ b/drivers/scsi/ses.c +@@ -70,6 +70,7 @@ static int ses_probe(struct device *dev) + static int ses_recv_diag(struct scsi_device *sdev, int page_code, + void *buf, int bufflen) + { ++ int ret; + unsigned char cmd[] = { + RECEIVE_DIAGNOSTIC, + 1, /* Set PCV bit */ +@@ -78,9 +79,26 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code, + bufflen & 0xff, + 0 + }; ++ unsigned char recv_page_code; + +- return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, ++ ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, + NULL, SES_TIMEOUT, SES_RETRIES, NULL); ++ if (unlikely(!ret)) ++ return ret; ++ ++ recv_page_code = ((unsigned char *)buf)[0]; ++ ++ if (likely(recv_page_code == page_code)) ++ return ret; ++ ++ /* successful diagnostic but wrong page code. 
This happens to some ++ * USB devices, just print a message and pretend there was an error */ ++ ++ sdev_printk(KERN_ERR, sdev, ++ "Wrong diagnostic page; asked for %d got %u\n", ++ page_code, recv_page_code); ++ ++ return -EINVAL; + } + + static int ses_send_diag(struct scsi_device *sdev, int page_code, +@@ -436,7 +454,15 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, + if (desc_ptr) + desc_ptr += len; + +- if (addl_desc_ptr) ++ if (addl_desc_ptr && ++ /* only find additional descriptions for specific devices */ ++ (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || ++ type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE || ++ type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER || ++ /* these elements are optional */ ++ type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT || ++ type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT || ++ type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS)) + addl_desc_ptr += addl_desc_ptr[1] + 2; + + } +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c +index 3bb6646bb406..f9da66fa850b 100644 +--- a/drivers/scsi/storvsc_drv.c ++++ b/drivers/scsi/storvsc_drv.c +@@ -1610,8 +1610,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) + vm_srb->win8_extension.time_out_value = 60; + + vm_srb->win8_extension.srb_flags |= +- (SRB_FLAGS_QUEUE_ACTION_ENABLE | +- SRB_FLAGS_DISABLE_SYNCH_TRANSFER); ++ SRB_FLAGS_DISABLE_SYNCH_TRANSFER; + + /* Build the SRB */ + switch (scmnd->sc_data_direction) { +diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c +index f89b24a09b19..231d63caa663 100644 +--- a/drivers/target/target_core_sbc.c ++++ b/drivers/target/target_core_sbc.c +@@ -314,7 +314,8 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o + return 0; + } + +-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success) ++static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success, ++ int *post_ret) + { + unsigned char *buf, *addr; + struct scatterlist *sg; +@@ -378,7 +379,8 @@ sbc_execute_rw(struct se_cmd *cmd) + cmd->data_direction); + } + +-static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) ++static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success, ++ int *post_ret) + { + struct se_device *dev = cmd->se_dev; + +@@ -388,8 +390,10 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) + * sent to the backend driver. 
+ */ + spin_lock_irq(&cmd->t_state_lock); +- if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) ++ if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) { + cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; ++ *post_ret = 1; ++ } + spin_unlock_irq(&cmd->t_state_lock); + + /* +@@ -401,7 +405,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success) + return TCM_NO_SENSE; + } + +-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success) ++static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success, ++ int *post_ret) + { + struct se_device *dev = cmd->se_dev; + struct scatterlist *write_sg = NULL, *sg; +@@ -497,11 +502,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes + + if (block_size < PAGE_SIZE) { + sg_set_page(&write_sg[i], m.page, block_size, +- block_size); ++ m.piter.sg->offset + block_size); + } else { + sg_miter_next(&m); + sg_set_page(&write_sg[i], m.page, block_size, +- 0); ++ m.piter.sg->offset); + } + len -= block_size; + i++; +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index 6fc38903046c..7afea9b59e2c 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -1581,7 +1581,7 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) + void transport_generic_request_failure(struct se_cmd *cmd, + sense_reason_t sense_reason) + { +- int ret = 0; ++ int ret = 0, post_ret = 0; + + pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" + " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), +@@ -1604,7 +1604,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, + */ + if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && + cmd->transport_complete_callback) +- cmd->transport_complete_callback(cmd, false); ++ cmd->transport_complete_callback(cmd, false, &post_ret); + + switch (sense_reason) { + case TCM_NON_EXISTENT_LUN: +@@ -1940,11 +1940,13 @@ static void target_complete_ok_work(struct work_struct *work) + */ + if (cmd->transport_complete_callback) { + sense_reason_t rc; ++ bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); ++ bool zero_dl = !(cmd->data_length); ++ int post_ret = 0; + +- rc = cmd->transport_complete_callback(cmd, true); +- if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) { +- if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && +- !cmd->data_length) ++ rc = cmd->transport_complete_callback(cmd, true, &post_ret); ++ if (!rc && !post_ret) { ++ if (caw && zero_dl) + goto queue_rsp; + + return; +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 2aca88715632..584514c7ed1f 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -1810,6 +1810,11 @@ static const struct usb_device_id acm_ids[] = { + }, + #endif + ++ /*Samsung phone in firmware update mode */ ++ { USB_DEVICE(0x04e8, 0x685d), ++ .driver_info = IGNORE_DEVICE, ++ }, ++ + /* Exclude Infineon Flash Loader utility */ + { USB_DEVICE(0x058b, 0x0041), + .driver_info = IGNORE_DEVICE, +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 02e6fe228a63..21bf168981f9 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ + { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ + { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble 
Sound Board 1 */ ++ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ ++ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ + { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 81f6a572f016..9bab34cf01d4 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -315,6 +315,7 @@ static void option_instat_callback(struct urb *urb); + #define TOSHIBA_PRODUCT_G450 0x0d45 + + #define ALINK_VENDOR_ID 0x1e0e ++#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */ + #define ALINK_PRODUCT_PH300 0x9100 + #define ALINK_PRODUCT_3GU 0x9200 + +@@ -615,6 +616,10 @@ static const struct option_blacklist_info zte_1255_blacklist = { + .reserved = BIT(3) | BIT(4), + }; + ++static const struct option_blacklist_info simcom_sim7100e_blacklist = { ++ .reserved = BIT(5) | BIT(6), ++}; ++ + static const struct option_blacklist_info telit_le910_blacklist = { + .sendsetup = BIT(0), + .reserved = BIT(1) | BIT(2), +@@ -1130,6 +1135,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ ++ { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */ ++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ +@@ -1645,6 +1652,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, + { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) }, + { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E), ++ .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist }, + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), + .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist + }, +diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c +index fed0ce198ae3..64eba4f51f71 100644 +--- a/drivers/virtio/virtio.c ++++ b/drivers/virtio/virtio.c +@@ -249,6 +249,7 @@ static int virtio_init(void) + static void __exit virtio_exit(void) + { + bus_unregister(&virtio_bus); ++ ida_destroy(&virtio_index_ida); + } + core_initcall(virtio_init); + module_exit(virtio_exit); +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index f48d5fc352a9..469051b01fbf 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -2336,6 +2336,7 @@ int open_ctree(struct super_block *sb, + if (btrfs_check_super_csum(bh->b_data)) { + printk(KERN_ERR "BTRFS: superblock checksum mismatch\n"); + err = -EINVAL; ++ brelse(bh); + goto fail_alloc; + } + +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 08824fe6ef44..fb37441a592f 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -7511,15 +7511,28 @@ int btrfs_readpage(struct file *file, struct page *page) + static int btrfs_writepage(struct page *page, struct writeback_control *wbc) + { + struct extent_io_tree *tree; +- ++ struct inode *inode = page->mapping->host; ++ int ret; + + if (current->flags & PF_MEMALLOC) { + 
redirty_page_for_writepage(wbc, page); + unlock_page(page); + return 0; + } ++ ++ /* ++ * If we are under memory pressure we will call this directly from the ++ * VM, we need to make sure we have the inode referenced for the ordered ++ * extent. If not just return like we didn't do anything. ++ */ ++ if (!igrab(inode)) { ++ redirty_page_for_writepage(wbc, page); ++ return AOP_WRITEPAGE_ACTIVATE; ++ } + tree = &BTRFS_I(page->mapping->host)->io_tree; +- return extent_write_full_page(tree, page, btrfs_get_extent, wbc); ++ ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc); ++ btrfs_add_delayed_iput(inode); ++ return ret; + } + + static int btrfs_writepages(struct address_space *mapping, +@@ -8612,9 +8625,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, + /* + * 2 items for inode item and ref + * 2 items for dir items ++ * 1 item for updating parent inode item ++ * 1 item for the inline extent item + * 1 item for xattr if selinux is on + */ +- trans = btrfs_start_transaction(root, 5); ++ trans = btrfs_start_transaction(root, 7); + if (IS_ERR(trans)) + return PTR_ERR(trans); + +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c +index 20d793542096..0fd23ab3b4ad 100644 +--- a/fs/btrfs/send.c ++++ b/fs/btrfs/send.c +@@ -1377,7 +1377,21 @@ static int read_symlink(struct btrfs_root *root, + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; +- BUG_ON(ret); ++ if (ret) { ++ /* ++ * An empty symlink inode. Can happen in rare error paths when ++ * creating a symlink (transaction committed before the inode ++ * eviction handler removed the symlink inode items and a crash ++ * happened in between or the subvol was snapshoted in between). ++ * Print an informative message to dmesg/syslog so that the user ++ * can delete the symlink. ++ */ ++ btrfs_err(root->fs_info, ++ "Found empty symlink inode %llu at root %llu", ++ ino, root->root_key.objectid); ++ ret = -EIO; ++ goto out; ++ } + + ei = btrfs_item_ptr(path->nodes[0], path->slots[0], + struct btrfs_file_extent_item); +diff --git a/fs/dcache.c b/fs/dcache.c +index 65ccdf0e2854..9b235362efcd 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -439,42 +439,12 @@ void d_drop(struct dentry *dentry) + } + EXPORT_SYMBOL(d_drop); + +-/* +- * Finish off a dentry we've decided to kill. +- * dentry->d_lock must be held, returns with it unlocked. +- * If ref is non-zero, then decrement the refcount too. +- * Returns dentry requiring refcount drop, or NULL if we're done. +- */ +-static struct dentry * +-dentry_kill(struct dentry *dentry, int unlock_on_failure) +- __releases(dentry->d_lock) ++static void __dentry_kill(struct dentry *dentry) + { +- struct inode *inode; + struct dentry *parent = NULL; + bool can_free = true; +- +- if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) { +- can_free = dentry->d_flags & DCACHE_MAY_FREE; +- spin_unlock(&dentry->d_lock); +- goto out; +- } +- +- inode = dentry->d_inode; +- if (inode && !spin_trylock(&inode->i_lock)) { +-relock: +- if (unlock_on_failure) { +- spin_unlock(&dentry->d_lock); +- cpu_relax(); +- } +- return dentry; /* try again with same dentry */ +- } + if (!IS_ROOT(dentry)) + parent = dentry->d_parent; +- if (parent && !spin_trylock(&parent->d_lock)) { +- if (inode) +- spin_unlock(&inode->i_lock); +- goto relock; +- } + + /* + * The dentry is now unrecoverably dead to the world. 
+@@ -518,9 +488,74 @@ relock: + can_free = false; + } + spin_unlock(&dentry->d_lock); +-out: + if (likely(can_free)) + dentry_free(dentry); ++} ++ ++/* ++ * Finish off a dentry we've decided to kill. ++ * dentry->d_lock must be held, returns with it unlocked. ++ * If ref is non-zero, then decrement the refcount too. ++ * Returns dentry requiring refcount drop, or NULL if we're done. ++ */ ++static struct dentry *dentry_kill(struct dentry *dentry) ++ __releases(dentry->d_lock) ++{ ++ struct inode *inode = dentry->d_inode; ++ struct dentry *parent = NULL; ++ ++ if (inode && unlikely(!spin_trylock(&inode->i_lock))) ++ goto failed; ++ ++ if (!IS_ROOT(dentry)) { ++ parent = dentry->d_parent; ++ if (unlikely(!spin_trylock(&parent->d_lock))) { ++ if (inode) ++ spin_unlock(&inode->i_lock); ++ goto failed; ++ } ++ } ++ ++ __dentry_kill(dentry); ++ return parent; ++ ++failed: ++ spin_unlock(&dentry->d_lock); ++ cpu_relax(); ++ return dentry; /* try again with same dentry */ ++} ++ ++static inline struct dentry *lock_parent(struct dentry *dentry) ++{ ++ struct dentry *parent = dentry->d_parent; ++ if (IS_ROOT(dentry)) ++ return NULL; ++ if (unlikely((int)dentry->d_lockref.count < 0)) ++ return NULL; ++ if (likely(spin_trylock(&parent->d_lock))) ++ return parent; ++ rcu_read_lock(); ++ spin_unlock(&dentry->d_lock); ++again: ++ parent = ACCESS_ONCE(dentry->d_parent); ++ spin_lock(&parent->d_lock); ++ /* ++ * We can't blindly lock dentry until we are sure ++ * that we won't violate the locking order. ++ * Any changes of dentry->d_parent must have ++ * been done with parent->d_lock held, so ++ * spin_lock() above is enough of a barrier ++ * for checking if it's still our child. ++ */ ++ if (unlikely(parent != dentry->d_parent)) { ++ spin_unlock(&parent->d_lock); ++ goto again; ++ } ++ rcu_read_unlock(); ++ if (parent != dentry) ++ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); ++ else ++ parent = NULL; + return parent; + } + +@@ -580,7 +615,7 @@ repeat: + return; + + kill_it: +- dentry = dentry_kill(dentry, 1); ++ dentry = dentry_kill(dentry); + if (dentry) + goto repeat; + } +@@ -798,8 +833,11 @@ static void shrink_dentry_list(struct list_head *list) + struct dentry *dentry, *parent; + + while (!list_empty(list)) { ++ struct inode *inode; + dentry = list_entry(list->prev, struct dentry, d_lru); + spin_lock(&dentry->d_lock); ++ parent = lock_parent(dentry); ++ + /* + * The dispose list is isolated and dentries are not accounted + * to the LRU here, so we can simply remove it from the list +@@ -813,26 +851,33 @@ static void shrink_dentry_list(struct list_head *list) + */ + if ((int)dentry->d_lockref.count > 0) { + spin_unlock(&dentry->d_lock); ++ if (parent) ++ spin_unlock(&parent->d_lock); + continue; + } + +- parent = dentry_kill(dentry, 0); +- /* +- * If dentry_kill returns NULL, we have nothing more to do. +- */ +- if (!parent) ++ ++ if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) { ++ bool can_free = dentry->d_flags & DCACHE_MAY_FREE; ++ spin_unlock(&dentry->d_lock); ++ if (parent) ++ spin_unlock(&parent->d_lock); ++ if (can_free) ++ dentry_free(dentry); + continue; ++ } + +- if (unlikely(parent == dentry)) { +- /* +- * trylocks have failed and d_lock has been held the +- * whole time, so it could not have been added to any +- * other lists. Just add it back to the shrink list. 
+- */ ++ inode = dentry->d_inode; ++ if (inode && unlikely(!spin_trylock(&inode->i_lock))) { + d_shrink_add(dentry, list); + spin_unlock(&dentry->d_lock); ++ if (parent) ++ spin_unlock(&parent->d_lock); + continue; + } ++ ++ __dentry_kill(dentry); ++ + /* + * We need to prune ancestors too. This is necessary to prevent + * quadratic behavior of shrink_dcache_parent(), but is also +@@ -840,8 +885,26 @@ static void shrink_dentry_list(struct list_head *list) + * fragmentation. + */ + dentry = parent; +- while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) +- dentry = dentry_kill(dentry, 1); ++ while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) { ++ parent = lock_parent(dentry); ++ if (dentry->d_lockref.count != 1) { ++ dentry->d_lockref.count--; ++ spin_unlock(&dentry->d_lock); ++ if (parent) ++ spin_unlock(&parent->d_lock); ++ break; ++ } ++ inode = dentry->d_inode; /* can't be NULL */ ++ if (unlikely(!spin_trylock(&inode->i_lock))) { ++ spin_unlock(&dentry->d_lock); ++ if (parent) ++ spin_unlock(&parent->d_lock); ++ cpu_relax(); ++ continue; ++ } ++ __dentry_kill(dentry); ++ dentry = parent; ++ } + } + } + +diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c +index fe649d325b1f..ce653dfb0ae3 100644 +--- a/fs/hostfs/hostfs_kern.c ++++ b/fs/hostfs/hostfs_kern.c +@@ -720,15 +720,13 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, + + init_special_inode(inode, mode, dev); + err = do_mknod(name, mode, MAJOR(dev), MINOR(dev)); +- if (!err) ++ if (err) + goto out_free; + + err = read_name(inode, name); + __putname(name); + if (err) + goto out_put; +- if (err) +- goto out_put; + + d_instantiate(dentry, inode); + return 0; +diff --git a/fs/lockd/host.c b/fs/lockd/host.c +index 969d589c848d..b5f3c3ab0d5f 100644 +--- a/fs/lockd/host.c ++++ b/fs/lockd/host.c +@@ -116,7 +116,7 @@ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni, + atomic_inc(&nsm->sm_count); + else { + host = NULL; +- nsm = nsm_get_handle(ni->sap, ni->salen, ++ nsm = nsm_get_handle(ni->net, ni->sap, ni->salen, + ni->hostname, ni->hostname_len); + if (unlikely(nsm == NULL)) { + dprintk("lockd: %s failed; no nsm handle\n", +@@ -534,17 +534,18 @@ static struct nlm_host *next_host_state(struct hlist_head *cache, + + /** + * nlm_host_rebooted - Release all resources held by rebooted host ++ * @net: network namespace + * @info: pointer to decoded results of NLM_SM_NOTIFY call + * + * We were notified that the specified host has rebooted. Release + * all resources held by that peer. 
+ */ +-void nlm_host_rebooted(const struct nlm_reboot *info) ++void nlm_host_rebooted(const struct net *net, const struct nlm_reboot *info) + { + struct nsm_handle *nsm; + struct nlm_host *host; + +- nsm = nsm_reboot_lookup(info); ++ nsm = nsm_reboot_lookup(net, info); + if (unlikely(nsm == NULL)) + return; + +diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c +index 6ae664b489af..13fac49aff7f 100644 +--- a/fs/lockd/mon.c ++++ b/fs/lockd/mon.c +@@ -51,7 +51,6 @@ struct nsm_res { + }; + + static const struct rpc_program nsm_program; +-static LIST_HEAD(nsm_handles); + static DEFINE_SPINLOCK(nsm_lock); + + /* +@@ -259,33 +258,35 @@ void nsm_unmonitor(const struct nlm_host *host) + } + } + +-static struct nsm_handle *nsm_lookup_hostname(const char *hostname, +- const size_t len) ++static struct nsm_handle *nsm_lookup_hostname(const struct list_head *nsm_handles, ++ const char *hostname, const size_t len) + { + struct nsm_handle *nsm; + +- list_for_each_entry(nsm, &nsm_handles, sm_link) ++ list_for_each_entry(nsm, nsm_handles, sm_link) + if (strlen(nsm->sm_name) == len && + memcmp(nsm->sm_name, hostname, len) == 0) + return nsm; + return NULL; + } + +-static struct nsm_handle *nsm_lookup_addr(const struct sockaddr *sap) ++static struct nsm_handle *nsm_lookup_addr(const struct list_head *nsm_handles, ++ const struct sockaddr *sap) + { + struct nsm_handle *nsm; + +- list_for_each_entry(nsm, &nsm_handles, sm_link) ++ list_for_each_entry(nsm, nsm_handles, sm_link) + if (rpc_cmp_addr(nsm_addr(nsm), sap)) + return nsm; + return NULL; + } + +-static struct nsm_handle *nsm_lookup_priv(const struct nsm_private *priv) ++static struct nsm_handle *nsm_lookup_priv(const struct list_head *nsm_handles, ++ const struct nsm_private *priv) + { + struct nsm_handle *nsm; + +- list_for_each_entry(nsm, &nsm_handles, sm_link) ++ list_for_each_entry(nsm, nsm_handles, sm_link) + if (memcmp(nsm->sm_priv.data, priv->data, + sizeof(priv->data)) == 0) + return nsm; +@@ -350,6 +351,7 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap, + + /** + * nsm_get_handle - Find or create a cached nsm_handle ++ * @net: network namespace + * @sap: pointer to socket address of handle to find + * @salen: length of socket address + * @hostname: pointer to C string containing hostname to find +@@ -362,11 +364,13 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap, + * @hostname cannot be found in the handle cache. Returns NULL if + * an error occurs. 
+ */ +-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap, ++struct nsm_handle *nsm_get_handle(const struct net *net, ++ const struct sockaddr *sap, + const size_t salen, const char *hostname, + const size_t hostname_len) + { + struct nsm_handle *cached, *new = NULL; ++ struct lockd_net *ln = net_generic(net, lockd_net_id); + + if (hostname && memchr(hostname, '/', hostname_len) != NULL) { + if (printk_ratelimit()) { +@@ -381,9 +385,10 @@ retry: + spin_lock(&nsm_lock); + + if (nsm_use_hostnames && hostname != NULL) +- cached = nsm_lookup_hostname(hostname, hostname_len); ++ cached = nsm_lookup_hostname(&ln->nsm_handles, ++ hostname, hostname_len); + else +- cached = nsm_lookup_addr(sap); ++ cached = nsm_lookup_addr(&ln->nsm_handles, sap); + + if (cached != NULL) { + atomic_inc(&cached->sm_count); +@@ -397,7 +402,7 @@ retry: + } + + if (new != NULL) { +- list_add(&new->sm_link, &nsm_handles); ++ list_add(&new->sm_link, &ln->nsm_handles); + spin_unlock(&nsm_lock); + dprintk("lockd: created nsm_handle for %s (%s)\n", + new->sm_name, new->sm_addrbuf); +@@ -414,19 +419,22 @@ retry: + + /** + * nsm_reboot_lookup - match NLMPROC_SM_NOTIFY arguments to an nsm_handle ++ * @net: network namespace + * @info: pointer to NLMPROC_SM_NOTIFY arguments + * + * Returns a matching nsm_handle if found in the nsm cache. The returned + * nsm_handle's reference count is bumped. Otherwise returns NULL if some + * error occurred. + */ +-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info) ++struct nsm_handle *nsm_reboot_lookup(const struct net *net, ++ const struct nlm_reboot *info) + { + struct nsm_handle *cached; ++ struct lockd_net *ln = net_generic(net, lockd_net_id); + + spin_lock(&nsm_lock); + +- cached = nsm_lookup_priv(&info->priv); ++ cached = nsm_lookup_priv(&ln->nsm_handles, &info->priv); + if (unlikely(cached == NULL)) { + spin_unlock(&nsm_lock); + dprintk("lockd: never saw rebooted peer '%.*s' before\n", +diff --git a/fs/lockd/netns.h b/fs/lockd/netns.h +index 5010b55628b4..414da99744e9 100644 +--- a/fs/lockd/netns.h ++++ b/fs/lockd/netns.h +@@ -16,6 +16,7 @@ struct lockd_net { + spinlock_t nsm_clnt_lock; + unsigned int nsm_users; + struct rpc_clnt *nsm_clnt; ++ struct list_head nsm_handles; + }; + + extern int lockd_net_id; +diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c +index 59a53f664005..bb1ad4df024d 100644 +--- a/fs/lockd/svc.c ++++ b/fs/lockd/svc.c +@@ -583,6 +583,7 @@ static int lockd_init_net(struct net *net) + INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender); + INIT_LIST_HEAD(&ln->grace_list); + spin_lock_init(&ln->nsm_clnt_lock); ++ INIT_LIST_HEAD(&ln->nsm_handles); + return 0; + } + +diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c +index b147d1ae71fd..09c576f26c7b 100644 +--- a/fs/lockd/svc4proc.c ++++ b/fs/lockd/svc4proc.c +@@ -421,7 +421,7 @@ nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp, + return rpc_system_err; + } + +- nlm_host_rebooted(argp); ++ nlm_host_rebooted(SVC_NET(rqstp), argp); + return rpc_success; + } + +diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c +index 21171f0c6477..fb26b9f522e7 100644 +--- a/fs/lockd/svcproc.c ++++ b/fs/lockd/svcproc.c +@@ -464,7 +464,7 @@ nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp, + return rpc_system_err; + } + +- nlm_host_rebooted(argp); ++ nlm_host_rebooted(SVC_NET(rqstp), argp); + return rpc_success; + } + +diff --git a/fs/namei.c b/fs/namei.c +index f4f6460b6958..c24781f07cf3 100644 +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -3085,6 +3085,10 @@ opened: + 
goto exit_fput; + } + out: ++ if (unlikely(error > 0)) { ++ WARN_ON(1); ++ error = -EINVAL; ++ } + if (got_write) + mnt_drop_write(nd->path.mnt); + path_put(&save_parent); +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 45a7dd36b4a6..3b5e86fd2800 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -2187,9 +2187,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, + dentry = d_add_unique(dentry, igrab(state->inode)); + if (dentry == NULL) { + dentry = opendata->dentry; +- } else if (dentry != ctx->dentry) { ++ } else { + dput(ctx->dentry); +- ctx->dentry = dget(dentry); ++ ctx->dentry = dentry; + } + nfs_set_verifier(dentry, + nfs_save_change_attribute(opendata->dir->d_inode)); +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c +index c402b672a474..1c02b300dc5d 100644 +--- a/fs/nfs/nfs4state.c ++++ b/fs/nfs/nfs4state.c +@@ -1482,7 +1482,7 @@ restart: + spin_unlock(&state->state_lock); + } + nfs4_put_open_state(state); +- clear_bit(NFS4CLNT_RECLAIM_NOGRACE, ++ clear_bit(NFS_STATE_RECLAIM_NOGRACE, + &state->flags); + spin_lock(&sp->so_lock); + goto restart; +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index eaa7374305a3..6b1d8498d208 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -165,7 +165,7 @@ static void *m_start(struct seq_file *m, loff_t *pos) + if (!priv->task) + return ERR_PTR(-ESRCH); + +- mm = mm_access(priv->task, PTRACE_MODE_READ); ++ mm = mm_access(priv->task, PTRACE_MODE_READ_FSCREDS); + if (!mm || IS_ERR(mm)) + return mm; + down_read(&mm->mmap_sem); +@@ -1182,7 +1182,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, + if (!pm.buffer) + goto out_task; + +- mm = mm_access(task, PTRACE_MODE_READ); ++ mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); + ret = PTR_ERR(mm); + if (!mm || IS_ERR(mm)) + goto out_free; +diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c +index 678455d2d683..f9db7e9f6966 100644 +--- a/fs/proc/task_nommu.c ++++ b/fs/proc/task_nommu.c +@@ -216,7 +216,7 @@ static void *m_start(struct seq_file *m, loff_t *pos) + if (!priv->task) + return ERR_PTR(-ESRCH); + +- mm = mm_access(priv->task, PTRACE_MODE_READ); ++ mm = mm_access(priv->task, PTRACE_MODE_READ_FSCREDS); + if (!mm || IS_ERR(mm)) { + put_task_struct(priv->task); + priv->task = NULL; +diff --git a/fs/splice.c b/fs/splice.c +index f345d53f94da..e64f59960ec5 100644 +--- a/fs/splice.c ++++ b/fs/splice.c +@@ -949,6 +949,7 @@ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd, + + splice_from_pipe_begin(sd); + do { ++ cond_resched(); + ret = splice_from_pipe_next(pipe, sd); + if (ret > 0) + ret = splice_from_pipe_feed(pipe, sd, actor); +@@ -1175,7 +1176,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, + long ret, bytes; + umode_t i_mode; + size_t len; +- int i, flags; ++ int i, flags, more; + + /* + * We require the input being a regular file, as we don't want to +@@ -1218,6 +1219,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, + * Don't block on output, we have to drain the direct pipe. + */ + sd->flags &= ~SPLICE_F_NONBLOCK; ++ more = sd->flags & SPLICE_F_MORE; + + while (len) { + size_t read_len; +@@ -1231,6 +1233,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, + sd->total_len = read_len; + + /* ++ * If more data is pending, set SPLICE_F_MORE ++ * If this is the last data and SPLICE_F_MORE was not set ++ * initially, clears it. 
++ */ ++ if (read_len < len) ++ sd->flags |= SPLICE_F_MORE; ++ else if (!more) ++ sd->flags &= ~SPLICE_F_MORE; ++ /* + * NOTE: nonblocking mode only applies to the input. We + * must not do the output in nonblocking mode as then we + * could get stuck data in the internal pipe: +diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h +index 2c9e62c2bfd0..f55fb04501ec 100644 +--- a/include/asm-generic/cputime_nsecs.h ++++ b/include/asm-generic/cputime_nsecs.h +@@ -70,7 +70,7 @@ typedef u64 __nocast cputime64_t; + */ + static inline cputime_t timespec_to_cputime(const struct timespec *val) + { +- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec; ++ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec; + return (__force cputime_t) ret; + } + static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) +@@ -86,7 +86,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) + */ + static inline cputime_t timeval_to_cputime(const struct timeval *val) + { +- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC; ++ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + ++ val->tv_usec * NSEC_PER_USEC; + return (__force cputime_t) ret; + } + static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) +diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h +index 9a33c5f7e126..f6c229e2bffa 100644 +--- a/include/linux/enclosure.h ++++ b/include/linux/enclosure.h +@@ -29,7 +29,11 @@ + /* A few generic types ... taken from ses-2 */ + enum enclosure_component_type { + ENCLOSURE_COMPONENT_DEVICE = 0x01, ++ ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07, ++ ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14, ++ ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15, + ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17, ++ ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18, + }; + + /* ses-2 common element status */ +diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h +index dcaad79f54ed..0adf073f13b3 100644 +--- a/include/linux/lockd/lockd.h ++++ b/include/linux/lockd/lockd.h +@@ -236,7 +236,8 @@ void nlm_rebind_host(struct nlm_host *); + struct nlm_host * nlm_get_host(struct nlm_host *); + void nlm_shutdown_hosts(void); + void nlm_shutdown_hosts_net(struct net *net); +-void nlm_host_rebooted(const struct nlm_reboot *); ++void nlm_host_rebooted(const struct net *net, ++ const struct nlm_reboot *); + + /* + * Host monitoring +@@ -244,11 +245,13 @@ void nlm_host_rebooted(const struct nlm_reboot *); + int nsm_monitor(const struct nlm_host *host); + void nsm_unmonitor(const struct nlm_host *host); + +-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap, ++struct nsm_handle *nsm_get_handle(const struct net *net, ++ const struct sockaddr *sap, + const size_t salen, + const char *hostname, + const size_t hostname_len); +-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info); ++struct nsm_handle *nsm_reboot_lookup(const struct net *net, ++ const struct nlm_reboot *info); + void nsm_release(struct nsm_handle *nsm); + + /* +diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h +index 0ae5807480f4..1e122cc9ea3e 100644 +--- a/include/linux/nfs_fs.h ++++ b/include/linux/nfs_fs.h +@@ -580,9 +580,7 @@ static inline int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, + + static inline loff_t nfs_size_to_loff_t(__u64 size) + { +- if (size > (__u64) OFFSET_MAX - 1) +- return OFFSET_MAX - 1; +- return (loff_t) size; ++ return min_t(u64, size, OFFSET_MAX); + } + + static inline 
ino_t +diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h +index 7159a0a933df..97c8689c7e51 100644 +--- a/include/linux/tracepoint.h ++++ b/include/linux/tracepoint.h +@@ -14,8 +14,11 @@ + * See the file COPYING for more details. + */ + ++#include + #include + #include ++#include ++#include + #include + #include + +@@ -126,6 +129,9 @@ static inline void tracepoint_synchronize_unregister(void) + void *it_func; \ + void *__data; \ + \ ++ if (!cpu_online(raw_smp_processor_id())) \ ++ return; \ ++ \ + if (!(cond)) \ + return; \ + prercu; \ +diff --git a/include/net/af_unix.h b/include/net/af_unix.h +index e830c3dff61a..7bb69c9c3c43 100644 +--- a/include/net/af_unix.h ++++ b/include/net/af_unix.h +@@ -6,8 +6,8 @@ + #include + #include + +-void unix_inflight(struct file *fp); +-void unix_notinflight(struct file *fp); ++void unix_inflight(struct user_struct *user, struct file *fp); ++void unix_notinflight(struct user_struct *user, struct file *fp); + void unix_gc(void); + void wait_for_unix_gc(void); + struct sock *unix_get_socket(struct file *filp); +diff --git a/include/net/scm.h b/include/net/scm.h +index 262532d111f5..59fa93c01d2a 100644 +--- a/include/net/scm.h ++++ b/include/net/scm.h +@@ -21,6 +21,7 @@ struct scm_creds { + struct scm_fp_list { + short count; + short max; ++ struct user_struct *user; + struct file *fp[SCM_MAX_FD]; + }; + +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h +index e4b9e011d2a1..42606764d830 100644 +--- a/include/target/target_core_base.h ++++ b/include/target/target_core_base.h +@@ -513,7 +513,7 @@ struct se_cmd { + sense_reason_t (*execute_cmd)(struct se_cmd *); + sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *, + u32, enum dma_data_direction); +- sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool); ++ sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *); + + unsigned char *t_task_cdb; + unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c +index ebb8a9e937fa..2c2e5e70e4f3 100644 +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -1230,6 +1230,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) + if (!desc) + return NULL; + ++ chip_bus_lock(desc); + raw_spin_lock_irqsave(&desc->lock, flags); + + /* +@@ -1243,7 +1244,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) + if (!action) { + WARN(1, "Trying to free already-free IRQ %d\n", irq); + raw_spin_unlock_irqrestore(&desc->lock, flags); +- ++ chip_bus_sync_unlock(desc); + return NULL; + } + +@@ -1266,6 +1267,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) + #endif + + raw_spin_unlock_irqrestore(&desc->lock, flags); ++ chip_bus_sync_unlock(desc); + + unregister_handler_proc(irq, action); + +@@ -1339,9 +1341,7 @@ void free_irq(unsigned int irq, void *dev_id) + desc->affinity_notify = NULL; + #endif + +- chip_bus_lock(desc); + kfree(__free_irq(irq, dev_id)); +- chip_bus_sync_unlock(desc); + } + EXPORT_SYMBOL(free_irq); + +diff --git a/kernel/resource.c b/kernel/resource.c +index 3f285dce9347..449282e48bb1 100644 +--- a/kernel/resource.c ++++ b/kernel/resource.c +@@ -961,9 +961,10 @@ struct resource * __request_region(struct resource *parent, + if (!conflict) + break; + if (conflict != parent) { +- parent = conflict; +- if (!(conflict->flags & IORESOURCE_BUSY)) ++ if (!(conflict->flags & IORESOURCE_BUSY)) { ++ parent = conflict; + continue; ++ } + } + if (conflict->flags & flags 
& IORESOURCE_MUXED) { + add_wait_queue(&muxed_resource_wait, &wait); +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index bbe957762ace..46afc8cd69dd 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -937,6 +937,13 @@ inline int task_curr(const struct task_struct *p) + return cpu_curr(task_cpu(p)) == p; + } + ++/* ++ * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock, ++ * use the balance_callback list if you want balancing. ++ * ++ * this means any call to check_class_changed() must be followed by a call to ++ * balance_callback(). ++ */ + static inline void check_class_changed(struct rq *rq, struct task_struct *p, + const struct sched_class *prev_class, + int oldprio) +@@ -1423,8 +1430,12 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) + + p->state = TASK_RUNNING; + #ifdef CONFIG_SMP +- if (p->sched_class->task_woken) ++ if (p->sched_class->task_woken) { ++ /* ++ * XXX can drop rq->lock; most likely ok. ++ */ + p->sched_class->task_woken(rq, p); ++ } + + if (rq->idle_stamp) { + u64 delta = rq_clock(rq) - rq->idle_stamp; +@@ -1685,7 +1696,6 @@ out: + */ + int wake_up_process(struct task_struct *p) + { +- WARN_ON(task_is_stopped_or_traced(p)); + return try_to_wake_up(p, TASK_NORMAL, 0); + } + EXPORT_SYMBOL(wake_up_process); +@@ -2179,18 +2189,30 @@ static inline void pre_schedule(struct rq *rq, struct task_struct *prev) + } + + /* rq->lock is NOT held, but preemption is disabled */ +-static inline void post_schedule(struct rq *rq) ++static void __balance_callback(struct rq *rq) + { +- if (rq->post_schedule) { +- unsigned long flags; ++ struct callback_head *head, *next; ++ void (*func)(struct rq *rq); ++ unsigned long flags; + +- raw_spin_lock_irqsave(&rq->lock, flags); +- if (rq->curr->sched_class->post_schedule) +- rq->curr->sched_class->post_schedule(rq); +- raw_spin_unlock_irqrestore(&rq->lock, flags); ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ head = rq->balance_callback; ++ rq->balance_callback = NULL; ++ while (head) { ++ func = (void (*)(struct rq *))head->func; ++ next = head->next; ++ head->next = NULL; ++ head = next; + +- rq->post_schedule = 0; ++ func(rq); + } ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++} ++ ++static inline void balance_callback(struct rq *rq) ++{ ++ if (unlikely(rq->balance_callback)) ++ __balance_callback(rq); + } + + #else +@@ -2199,7 +2221,7 @@ static inline void pre_schedule(struct rq *rq, struct task_struct *p) + { + } + +-static inline void post_schedule(struct rq *rq) ++static inline void balance_callback(struct rq *rq) + { + } + +@@ -2220,7 +2242,7 @@ asmlinkage void schedule_tail(struct task_struct *prev) + * FIXME: do we need to worry about rq being invalidated by the + * task_switch? 
+ */ +- post_schedule(rq); ++ balance_callback(rq); + + #ifdef __ARCH_WANT_UNLOCKED_CTXSW + /* In this case, finish_task_switch does not reenable preemption */ +@@ -2732,7 +2754,7 @@ need_resched: + } else + raw_spin_unlock_irq(&rq->lock); + +- post_schedule(rq); ++ balance_callback(rq); + + sched_preempt_enable_no_resched(); + if (need_resched()) +@@ -2994,7 +3016,11 @@ void rt_mutex_setprio(struct task_struct *p, int prio) + + check_class_changed(rq, p, prev_class, oldprio); + out_unlock: ++ preempt_disable(); /* avoid rq from going away on us */ + __task_rq_unlock(rq); ++ ++ balance_callback(rq); ++ preempt_enable(); + } + #endif + +@@ -3500,10 +3526,17 @@ change: + enqueue_task(rq, p, 0); + + check_class_changed(rq, p, prev_class, oldprio); ++ preempt_disable(); /* avoid rq from going away on us */ + task_rq_unlock(rq, p, &flags); + + rt_mutex_adjust_pi(p); + ++ /* ++ * Run balance callbacks after we've adjusted the PI chain. ++ */ ++ balance_callback(rq); ++ preempt_enable(); ++ + return 0; + } + +@@ -5386,13 +5419,13 @@ static int init_rootdomain(struct root_domain *rd) + { + memset(rd, 0, sizeof(*rd)); + +- if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) ++ if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) + goto out; +- if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) ++ if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) + goto free_span; +- if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) ++ if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) + goto free_online; +- if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) ++ if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) + goto free_dlo_mask; + + init_dl_bw(&rd->dl_bw); +@@ -6902,7 +6935,7 @@ void __init sched_init(void) + rq->sd = NULL; + rq->rd = NULL; + rq->cpu_power = SCHED_POWER_SCALE; +- rq->post_schedule = 0; ++ rq->balance_callback = NULL; + rq->active_balance = 0; + rq->next_balance = jiffies; + rq->push_cpu = 0; +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c +index 8d3c5ddfdfdd..6ab59bb2947b 100644 +--- a/kernel/sched/deadline.c ++++ b/kernel/sched/deadline.c +@@ -210,6 +210,25 @@ static inline int has_pushable_dl_tasks(struct rq *rq) + + static int push_dl_task(struct rq *rq); + ++static DEFINE_PER_CPU(struct callback_head, dl_push_head); ++static DEFINE_PER_CPU(struct callback_head, dl_pull_head); ++ ++static void push_dl_tasks(struct rq *); ++static void pull_dl_task(struct rq *); ++ ++static inline void queue_push_tasks(struct rq *rq) ++{ ++ if (!has_pushable_dl_tasks(rq)) ++ return; ++ ++ queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks); ++} ++ ++static inline void queue_pull_task(struct rq *rq) ++{ ++ queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task); ++} ++ + #else + + static inline +@@ -232,6 +251,13 @@ void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) + { + } + ++static inline void queue_push_tasks(struct rq *rq) ++{ ++} ++ ++static inline void queue_pull_task(struct rq *rq) ++{ ++} + #endif /* CONFIG_SMP */ + + static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags); +@@ -1005,7 +1031,7 @@ struct task_struct *pick_next_task_dl(struct rq *rq) + #endif + + #ifdef CONFIG_SMP +- rq->post_schedule = has_pushable_dl_tasks(rq); ++ queue_push_tasks(rq); + #endif /* CONFIG_SMP */ + + return p; +@@ -1336,15 +1362,16 @@ static void push_dl_tasks(struct rq *rq) + ; + } + +-static int pull_dl_task(struct rq *this_rq) ++static void pull_dl_task(struct rq *this_rq) + { +- int this_cpu = this_rq->cpu, ret = 0, cpu; ++ 
int this_cpu = this_rq->cpu, cpu; + struct task_struct *p; ++ bool resched = false; + struct rq *src_rq; + u64 dmin = LONG_MAX; + + if (likely(!dl_overloaded(this_rq))) +- return 0; ++ return; + + /* + * Match the barrier from dl_set_overloaded; this guarantees that if we +@@ -1399,7 +1426,7 @@ static int pull_dl_task(struct rq *this_rq) + src_rq->curr->dl.deadline)) + goto skip; + +- ret = 1; ++ resched = true; + + deactivate_task(src_rq, p, 0); + set_task_cpu(p, this_cpu); +@@ -1412,7 +1439,8 @@ skip: + double_unlock_balance(this_rq, src_rq); + } + +- return ret; ++ if (resched) ++ resched_task(this_rq->curr); + } + + static void pre_schedule_dl(struct rq *rq, struct task_struct *prev) +@@ -1422,11 +1450,6 @@ static void pre_schedule_dl(struct rq *rq, struct task_struct *prev) + pull_dl_task(rq); + } + +-static void post_schedule_dl(struct rq *rq) +-{ +- push_dl_tasks(rq); +-} +- + /* + * Since the task is not running and a reschedule is not going to happen + * anytime soon on its runqueue, we try pushing it away now. +@@ -1529,7 +1552,7 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p) + * from an overloaded cpu, if any. + */ + if (!rq->dl.dl_nr_running) +- pull_dl_task(rq); ++ queue_pull_task(rq); + #endif + } + +@@ -1539,8 +1562,6 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p) + */ + static void switched_to_dl(struct rq *rq, struct task_struct *p) + { +- int check_resched = 1; +- + /* + * If p is throttled, don't consider the possibility + * of preempting rq->curr, the check will be done right +@@ -1551,12 +1572,12 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p) + + if (p->on_rq || rq->curr != p) { + #ifdef CONFIG_SMP +- if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p)) +- /* Only reschedule if pushing failed */ +- check_resched = 0; +-#endif /* CONFIG_SMP */ +- if (check_resched && task_has_dl_policy(rq->curr)) ++ if (rq->dl.overloaded) ++ queue_push_tasks(rq); ++#else ++ if (task_has_dl_policy(rq->curr)) + check_preempt_curr_dl(rq, p, 0); ++#endif /* CONFIG_SMP */ + } + } + +@@ -1576,15 +1597,14 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p, + * or lowering its prio, so... + */ + if (!rq->dl.overloaded) +- pull_dl_task(rq); ++ queue_pull_task(rq); + + /* + * If we now have a earlier deadline task than p, + * then reschedule, provided p is still on this + * runqueue. 
+ */ +- if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) && +- rq->curr == p) ++ if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline)) + resched_task(p); + #else + /* +@@ -1615,7 +1635,6 @@ const struct sched_class dl_sched_class = { + .rq_online = rq_online_dl, + .rq_offline = rq_offline_dl, + .pre_schedule = pre_schedule_dl, +- .post_schedule = post_schedule_dl, + .task_woken = task_woken_dl, + #endif + +diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c +index 516c3d9ceea1..d08678d38d12 100644 +--- a/kernel/sched/idle_task.c ++++ b/kernel/sched/idle_task.c +@@ -19,11 +19,6 @@ static void pre_schedule_idle(struct rq *rq, struct task_struct *prev) + idle_exit_fair(rq); + rq_last_tick_reset(rq); + } +- +-static void post_schedule_idle(struct rq *rq) +-{ +- idle_enter_fair(rq); +-} + #endif /* CONFIG_SMP */ + /* + * Idle tasks are unconditionally rescheduled: +@@ -37,8 +32,7 @@ static struct task_struct *pick_next_task_idle(struct rq *rq) + { + schedstat_inc(rq, sched_goidle); + #ifdef CONFIG_SMP +- /* Trigger the post schedule to do an idle_enter for CFS */ +- rq->post_schedule = 1; ++ idle_enter_fair(rq); + #endif + return rq->idle; + } +@@ -102,7 +96,6 @@ const struct sched_class idle_sched_class = { + #ifdef CONFIG_SMP + .select_task_rq = select_task_rq_idle, + .pre_schedule = pre_schedule_idle, +- .post_schedule = post_schedule_idle, + #endif + + .set_curr_task = set_curr_task_idle, +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c +index 27b8e836307f..0fb72ae876e7 100644 +--- a/kernel/sched/rt.c ++++ b/kernel/sched/rt.c +@@ -315,6 +315,25 @@ static inline int has_pushable_tasks(struct rq *rq) + return !plist_head_empty(&rq->rt.pushable_tasks); + } + ++static DEFINE_PER_CPU(struct callback_head, rt_push_head); ++static DEFINE_PER_CPU(struct callback_head, rt_pull_head); ++ ++static void push_rt_tasks(struct rq *); ++static void pull_rt_task(struct rq *); ++ ++static inline void queue_push_tasks(struct rq *rq) ++{ ++ if (!has_pushable_tasks(rq)) ++ return; ++ ++ queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks); ++} ++ ++static inline void queue_pull_task(struct rq *rq) ++{ ++ queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task); ++} ++ + static void enqueue_pushable_task(struct rq *rq, struct task_struct *p) + { + plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); +@@ -359,6 +378,9 @@ void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) + { + } + ++static inline void queue_push_tasks(struct rq *rq) ++{ ++} + #endif /* CONFIG_SMP */ + + static inline int on_rt_rq(struct sched_rt_entity *rt_se) +@@ -1349,11 +1371,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq) + dequeue_pushable_task(rq, p); + + #ifdef CONFIG_SMP +- /* +- * We detect this state here so that we can avoid taking the RQ +- * lock again later if there is no need to push +- */ +- rq->post_schedule = has_pushable_tasks(rq); ++ queue_push_tasks(rq); + #endif + + return p; +@@ -1641,14 +1659,15 @@ static void push_rt_tasks(struct rq *rq) + ; + } + +-static int pull_rt_task(struct rq *this_rq) ++static void pull_rt_task(struct rq *this_rq) + { +- int this_cpu = this_rq->cpu, ret = 0, cpu; ++ int this_cpu = this_rq->cpu, cpu; ++ bool resched = false; + struct task_struct *p; + struct rq *src_rq; + + if (likely(!rt_overloaded(this_rq))) +- return 0; ++ return; + + /* + * Match the barrier from rt_set_overloaded; this guarantees that if we +@@ -1705,7 +1724,7 @@ static int pull_rt_task(struct rq *this_rq) + if 
(p->prio < src_rq->curr->prio) + goto skip; + +- ret = 1; ++ resched = true; + + deactivate_task(src_rq, p, 0); + set_task_cpu(p, this_cpu); +@@ -1721,7 +1740,8 @@ skip: + double_unlock_balance(this_rq, src_rq); + } + +- return ret; ++ if (resched) ++ resched_task(this_rq->curr); + } + + static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) +@@ -1731,11 +1751,6 @@ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) + pull_rt_task(rq); + } + +-static void post_schedule_rt(struct rq *rq) +-{ +- push_rt_tasks(rq); +-} +- + /* + * If we are not running and we are not going to reschedule soon, we should + * try to push tasks away now +@@ -1829,8 +1844,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p) + if (!p->on_rq || rq->rt.rt_nr_running) + return; + +- if (pull_rt_task(rq)) +- resched_task(rq->curr); ++ queue_pull_task(rq); + } + + void init_sched_rt_class(void) +@@ -1851,8 +1865,6 @@ void init_sched_rt_class(void) + */ + static void switched_to_rt(struct rq *rq, struct task_struct *p) + { +- int check_resched = 1; +- + /* + * If we are already running, then there's nothing + * that needs to be done. But if we are not running +@@ -1862,13 +1874,12 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p) + */ + if (p->on_rq && rq->curr != p) { + #ifdef CONFIG_SMP +- if (rq->rt.overloaded && push_rt_task(rq) && +- /* Don't resched if we changed runqueues */ +- rq != task_rq(p)) +- check_resched = 0; +-#endif /* CONFIG_SMP */ +- if (check_resched && p->prio < rq->curr->prio) ++ if (rq->rt.overloaded) ++ queue_push_tasks(rq); ++#else ++ if (p->prio < rq->curr->prio) + resched_task(rq->curr); ++#endif /* CONFIG_SMP */ + } + } + +@@ -1889,14 +1900,13 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) + * may need to pull tasks to this runqueue. + */ + if (oldprio < p->prio) +- pull_rt_task(rq); ++ queue_pull_task(rq); ++ + /* + * If there's a higher priority task waiting to run +- * then reschedule. Note, the above pull_rt_task +- * can release the rq lock and p could migrate. +- * Only reschedule if p is still on the same runqueue. ++ * then reschedule. 
+ */ +- if (p->prio > rq->rt.highest_prio.curr && rq->curr == p) ++ if (p->prio > rq->rt.highest_prio.curr) + resched_task(p); + #else + /* For UP simply resched on drop of prio */ +@@ -2008,7 +2018,6 @@ const struct sched_class rt_sched_class = { + .rq_online = rq_online_rt, + .rq_offline = rq_offline_rt, + .pre_schedule = pre_schedule_rt, +- .post_schedule = post_schedule_rt, + .task_woken = task_woken_rt, + .switched_from = switched_from_rt, + #endif +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index 835b6efa8bd6..675e147a86f2 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -587,9 +587,10 @@ struct rq { + + unsigned long cpu_power; + ++ struct callback_head *balance_callback; ++ + unsigned char idle_balance; + /* For active balancing */ +- int post_schedule; + int active_balance; + int push_cpu; + struct cpu_stop_work active_balance_work; +@@ -690,6 +691,21 @@ extern int migrate_swap(struct task_struct *, struct task_struct *); + + #ifdef CONFIG_SMP + ++static inline void ++queue_balance_callback(struct rq *rq, ++ struct callback_head *head, ++ void (*func)(struct rq *rq)) ++{ ++ lockdep_assert_held(&rq->lock); ++ ++ if (unlikely(head->next)) ++ return; ++ ++ head->func = (void (*)(struct callback_head *))func; ++ head->next = rq->balance_callback; ++ rq->balance_callback = head; ++} ++ + #define rcu_dereference_check_sched_domain(p) \ + rcu_dereference_check((p), \ + lockdep_is_held(&sched_domains_mutex)) +@@ -1131,7 +1147,6 @@ struct sched_class { + void (*migrate_task_rq)(struct task_struct *p, int next_cpu); + + void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); +- void (*post_schedule) (struct rq *this_rq); + void (*task_waking) (struct task_struct *task); + void (*task_woken) (struct rq *this_rq, struct task_struct *task); + +diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c +index ce033c7aa2e8..9cff0ab82b63 100644 +--- a/kernel/time/posix-clock.c ++++ b/kernel/time/posix-clock.c +@@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf, + static unsigned int posix_clock_poll(struct file *fp, poll_table *wait) + { + struct posix_clock *clk = get_posix_clock(fp); +- int result = 0; ++ unsigned int result = 0; + + if (!clk) +- return -ENODEV; ++ return POLLERR; + + if (clk->ops.poll) + result = clk->ops.poll(clk, fp, wait); +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index da41de9dc319..c798ed2fc281 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -1949,12 +1949,6 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) + goto again; + } + +-static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) +-{ +- cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; +- cpu_buffer->reader_page->read = 0; +-} +- + static void rb_inc_iter(struct ring_buffer_iter *iter) + { + struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; +@@ -3592,7 +3586,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) + + /* Finally update the reader page to the new head */ + cpu_buffer->reader_page = reader; +- rb_reset_reader_page(cpu_buffer); ++ cpu_buffer->reader_page->read = 0; + + if (overwrite != cpu_buffer->last_overrun) { + cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; +@@ -3602,6 +3596,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) + goto again; + + out: ++ /* Update the read_stamp on the first event */ ++ if (reader && reader->read == 0) ++ cpu_buffer->read_stamp 
= reader->page->time_stamp; ++ + arch_spin_unlock(&cpu_buffer->lock); + local_irq_restore(flags); + +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c +index c6646a58d23e..bb1ac9cbe30a 100644 +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -606,7 +606,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos) + * The ftrace subsystem is for showing formats only. + * They can not be enabled or disabled via the event files. + */ +- if (call->class && call->class->reg) ++ if (call->class && call->class->reg && ++ !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) + return file; + } + +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index 646a8b81bee1..423c9e37a9e7 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -1475,13 +1475,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, + timer_stats_timer_set_start_info(&dwork->timer); + + dwork->wq = wq; +- /* timer isn't guaranteed to run in this cpu, record earlier */ +- if (cpu == WORK_CPU_UNBOUND) +- cpu = raw_smp_processor_id(); + dwork->cpu = cpu; + timer->expires = jiffies + delay; + +- add_timer_on(timer, cpu); ++ if (unlikely(cpu != WORK_CPU_UNBOUND)) ++ add_timer_on(timer, cpu); ++ else ++ add_timer(timer); + } + + /** +diff --git a/lib/devres.c b/lib/devres.c +index 823533138fa0..20afaf181b27 100644 +--- a/lib/devres.c ++++ b/lib/devres.c +@@ -423,7 +423,7 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask) + if (!iomap) + return; + +- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { ++ for (i = 0; i < PCIM_IOMAP_MAX; i++) { + if (!(mask & (1 << i))) + continue; + +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c +index 2e87eecec8f6..04dd542697a7 100644 +--- a/net/ceph/messenger.c ++++ b/net/ceph/messenger.c +@@ -2279,7 +2279,7 @@ static int read_partial_message(struct ceph_connection *con) + con->in_base_pos = -front_len - middle_len - data_len - + sizeof(m->footer); + con->in_tag = CEPH_MSGR_TAG_READY; +- return 0; ++ return 1; + } else if ((s64)seq - (s64)con->in_seq > 1) { + pr_err("read_partial_message bad seq %lld expected %lld\n", + seq, con->in_seq + 1); +@@ -2312,7 +2312,7 @@ static int read_partial_message(struct ceph_connection *con) + sizeof(m->footer); + con->in_tag = CEPH_MSGR_TAG_READY; + con->in_seq++; +- return 0; ++ return 1; + } + + BUG_ON(!con->in_msg); +diff --git a/net/core/scm.c b/net/core/scm.c +index d30eb057fa7b..cad57a1390dd 100644 +--- a/net/core/scm.c ++++ b/net/core/scm.c +@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp) + *fplp = fpl; + fpl->count = 0; + fpl->max = SCM_MAX_FD; ++ fpl->user = NULL; + } + fpp = &fpl->fp[fpl->count]; + +@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp) + *fpp++ = file; + fpl->count++; + } ++ ++ if (!fpl->user) ++ fpl->user = get_uid(current_user()); ++ + return num; + } + +@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm) + scm->fp = NULL; + for (i=fpl->count-1; i>=0; i--) + fput(fpl->fp[i]); ++ free_uid(fpl->user); + kfree(fpl); + } + } +@@ -337,6 +343,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl) + for (i = 0; i < fpl->count; i++) + get_file(fpl->fp[i]); + new_fpl->max = new_fpl->count; ++ new_fpl->user = get_uid(fpl->user); + } + return new_fpl; + } +diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c +index c49dcd0284a0..56dd8ac6d28b 100644 +--- a/net/ipv4/netfilter/ipt_rpfilter.c ++++ b/net/ipv4/netfilter/ipt_rpfilter.c +@@ -61,9 +61,7 
@@ static bool rpfilter_lookup_reverse(struct flowi4 *fl4, + if (FIB_RES_DEV(res) == dev) + dev_match = true; + #endif +- if (dev_match || flags & XT_RPFILTER_LOOSE) +- return FIB_RES_NH(res).nh_scope <= RT_SCOPE_HOST; +- return dev_match; ++ return dev_match || flags & XT_RPFILTER_LOOSE; + } + + static bool rpfilter_is_local(const struct sk_buff *skb) +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index 3f0ec063d7f8..7b74fca4d850 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -4793,6 +4793,21 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write, + return ret; + } + ++static ++int addrconf_sysctl_mtu(struct ctl_table *ctl, int write, ++ void __user *buffer, size_t *lenp, loff_t *ppos) ++{ ++ struct inet6_dev *idev = ctl->extra1; ++ int min_mtu = IPV6_MIN_MTU; ++ struct ctl_table lctl; ++ ++ lctl = *ctl; ++ lctl.extra1 = &min_mtu; ++ lctl.extra2 = idev ? &idev->dev->mtu : NULL; ++ ++ return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos); ++} ++ + static void dev_disable_change(struct inet6_dev *idev) + { + struct netdev_notifier_info info; +@@ -4944,7 +4959,7 @@ static struct addrconf_sysctl_table + .data = &ipv6_devconf.mtu6, + .maxlen = sizeof(int), + .mode = 0644, +- .proc_handler = proc_dointvec, ++ .proc_handler = addrconf_sysctl_mtu, + }, + { + .procname = "accept_ra", +diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c +index a0d17270117c..bd174540eb21 100644 +--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c ++++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c +@@ -37,12 +37,13 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr, + } + + static void +-synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb, ++synproxy_send_tcp(const struct synproxy_net *snet, ++ const struct sk_buff *skb, struct sk_buff *nskb, + struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo, + struct ipv6hdr *niph, struct tcphdr *nth, + unsigned int tcp_hdr_size) + { +- struct net *net = nf_ct_net((struct nf_conn *)nfct); ++ struct net *net = nf_ct_net(snet->tmpl); + struct dst_entry *dst; + struct flowi6 fl6; + +@@ -83,7 +84,8 @@ free_nskb: + } + + static void +-synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th, ++synproxy_send_client_synack(const struct synproxy_net *snet, ++ const struct sk_buff *skb, const struct tcphdr *th, + const struct synproxy_options *opts) + { + struct sk_buff *nskb; +@@ -119,7 +121,7 @@ synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th, + + synproxy_build_options(nth, opts); + +- synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, ++ synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, + niph, nth, tcp_hdr_size); + } + +@@ -163,7 +165,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet, + + synproxy_build_options(nth, opts); + +- synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, ++ synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, + niph, nth, tcp_hdr_size); + } + +@@ -203,7 +205,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet, + + synproxy_build_options(nth, opts); + +- synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); ++ synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); + } + + static void +@@ -241,7 +243,7 @@ synproxy_send_client_ack(const struct synproxy_net *snet, + + synproxy_build_options(nth, opts); + +- synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); ++ synproxy_send_tcp(snet, skb, 
nskb, NULL, 0, niph, nth, tcp_hdr_size); + } + + static bool +@@ -301,7 +303,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par) + XT_SYNPROXY_OPT_SACK_PERM | + XT_SYNPROXY_OPT_ECN); + +- synproxy_send_client_synack(skb, th, &opts); ++ synproxy_send_client_synack(snet, skb, th, &opts); + return NF_DROP; + + } else if (th->ack && !(th->fin || th->rst || th->syn)) { +diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c +index 7d050ed6fe5a..6d28bd434ce8 100644 +--- a/net/mac80211/mesh_pathtbl.c ++++ b/net/mac80211/mesh_pathtbl.c +@@ -746,10 +746,8 @@ void mesh_plink_broken(struct sta_info *sta) + static void mesh_path_node_reclaim(struct rcu_head *rp) + { + struct mpath_node *node = container_of(rp, struct mpath_node, rcu); +- struct ieee80211_sub_if_data *sdata = node->mpath->sdata; + + del_timer_sync(&node->mpath->timer); +- atomic_dec(&sdata->u.mesh.mpaths); + kfree(node->mpath); + kfree(node); + } +@@ -757,8 +755,9 @@ static void mesh_path_node_reclaim(struct rcu_head *rp) + /* needs to be called with the corresponding hashwlock taken */ + static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) + { +- struct mesh_path *mpath; +- mpath = node->mpath; ++ struct mesh_path *mpath = node->mpath; ++ struct ieee80211_sub_if_data *sdata = node->mpath->sdata; ++ + spin_lock(&mpath->state_lock); + mpath->flags |= MESH_PATH_RESOLVING; + if (mpath->is_gate) +@@ -766,6 +765,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) + hlist_del_rcu(&node->list); + call_rcu(&node->rcu, mesh_path_node_reclaim); + spin_unlock(&mpath->state_lock); ++ atomic_dec(&sdata->u.mesh.mpaths); + atomic_dec(&tbl->entries); + } + +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index 99de2409f731..4e8d90b8fc01 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -3316,9 +3316,9 @@ EXPORT_SYMBOL_GPL(nft_data_init); + */ + void nft_data_uninit(const struct nft_data *data, enum nft_data_types type) + { +- switch (type) { +- case NFT_DATA_VALUE: ++ if (type < NFT_DATA_VERDICT) + return; ++ switch (type) { + case NFT_DATA_VERDICT: + return nft_verdict_uninit(data); + default: +diff --git a/net/rds/send.c b/net/rds/send.c +index a82fb660ec00..44222c0607c7 100644 +--- a/net/rds/send.c ++++ b/net/rds/send.c +@@ -955,11 +955,13 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, + release_sock(sk); + } + +- /* racing with another thread binding seems ok here */ ++ lock_sock(sk); + if (daddr == 0 || rs->rs_bound_addr == 0) { ++ release_sock(sk); + ret = -ENOTCONN; /* XXX not a great errno */ + goto out; + } ++ release_sock(sk); + + /* size of rm including all sgs */ + ret = rds_rm_size(msg, payload_len); +diff --git a/net/rfkill/core.c b/net/rfkill/core.c +index ed7e0b4e7f90..4b9dc2460772 100644 +--- a/net/rfkill/core.c ++++ b/net/rfkill/core.c +@@ -49,7 +49,6 @@ + struct rfkill { + spinlock_t lock; + +- const char *name; + enum rfkill_type type; + + unsigned long state; +@@ -73,6 +72,7 @@ struct rfkill { + struct delayed_work poll_work; + struct work_struct uevent_work; + struct work_struct sync_work; ++ char name[]; + }; + #define to_rfkill(d) container_of(d, struct rfkill, dev) + +@@ -861,14 +861,14 @@ struct rfkill * __must_check rfkill_alloc(const char *name, + if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES)) + return NULL; + +- rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); ++ rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, 
GFP_KERNEL); + if (!rfkill) + return NULL; + + spin_lock_init(&rfkill->lock); + INIT_LIST_HEAD(&rfkill->node); + rfkill->type = type; +- rfkill->name = name; ++ strcpy(rfkill->name, name); + rfkill->ops = ops; + rfkill->data = ops_data; + +@@ -1078,17 +1078,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait) + return res; + } + +-static bool rfkill_readable(struct rfkill_data *data) +-{ +- bool r; +- +- mutex_lock(&data->mtx); +- r = !list_empty(&data->events); +- mutex_unlock(&data->mtx); +- +- return r; +-} +- + static ssize_t rfkill_fop_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) + { +@@ -1105,8 +1094,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf, + goto out; + } + mutex_unlock(&data->mtx); ++ /* since we re-check and it just compares pointers, ++ * using !list_empty() without locking isn't a problem ++ */ + ret = wait_event_interruptible(data->read_wait, +- rfkill_readable(data)); ++ !list_empty(&data->events)); + mutex_lock(&data->mtx); + + if (ret) +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c +index 0adc66caae2f..07edbcd8697e 100644 +--- a/net/sunrpc/cache.c ++++ b/net/sunrpc/cache.c +@@ -1230,7 +1230,7 @@ int qword_get(char **bpp, char *dest, int bufsize) + if (bp[0] == '\\' && bp[1] == 'x') { + /* HEX STRING */ + bp += 2; +- while (len < bufsize) { ++ while (len < bufsize - 1) { + int h, l; + + h = hex_to_bin(bp[0]); +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index 0cd18c240250..ab2eeb1cb32c 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -1469,7 +1469,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) + UNIXCB(skb).fp = NULL; + + for (i = scm->fp->count-1; i >= 0; i--) +- unix_notinflight(scm->fp->fp[i]); ++ unix_notinflight(scm->fp->user, scm->fp->fp[i]); + } + + static void unix_destruct_scm(struct sk_buff *skb) +@@ -1534,7 +1534,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) + return -ENOMEM; + + for (i = scm->fp->count - 1; i >= 0; i--) +- unix_inflight(scm->fp->fp[i]); ++ unix_inflight(scm->fp->user, scm->fp->fp[i]); + return max_level; + } + +diff --git a/net/unix/garbage.c b/net/unix/garbage.c +index 06730fe6ad9d..a72182d6750f 100644 +--- a/net/unix/garbage.c ++++ b/net/unix/garbage.c +@@ -122,7 +122,7 @@ struct sock *unix_get_socket(struct file *filp) + * descriptor if it is for an AF_UNIX socket. 
+ */ + +-void unix_inflight(struct file *fp) ++void unix_inflight(struct user_struct *user, struct file *fp) + { + struct sock *s = unix_get_socket(fp); + +@@ -139,11 +139,11 @@ void unix_inflight(struct file *fp) + } + unix_tot_inflight++; + } +- fp->f_cred->user->unix_inflight++; ++ user->unix_inflight++; + spin_unlock(&unix_gc_lock); + } + +-void unix_notinflight(struct file *fp) ++void unix_notinflight(struct user_struct *user, struct file *fp) + { + struct sock *s = unix_get_socket(fp); + +@@ -157,7 +157,7 @@ void unix_notinflight(struct file *fp) + list_del_init(&u->link); + unix_tot_inflight--; + } +- fp->f_cred->user->unix_inflight--; ++ user->unix_inflight--; + spin_unlock(&unix_gc_lock); + } + +diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c +index 9c22317778eb..ee625e3a56ba 100644 +--- a/scripts/recordmcount.c ++++ b/scripts/recordmcount.c +@@ -189,6 +189,20 @@ static void *mmap_file(char const *fname) + addr = umalloc(sb.st_size); + uread(fd_map, addr, sb.st_size); + } ++ if (sb.st_nlink != 1) { ++ /* file is hard-linked, break the hard link */ ++ close(fd_map); ++ if (unlink(fname) < 0) { ++ perror(fname); ++ fail_file(); ++ } ++ fd_map = open(fname, O_RDWR | O_CREAT, sb.st_mode); ++ if (fd_map < 0) { ++ perror(fname); ++ fail_file(); ++ } ++ uwrite(fd_map, addr, sb.st_size); ++ } + return addr; + } + +diff --git a/tools/Makefile b/tools/Makefile +index feec3ad5fd09..6e8ac8982149 100644 +--- a/tools/Makefile ++++ b/tools/Makefile +@@ -24,6 +24,10 @@ help: + @echo ' from the kernel command line to build and install one of' + @echo ' the tools above' + @echo '' ++ @echo ' $$ make tools/all' ++ @echo '' ++ @echo ' builds all tools.' ++ @echo '' + @echo ' $$ make tools/install' + @echo '' + @echo ' installs all tools.' +@@ -58,6 +62,11 @@ turbostat x86_energy_perf_policy: FORCE + tmon: FORCE + $(call descend,thermal/$@) + ++all: acpi cgroup cpupower firewire lguest \ ++ perf selftests turbostat usb \ ++ virtio vm net x86_energy_perf_policy \ ++ tmon ++ + acpi_install: + $(call descend,power/$(@:_install=),install) + +diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c +index f2c80d5451c3..919095029528 100644 +--- a/virt/kvm/async_pf.c ++++ b/virt/kvm/async_pf.c +@@ -152,7 +152,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, + * do alloc nowait since if we are going to sleep anyway we + * may as well sleep faulting in page + */ +- work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT); ++ work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN); + if (!work) + return 0; + diff --git a/patch/kernel/cubox-default/patch-3.14.63-64.patch b/patch/kernel/cubox-default/patch-3.14.63-64.patch new file mode 100644 index 000000000..4e1bc88a6 --- /dev/null +++ b/patch/kernel/cubox-default/patch-3.14.63-64.patch @@ -0,0 +1,2749 @@ +diff --git a/Makefile b/Makefile +index 0843ef4cc0a4..de41fa82652f 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 63 ++SUBLEVEL = 64 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c +index 81e6ae0220bc..887535c4c93d 100644 +--- a/arch/mips/kernel/traps.c ++++ b/arch/mips/kernel/traps.c +@@ -688,15 +688,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode) + asmlinkage void do_ov(struct pt_regs *regs) + { + enum ctx_state prev_state; +- siginfo_t info; ++ siginfo_t info = { ++ .si_signo = SIGFPE, ++ .si_code = FPE_INTOVF, ++ .si_addr = (void __user *)regs->cp0_epc, ++ }; + + 
prev_state = exception_enter(); + die_if_kernel("Integer overflow", regs); + +- info.si_code = FPE_INTOVF; +- info.si_signo = SIGFPE; +- info.si_errno = 0; +- info.si_addr = (void __user *) regs->cp0_epc; + force_sig_info(SIGFPE, &info, current); + exception_exit(prev_state); + } +@@ -797,7 +797,7 @@ out: + static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, + const char *str) + { +- siginfo_t info; ++ siginfo_t info = { 0 }; + char b[40]; + + #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP +@@ -825,7 +825,6 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, + else + info.si_code = FPE_INTOVF; + info.si_signo = SIGFPE; +- info.si_errno = 0; + info.si_addr = (void __user *) regs->cp0_epc; + force_sig_info(SIGFPE, &info, current); + break; +diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S +index 92a2e9333620..b74ac9c5710b 100644 +--- a/arch/x86/ia32/ia32entry.S ++++ b/arch/x86/ia32/ia32entry.S +@@ -422,6 +422,7 @@ ENTRY(ia32_syscall) + /*CFI_REL_OFFSET cs,CS-RIP*/ + CFI_REL_OFFSET rip,RIP-RIP + PARAVIRT_ADJUST_EXCEPTION_FRAME ++ ASM_CLAC /* Do this early to minimize exposure */ + SWAPGS + /* + * No need to follow this irqs on/off section: the syscall +diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c +index 3a2ae4c88948..398c7a908c17 100644 +--- a/arch/x86/kernel/acpi/sleep.c ++++ b/arch/x86/kernel/acpi/sleep.c +@@ -16,6 +16,7 @@ + #include + #include + ++#include + #include "../../realmode/rm/wakeup.h" + #include "sleep.h" + +@@ -107,7 +108,13 @@ int x86_acpi_suspend_lowlevel(void) + saved_magic = 0x123456789abcdef0L; + #endif /* CONFIG_64BIT */ + ++ /* ++ * Pause/unpause graph tracing around do_suspend_lowlevel as it has ++ * inconsistent call/return info after it jumps to the wakeup vector. ++ */ ++ pause_graph_tracing(); + do_suspend_lowlevel(); ++ unpause_graph_tracing(); + return 0; + } + +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index 6fecf0bde105..1e82d2a1e205 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -674,19 +674,18 @@ static int ata_ioc32(struct ata_port *ap) + int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, + int cmd, void __user *arg) + { +- int val = -EINVAL, rc = -EINVAL; ++ unsigned long val; ++ int rc = -EINVAL; + unsigned long flags; + + switch (cmd) { +- case ATA_IOC_GET_IO32: ++ case HDIO_GET_32BIT: + spin_lock_irqsave(ap->lock, flags); + val = ata_ioc32(ap); + spin_unlock_irqrestore(ap->lock, flags); +- if (copy_to_user(arg, &val, 1)) +- return -EFAULT; +- return 0; ++ return put_user(val, (unsigned long __user *)arg); + +- case ATA_IOC_SET_IO32: ++ case HDIO_SET_32BIT: + val = (unsigned long) arg; + rc = 0; + spin_lock_irqsave(ap->lock, flags); +diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c +index b335c6ab5efe..b2dd473237e8 100644 +--- a/drivers/edac/edac_mc_sysfs.c ++++ b/drivers/edac/edac_mc_sysfs.c +@@ -973,21 +973,26 @@ nomem: + */ + int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) + { ++ char *name; + int i, err; + + /* + * The memory controller needs its own bus, in order to avoid + * namespace conflicts at /sys/bus/edac. 
+ */ +- mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); +- if (!mci->bus->name) ++ name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); ++ if (!name) + return -ENOMEM; + ++ mci->bus->name = name; ++ + edac_dbg(0, "creating bus %s\n", mci->bus->name); + + err = bus_register(mci->bus); +- if (err < 0) ++ if (err < 0) { ++ kfree(name); + return err; ++ } + + /* get the /sys/devices/system/edac subsys reference */ + mci->dev.type = &mci_attr_type; +@@ -1071,7 +1076,8 @@ fail: + fail2: + device_unregister(&mci->dev); + bus_unregister(mci->bus); +- kfree(mci->bus->name); ++ kfree(name); ++ + return err; + } + +@@ -1102,10 +1108,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) + + void edac_unregister_sysfs(struct mem_ctl_info *mci) + { ++ const char *name = mci->bus->name; ++ + edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev)); + device_unregister(&mci->dev); + bus_unregister(mci->bus); +- kfree(mci->bus->name); ++ kfree(name); + } + + static void mc_attr_release(struct device *dev) +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c +index c0f284230a39..ed8d93cbd1ca 100644 +--- a/drivers/gpu/drm/ast/ast_main.c ++++ b/drivers/gpu/drm/ast/ast_main.c +@@ -124,7 +124,7 @@ static int ast_get_dram_info(struct drm_device *dev) + } while (ast_read32(ast, 0x10000) != 0x01); + data = ast_read32(ast, 0x10004); + +- if (data & 0x400) ++ if (data & 0x40) + ast->dram_bus_width = 16; + else + ast->dram_bus_width = 32; +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c +index a49ce4a6e72f..1320a81df792 100644 +--- a/drivers/infiniband/ulp/isert/ib_isert.c ++++ b/drivers/infiniband/ulp/isert/ib_isert.c +@@ -2071,6 +2071,24 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) + return isert_post_response(isert_conn, isert_cmd); + } + ++static void ++isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) ++{ ++ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); ++ struct isert_conn *isert_conn = (struct isert_conn *)conn->context; ++ struct isert_device *device = isert_conn->conn_device; ++ ++ spin_lock_bh(&conn->cmd_lock); ++ if (!list_empty(&cmd->i_conn_node)) ++ list_del_init(&cmd->i_conn_node); ++ spin_unlock_bh(&conn->cmd_lock); ++ ++ if (cmd->data_direction == DMA_TO_DEVICE) ++ iscsit_stop_dataout_timer(cmd); ++ ++ device->unreg_rdma_mem(isert_cmd, isert_conn); ++} ++ + static int + isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, + bool nopout_response) +@@ -2999,6 +3017,7 @@ static struct iscsit_transport iser_target_transport = { + .iscsit_get_dataout = isert_get_dataout, + .iscsit_queue_data_in = isert_put_datain, + .iscsit_queue_status = isert_put_response, ++ .iscsit_aborted_task = isert_aborted_task, + }; + + static int __init isert_init(void) +diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c +index 0097b8dae5bc..727a88d9a708 100644 +--- a/drivers/infiniband/ulp/srpt/ib_srpt.c ++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c +@@ -3093,6 +3093,14 @@ static void srpt_queue_tm_rsp(struct se_cmd *cmd) + srpt_queue_response(cmd); + } + ++static void srpt_aborted_task(struct se_cmd *cmd) ++{ ++ struct srpt_send_ioctx *ioctx = container_of(cmd, ++ struct srpt_send_ioctx, cmd); ++ ++ srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx); ++} ++ + static int srpt_queue_status(struct se_cmd *cmd) + { + struct srpt_send_ioctx *ioctx; +@@ -3940,6 +3948,7 @@ static struct target_core_fabric_ops srpt_template = { + .queue_data_in = 
srpt_queue_data_in, + .queue_status = srpt_queue_status, + .queue_tm_rsp = srpt_queue_tm_rsp, ++ .aborted_task = srpt_aborted_task, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c +index 28b4bea7c109..a8e52ef6e266 100644 +--- a/drivers/iommu/amd_iommu_init.c ++++ b/drivers/iommu/amd_iommu_init.c +@@ -226,6 +226,10 @@ static enum iommu_init_state init_state = IOMMU_START_STATE; + static int amd_iommu_enable_interrupts(void); + static int __init iommu_go_to_state(enum iommu_init_state state); + ++static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu, ++ u8 bank, u8 cntr, u8 fxn, ++ u64 *value, bool is_write); ++ + static inline void update_last_devid(u16 devid) + { + if (devid > amd_iommu_last_bdf) +@@ -1182,8 +1186,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu) + amd_iommu_pc_present = true; + + /* Check if the performance counters can be written to */ +- if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) || +- (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) || ++ if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) || ++ (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) || + (val != val2)) { + pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n"); + amd_iommu_pc_present = false; +@@ -2314,22 +2318,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid) + } + EXPORT_SYMBOL(amd_iommu_pc_get_max_counters); + +-int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, ++static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu, ++ u8 bank, u8 cntr, u8 fxn, + u64 *value, bool is_write) + { +- struct amd_iommu *iommu; + u32 offset; + u32 max_offset_lim; + +- /* Make sure the IOMMU PC resource is available */ +- if (!amd_iommu_pc_present) +- return -ENODEV; +- +- /* Locate the iommu associated with the device ID */ +- iommu = amd_iommu_rlookup_table[devid]; +- + /* Check for valid iommu and pc register indexing */ +- if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7))) ++ if (WARN_ON((fxn > 0x28) || (fxn & 7))) + return -ENODEV; + + offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn); +@@ -2353,3 +2350,16 @@ int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, + return 0; + } + EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val); ++ ++int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, ++ u64 *value, bool is_write) ++{ ++ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; ++ ++ /* Make sure the IOMMU PC resource is available */ ++ if (!amd_iommu_pc_present || iommu == NULL) ++ return -ENODEV; ++ ++ return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn, ++ value, is_write); ++} +diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c +index 2a1b6e037e1a..0134ba32a057 100644 +--- a/drivers/mtd/ubi/upd.c ++++ b/drivers/mtd/ubi/upd.c +@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, + vol->changing_leb = 1; + vol->ch_lnum = req->lnum; + +- vol->upd_buf = vmalloc(req->bytes); ++ vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size)); + if (!vol->upd_buf) + return -ENOMEM; + +diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c +index 1817f3f2b02d..f46c0a6c5016 100644 +--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c ++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c +@@ -684,6 +684,20 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) + qlt_xmit_tm_rsp(mcmd); + } + ++static 
void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) ++{ ++ struct qla_tgt_cmd *cmd = container_of(se_cmd, ++ struct qla_tgt_cmd, se_cmd); ++ struct scsi_qla_host *vha = cmd->vha; ++ struct qla_hw_data *ha = vha->hw; ++ ++ if (!cmd->sg_mapped) ++ return; ++ ++ pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); ++ cmd->sg_mapped = 0; ++} ++ + /* Local pointer to allocated TCM configfs fabric module */ + struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; + struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; +@@ -1886,6 +1900,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = { + .queue_data_in = tcm_qla2xxx_queue_data_in, + .queue_status = tcm_qla2xxx_queue_status, + .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, ++ .aborted_task = tcm_qla2xxx_aborted_task, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c +@@ -1935,6 +1950,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { + .queue_data_in = tcm_qla2xxx_queue_data_in, + .queue_status = tcm_qla2xxx_queue_status, + .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, ++ .aborted_task = tcm_qla2xxx_aborted_task, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index c066e6e298c3..5600eab07865 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -500,6 +500,18 @@ static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd) + return 0; + } + ++static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) ++{ ++ bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD); ++ ++ spin_lock_bh(&conn->cmd_lock); ++ if (!list_empty(&cmd->i_conn_node)) ++ list_del_init(&cmd->i_conn_node); ++ spin_unlock_bh(&conn->cmd_lock); ++ ++ __iscsit_free_cmd(cmd, scsi_cmd, true); ++} ++ + static struct iscsit_transport iscsi_target_transport = { + .name = "iSCSI/TCP", + .transport_type = ISCSI_TCP, +@@ -514,6 +526,7 @@ static struct iscsit_transport iscsi_target_transport = { + .iscsit_response_queue = iscsit_response_queue, + .iscsit_queue_data_in = iscsit_queue_rsp, + .iscsit_queue_status = iscsit_queue_rsp, ++ .iscsit_aborted_task = iscsit_aborted_task, + }; + + static int __init iscsi_target_init_module(void) +diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c +index 83465617ad6d..4a28c5f0dfd1 100644 +--- a/drivers/target/iscsi/iscsi_target_configfs.c ++++ b/drivers/target/iscsi/iscsi_target_configfs.c +@@ -1815,6 +1815,13 @@ static void lio_queue_tm_rsp(struct se_cmd *se_cmd) + iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); + } + ++static void lio_aborted_task(struct se_cmd *se_cmd) ++{ ++ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); ++ ++ cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd); ++} ++ + static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg) + { + struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; +@@ -2013,6 +2020,7 @@ int iscsi_target_register_configfs(void) + fabric->tf_ops.queue_data_in = &lio_queue_data_in; + fabric->tf_ops.queue_status = &lio_queue_status; + fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp; ++ fabric->tf_ops.aborted_task = &lio_aborted_task; + /* + * Setup function pointers for generic logic in target_core_fabric_configfs.c + */ +diff --git a/drivers/target/iscsi/iscsi_target_util.c 
b/drivers/target/iscsi/iscsi_target_util.c +index 1e406af4ee47..2e96ae6cf3c1 100644 +--- a/drivers/target/iscsi/iscsi_target_util.c ++++ b/drivers/target/iscsi/iscsi_target_util.c +@@ -705,8 +705,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd) + } + EXPORT_SYMBOL(iscsit_release_cmd); + +-static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, +- bool check_queues) ++void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, ++ bool check_queues) + { + struct iscsi_conn *conn = cmd->conn; + +diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h +index 561a424d1980..a68508c4fec8 100644 +--- a/drivers/target/iscsi/iscsi_target_util.h ++++ b/drivers/target/iscsi/iscsi_target_util.h +@@ -30,6 +30,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co + extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); + extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); + extern void iscsit_release_cmd(struct iscsi_cmd *); ++extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool); + extern void iscsit_free_cmd(struct iscsi_cmd *, bool); + extern int iscsit_check_session_usage_count(struct iscsi_session *); + extern void iscsit_dec_session_usage_count(struct iscsi_session *); +diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c +index 67c802c93ef3..fd974d69458b 100644 +--- a/drivers/target/loopback/tcm_loop.c ++++ b/drivers/target/loopback/tcm_loop.c +@@ -892,6 +892,11 @@ static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) + wake_up(&tl_tmr->tl_tmr_wait); + } + ++static void tcm_loop_aborted_task(struct se_cmd *se_cmd) ++{ ++ return; ++} ++ + static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba) + { + switch (tl_hba->tl_proto_id) { +@@ -1456,6 +1461,7 @@ static int tcm_loop_register_configfs(void) + fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in; + fabric->tf_ops.queue_status = &tcm_loop_queue_status; + fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; ++ fabric->tf_ops.aborted_task = &tcm_loop_aborted_task; + + /* + * Setup function pointers for generic logic in target_core_fabric_configfs.c +diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c +index 24884cac19ce..ad04ea928e4f 100644 +--- a/drivers/target/sbp/sbp_target.c ++++ b/drivers/target/sbp/sbp_target.c +@@ -1846,6 +1846,11 @@ static void sbp_queue_tm_rsp(struct se_cmd *se_cmd) + { + } + ++static void sbp_aborted_task(struct se_cmd *se_cmd) ++{ ++ return; ++} ++ + static int sbp_check_stop_free(struct se_cmd *se_cmd) + { + struct sbp_target_request *req = container_of(se_cmd, +@@ -2526,6 +2531,7 @@ static struct target_core_fabric_ops sbp_ops = { + .queue_data_in = sbp_queue_data_in, + .queue_status = sbp_queue_status, + .queue_tm_rsp = sbp_queue_tm_rsp, ++ .aborted_task = sbp_aborted_task, + .check_stop_free = sbp_check_stop_free, + + .fabric_make_wwn = sbp_make_tport, +diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c +index f30385385544..756def38c77a 100644 +--- a/drivers/target/target_core_configfs.c ++++ b/drivers/target/target_core_configfs.c +@@ -457,6 +457,10 @@ static int target_fabric_tf_ops_check( + pr_err("Missing tfo->queue_tm_rsp()\n"); + return -EINVAL; + } ++ if (!tfo->aborted_task) { ++ pr_err("Missing tfo->aborted_task()\n"); ++ return -EINVAL; ++ } + /* + * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() + * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in +diff 
--git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c +index 093b8cb85de7..e366b812f0e1 100644 +--- a/drivers/target/target_core_device.c ++++ b/drivers/target/target_core_device.c +@@ -1577,6 +1577,49 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) + return dev; + } + ++/* ++ * Check if the underlying struct block_device request_queue supports ++ * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM ++ * in ATA and we need to set TPE=1 ++ */ ++bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, ++ struct request_queue *q, int block_size) ++{ ++ if (!blk_queue_discard(q)) ++ return false; ++ ++ attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) / ++ block_size; ++ /* ++ * Currently hardcoded to 1 in Linux/SCSI code.. ++ */ ++ attrib->max_unmap_block_desc_count = 1; ++ attrib->unmap_granularity = q->limits.discard_granularity / block_size; ++ attrib->unmap_granularity_alignment = q->limits.discard_alignment / ++ block_size; ++ return true; ++} ++EXPORT_SYMBOL(target_configure_unmap_from_queue); ++ ++/* ++ * Convert from blocksize advertised to the initiator to the 512 byte ++ * units unconditionally used by the Linux block layer. ++ */ ++sector_t target_to_linux_sector(struct se_device *dev, sector_t lb) ++{ ++ switch (dev->dev_attrib.block_size) { ++ case 4096: ++ return lb << 3; ++ case 2048: ++ return lb << 2; ++ case 1024: ++ return lb << 1; ++ default: ++ return lb; ++ } ++} ++EXPORT_SYMBOL(target_to_linux_sector); ++ + int target_configure_device(struct se_device *dev) + { + struct se_hba *hba = dev->se_hba; +diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c +index b199f1e21d0e..6fe5b503f6e1 100644 +--- a/drivers/target/target_core_file.c ++++ b/drivers/target/target_core_file.c +@@ -164,25 +164,11 @@ static int fd_configure_device(struct se_device *dev) + " block_device blocks: %llu logical_block_size: %d\n", + dev_size, div_u64(dev_size, fd_dev->fd_block_size), + fd_dev->fd_block_size); +- /* +- * Check if the underlying struct block_device request_queue supports +- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM +- * in ATA and we need to set TPE=1 +- */ +- if (blk_queue_discard(q)) { +- dev->dev_attrib.max_unmap_lba_count = +- q->limits.max_discard_sectors; +- /* +- * Currently hardcoded to 1 in Linux/SCSI code.. +- */ +- dev->dev_attrib.max_unmap_block_desc_count = 1; +- dev->dev_attrib.unmap_granularity = +- q->limits.discard_granularity >> 9; +- dev->dev_attrib.unmap_granularity_alignment = +- q->limits.discard_alignment; ++ ++ if (target_configure_unmap_from_queue(&dev->dev_attrib, q, ++ fd_dev->fd_block_size)) + pr_debug("IFILE: BLOCK Discard support available," +- " disabled by default\n"); +- } ++ " disabled by default\n"); + /* + * Enable write same emulation for IBLOCK and use 0xFFFF as + * the smaller WRITE_SAME(10) only has a two-byte block count. 
+@@ -545,9 +531,12 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb) + if (S_ISBLK(inode->i_mode)) { + /* The backend is block device, use discard */ + struct block_device *bdev = inode->i_bdev; ++ struct se_device *dev = cmd->se_dev; + +- ret = blkdev_issue_discard(bdev, lba, +- nolb, GFP_KERNEL, 0); ++ ret = blkdev_issue_discard(bdev, ++ target_to_linux_sector(dev, lba), ++ target_to_linux_sector(dev, nolb), ++ GFP_KERNEL, 0); + if (ret < 0) { + pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n", + ret); +diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c +index feefe24a88f7..357b9fb61499 100644 +--- a/drivers/target/target_core_iblock.c ++++ b/drivers/target/target_core_iblock.c +@@ -126,27 +126,11 @@ static int iblock_configure_device(struct se_device *dev) + dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); + dev->dev_attrib.hw_queue_depth = q->nr_requests; + +- /* +- * Check if the underlying struct block_device request_queue supports +- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM +- * in ATA and we need to set TPE=1 +- */ +- if (blk_queue_discard(q)) { +- dev->dev_attrib.max_unmap_lba_count = +- q->limits.max_discard_sectors; +- +- /* +- * Currently hardcoded to 1 in Linux/SCSI code.. +- */ +- dev->dev_attrib.max_unmap_block_desc_count = 1; +- dev->dev_attrib.unmap_granularity = +- q->limits.discard_granularity >> 9; +- dev->dev_attrib.unmap_granularity_alignment = +- q->limits.discard_alignment; +- ++ if (target_configure_unmap_from_queue(&dev->dev_attrib, q, ++ dev->dev_attrib.hw_block_size)) + pr_debug("IBLOCK: BLOCK Discard support available," +- " disabled by default\n"); +- } ++ " disabled by default\n"); ++ + /* + * Enable write same emulation for IBLOCK and use 0xFFFF as + * the smaller WRITE_SAME(10) only has a two-byte block count. +@@ -418,9 +402,13 @@ iblock_do_unmap(struct se_cmd *cmd, void *priv, + sector_t lba, sector_t nolb) + { + struct block_device *bdev = priv; ++ struct se_device *dev = cmd->se_dev; + int ret; + +- ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0); ++ ret = blkdev_issue_discard(bdev, ++ target_to_linux_sector(dev, lba), ++ target_to_linux_sector(dev, nolb), ++ GFP_KERNEL, 0); + if (ret < 0) { + pr_err("blkdev_issue_discard() failed: %d\n", ret); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +@@ -460,8 +448,10 @@ iblock_execute_write_same(struct se_cmd *cmd) + struct scatterlist *sg; + struct bio *bio; + struct bio_list list; +- sector_t block_lba = cmd->t_task_lba; +- sector_t sectors = sbc_get_write_same_sectors(cmd); ++ struct se_device *dev = cmd->se_dev; ++ sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba); ++ sector_t sectors = target_to_linux_sector(dev, ++ sbc_get_write_same_sectors(cmd)); + + sg = &cmd->t_data_sg[0]; + +@@ -670,12 +660,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, + enum dma_data_direction data_direction) + { + struct se_device *dev = cmd->se_dev; ++ sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba); + struct iblock_req *ibr; + struct bio *bio, *bio_start; + struct bio_list list; + struct scatterlist *sg; + u32 sg_num = sgl_nents; +- sector_t block_lba; + unsigned bio_cnt; + int rw = 0; + int i; +@@ -701,24 +691,6 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, + rw = READ; + } + +- /* +- * Convert the blocksize advertised to the initiator to the 512 byte +- * units unconditionally used by the Linux block layer. 
+- */ +- if (dev->dev_attrib.block_size == 4096) +- block_lba = (cmd->t_task_lba << 3); +- else if (dev->dev_attrib.block_size == 2048) +- block_lba = (cmd->t_task_lba << 2); +- else if (dev->dev_attrib.block_size == 1024) +- block_lba = (cmd->t_task_lba << 1); +- else if (dev->dev_attrib.block_size == 512) +- block_lba = cmd->t_task_lba; +- else { +- pr_err("Unsupported SCSI -> BLOCK LBA conversion:" +- " %u\n", dev->dev_attrib.block_size); +- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +- } +- + ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); + if (!ibr) + goto fail; +diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c +index 70c638f730af..47a90d631a90 100644 +--- a/drivers/target/target_core_tmr.c ++++ b/drivers/target/target_core_tmr.c +@@ -76,25 +76,29 @@ void core_tmr_release_req( + } + + spin_lock_irqsave(&dev->se_tmr_lock, flags); +- list_del(&tmr->tmr_list); ++ list_del_init(&tmr->tmr_list); + spin_unlock_irqrestore(&dev->se_tmr_lock, flags); + + kfree(tmr); + } + +-static void core_tmr_handle_tas_abort( +- struct se_node_acl *tmr_nacl, +- struct se_cmd *cmd, +- int tas) ++static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) + { ++ unsigned long flags; ++ bool remove = true, send_tas; + /* + * TASK ABORTED status (TAS) bit support +- */ +- if ((tmr_nacl && +- (tmr_nacl == cmd->se_sess->se_node_acl)) || tas) ++ */ ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ send_tas = (cmd->transport_state & CMD_T_TAS); ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ ++ if (send_tas) { ++ remove = false; + transport_send_task_abort(cmd); ++ } + +- transport_cmd_finish_abort(cmd, 0); ++ transport_cmd_finish_abort(cmd, remove); + } + + static int target_check_cdb_and_preempt(struct list_head *list, +@@ -112,6 +116,47 @@ static int target_check_cdb_and_preempt(struct list_head *list, + return 1; + } + ++static bool __target_check_io_state(struct se_cmd *se_cmd, ++ struct se_session *tmr_sess, int tas) ++{ ++ struct se_session *sess = se_cmd->se_sess; ++ ++ assert_spin_locked(&sess->sess_cmd_lock); ++ WARN_ON_ONCE(!irqs_disabled()); ++ /* ++ * If command already reached CMD_T_COMPLETE state within ++ * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown, ++ * this se_cmd has been passed to fabric driver and will ++ * not be aborted. ++ * ++ * Otherwise, obtain a local se_cmd->cmd_kref now for TMR ++ * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as ++ * long as se_cmd->cmd_kref is still active unless zero. 
++ */ ++ spin_lock(&se_cmd->t_state_lock); ++ if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) { ++ pr_debug("Attempted to abort io tag: %u already complete or" ++ " fabric stop, skipping\n", ++ se_cmd->se_tfo->get_task_tag(se_cmd)); ++ spin_unlock(&se_cmd->t_state_lock); ++ return false; ++ } ++ if (sess->sess_tearing_down || se_cmd->cmd_wait_set) { ++ pr_debug("Attempted to abort io tag: %u already shutdown," ++ " skipping\n", se_cmd->se_tfo->get_task_tag(se_cmd)); ++ spin_unlock(&se_cmd->t_state_lock); ++ return false; ++ } ++ se_cmd->transport_state |= CMD_T_ABORTED; ++ ++ if ((tmr_sess != se_cmd->se_sess) && tas) ++ se_cmd->transport_state |= CMD_T_TAS; ++ ++ spin_unlock(&se_cmd->t_state_lock); ++ ++ return kref_get_unless_zero(&se_cmd->cmd_kref); ++} ++ + void core_tmr_abort_task( + struct se_device *dev, + struct se_tmr_req *tmr, +@@ -134,33 +179,19 @@ void core_tmr_abort_task( + printk("ABORT_TASK: Found referenced %s task_tag: %u\n", + se_cmd->se_tfo->get_fabric_name(), ref_tag); + +- spin_lock(&se_cmd->t_state_lock); +- if (se_cmd->transport_state & CMD_T_COMPLETE) { +- printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag); +- spin_unlock(&se_cmd->t_state_lock); ++ if (!__target_check_io_state(se_cmd, se_sess, 0)) { + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); ++ target_put_sess_cmd(se_sess, se_cmd); + goto out; + } +- se_cmd->transport_state |= CMD_T_ABORTED; +- spin_unlock(&se_cmd->t_state_lock); + + list_del_init(&se_cmd->se_cmd_list); +- kref_get(&se_cmd->cmd_kref); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + + cancel_work_sync(&se_cmd->work); + transport_wait_for_tasks(se_cmd); +- /* +- * Now send SAM_STAT_TASK_ABORTED status for the referenced +- * se_cmd descriptor.. +- */ +- transport_send_task_abort(se_cmd); +- /* +- * Also deal with possible extra acknowledge reference.. +- */ +- if (se_cmd->se_cmd_flags & SCF_ACK_KREF) +- target_put_sess_cmd(se_sess, se_cmd); + ++ transport_cmd_finish_abort(se_cmd, true); + target_put_sess_cmd(se_sess, se_cmd); + + printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" +@@ -182,9 +213,11 @@ static void core_tmr_drain_tmr_list( + struct list_head *preempt_and_abort_list) + { + LIST_HEAD(drain_tmr_list); ++ struct se_session *sess; + struct se_tmr_req *tmr_p, *tmr_pp; + struct se_cmd *cmd; + unsigned long flags; ++ bool rc; + /* + * Release all pending and outgoing TMRs aside from the received + * LUN_RESET tmr.. 
+@@ -210,17 +243,39 @@ static void core_tmr_drain_tmr_list( + if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) + continue; + ++ sess = cmd->se_sess; ++ if (WARN_ON_ONCE(!sess)) ++ continue; ++ ++ spin_lock(&sess->sess_cmd_lock); + spin_lock(&cmd->t_state_lock); +- if (!(cmd->transport_state & CMD_T_ACTIVE)) { ++ if (!(cmd->transport_state & CMD_T_ACTIVE) || ++ (cmd->transport_state & CMD_T_FABRIC_STOP)) { + spin_unlock(&cmd->t_state_lock); ++ spin_unlock(&sess->sess_cmd_lock); + continue; + } + if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { + spin_unlock(&cmd->t_state_lock); ++ spin_unlock(&sess->sess_cmd_lock); + continue; + } ++ if (sess->sess_tearing_down || cmd->cmd_wait_set) { ++ spin_unlock(&cmd->t_state_lock); ++ spin_unlock(&sess->sess_cmd_lock); ++ continue; ++ } ++ cmd->transport_state |= CMD_T_ABORTED; + spin_unlock(&cmd->t_state_lock); + ++ rc = kref_get_unless_zero(&cmd->cmd_kref); ++ if (!rc) { ++ printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n"); ++ spin_unlock(&sess->sess_cmd_lock); ++ continue; ++ } ++ spin_unlock(&sess->sess_cmd_lock); ++ + list_move_tail(&tmr_p->tmr_list, &drain_tmr_list); + } + spin_unlock_irqrestore(&dev->se_tmr_lock, flags); +@@ -234,20 +289,26 @@ static void core_tmr_drain_tmr_list( + (preempt_and_abort_list) ? "Preempt" : "", tmr_p, + tmr_p->function, tmr_p->response, cmd->t_state); + ++ cancel_work_sync(&cmd->work); ++ transport_wait_for_tasks(cmd); ++ + transport_cmd_finish_abort(cmd, 1); ++ target_put_sess_cmd(cmd->se_sess, cmd); + } + } + + static void core_tmr_drain_state_list( + struct se_device *dev, + struct se_cmd *prout_cmd, +- struct se_node_acl *tmr_nacl, ++ struct se_session *tmr_sess, + int tas, + struct list_head *preempt_and_abort_list) + { + LIST_HEAD(drain_task_list); ++ struct se_session *sess; + struct se_cmd *cmd, *next; + unsigned long flags; ++ int rc; + + /* + * Complete outstanding commands with TASK_ABORTED SAM status. +@@ -286,6 +347,16 @@ static void core_tmr_drain_state_list( + if (prout_cmd == cmd) + continue; + ++ sess = cmd->se_sess; ++ if (WARN_ON_ONCE(!sess)) ++ continue; ++ ++ spin_lock(&sess->sess_cmd_lock); ++ rc = __target_check_io_state(cmd, tmr_sess, tas); ++ spin_unlock(&sess->sess_cmd_lock); ++ if (!rc) ++ continue; ++ + list_move_tail(&cmd->state_list, &drain_task_list); + cmd->state_active = false; + } +@@ -293,7 +364,7 @@ static void core_tmr_drain_state_list( + + while (!list_empty(&drain_task_list)) { + cmd = list_entry(drain_task_list.next, struct se_cmd, state_list); +- list_del(&cmd->state_list); ++ list_del_init(&cmd->state_list); + + pr_debug("LUN_RESET: %s cmd: %p" + " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d" +@@ -317,16 +388,11 @@ static void core_tmr_drain_state_list( + * loop above, but we do it down here given that + * cancel_work_sync may block. 
+ */ +- if (cmd->t_state == TRANSPORT_COMPLETE) +- cancel_work_sync(&cmd->work); +- +- spin_lock_irqsave(&cmd->t_state_lock, flags); +- target_stop_cmd(cmd, &flags); +- +- cmd->transport_state |= CMD_T_ABORTED; +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ cancel_work_sync(&cmd->work); ++ transport_wait_for_tasks(cmd); + +- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas); ++ core_tmr_handle_tas_abort(cmd, tas); ++ target_put_sess_cmd(cmd->se_sess, cmd); + } + } + +@@ -338,6 +404,7 @@ int core_tmr_lun_reset( + { + struct se_node_acl *tmr_nacl = NULL; + struct se_portal_group *tmr_tpg = NULL; ++ struct se_session *tmr_sess = NULL; + int tas; + /* + * TASK_ABORTED status bit, this is configurable via ConfigFS +@@ -356,8 +423,9 @@ int core_tmr_lun_reset( + * or struct se_device passthrough.. + */ + if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { +- tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; +- tmr_tpg = tmr->task_cmd->se_sess->se_tpg; ++ tmr_sess = tmr->task_cmd->se_sess; ++ tmr_nacl = tmr_sess->se_node_acl; ++ tmr_tpg = tmr_sess->se_tpg; + if (tmr_nacl && tmr_tpg) { + pr_debug("LUN_RESET: TMR caller fabric: %s" + " initiator port %s\n", +@@ -370,7 +438,7 @@ int core_tmr_lun_reset( + dev->transport->name, tas); + + core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); +- core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas, ++ core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas, + preempt_and_abort_list); + + /* +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index 7afea9b59e2c..cbf927a67160 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -509,9 +509,6 @@ void transport_deregister_session(struct se_session *se_sess) + } + EXPORT_SYMBOL(transport_deregister_session); + +-/* +- * Called with cmd->t_state_lock held. +- */ + static void target_remove_from_state_list(struct se_cmd *cmd) + { + struct se_device *dev = cmd->se_dev; +@@ -536,10 +533,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, + { + unsigned long flags; + +- spin_lock_irqsave(&cmd->t_state_lock, flags); +- if (write_pending) +- cmd->t_state = TRANSPORT_WRITE_PENDING; +- + if (remove_from_lists) { + target_remove_from_state_list(cmd); + +@@ -549,6 +542,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, + cmd->se_lun = NULL; + } + ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ if (write_pending) ++ cmd->t_state = TRANSPORT_WRITE_PENDING; ++ + /* + * Determine if frontend context caller is requesting the stopping of + * this command for frontend exceptions. +@@ -603,9 +600,20 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) + + void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) + { ++ bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); ++ ++ if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) ++ transport_lun_remove_cmd(cmd); ++ /* ++ * Allow the fabric driver to unmap any resources before ++ * releasing the descriptor via TFO->release_cmd() ++ */ ++ if (remove) ++ cmd->se_tfo->aborted_task(cmd); ++ + if (transport_cmd_check_stop_to_fabric(cmd)) + return; +- if (remove) ++ if (remove && ack_kref) + transport_put_cmd(cmd); + } + +@@ -673,7 +681,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) + * Check for case where an explicit ABORT_TASK has been received + * and transport_wait_for_tasks() will be waiting for completion.. 
+ */ +- if (cmd->transport_state & CMD_T_ABORTED && ++ if (cmd->transport_state & CMD_T_ABORTED || + cmd->transport_state & CMD_T_STOP) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + complete_all(&cmd->t_transport_stop_comp); +@@ -1746,19 +1754,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd) + return true; + } + ++static int __transport_check_aborted_status(struct se_cmd *, int); ++ + void target_execute_cmd(struct se_cmd *cmd) + { + /* +- * If the received CDB has aleady been aborted stop processing it here. +- */ +- if (transport_check_aborted_status(cmd, 1)) +- return; +- +- /* + * Determine if frontend context caller is requesting the stopping of + * this command for frontend exceptions. ++ * ++ * If the received CDB has aleady been aborted stop processing it here. + */ + spin_lock_irq(&cmd->t_state_lock); ++ if (__transport_check_aborted_status(cmd, 1)) { ++ spin_unlock_irq(&cmd->t_state_lock); ++ return; ++ } + if (cmd->transport_state & CMD_T_STOP) { + pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", + __func__, __LINE__, +@@ -2076,20 +2086,14 @@ static inline void transport_free_pages(struct se_cmd *cmd) + } + + /** +- * transport_release_cmd - free a command +- * @cmd: command to free ++ * transport_put_cmd - release a reference to a command ++ * @cmd: command to release + * +- * This routine unconditionally frees a command, and reference counting +- * or list removal must be done in the caller. ++ * This routine releases our reference to the command and frees it if possible. + */ +-static int transport_release_cmd(struct se_cmd *cmd) ++static int transport_put_cmd(struct se_cmd *cmd) + { + BUG_ON(!cmd->se_tfo); +- +- if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) +- core_tmr_release_req(cmd->se_tmr_req); +- if (cmd->t_task_cdb != cmd->__t_task_cdb) +- kfree(cmd->t_task_cdb); + /* + * If this cmd has been setup with target_get_sess_cmd(), drop + * the kref and call ->release_cmd() in kref callback. +@@ -2097,18 +2101,6 @@ static int transport_release_cmd(struct se_cmd *cmd) + return target_put_sess_cmd(cmd->se_sess, cmd); + } + +-/** +- * transport_put_cmd - release a reference to a command +- * @cmd: command to release +- * +- * This routine releases our reference to the command and frees it if possible. 
+- */ +-static int transport_put_cmd(struct se_cmd *cmd) +-{ +- transport_free_pages(cmd); +- return transport_release_cmd(cmd); +-} +- + void *transport_kmap_data_sg(struct se_cmd *cmd) + { + struct scatterlist *sg = cmd->t_data_sg; +@@ -2296,34 +2288,59 @@ static void transport_write_pending_qf(struct se_cmd *cmd) + } + } + +-int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) ++static bool ++__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, ++ unsigned long *flags); ++ ++static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) + { + unsigned long flags; ++ ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++} ++ ++int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) ++{ + int ret = 0; ++ bool aborted = false, tas = false; + + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { + if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) +- transport_wait_for_tasks(cmd); ++ target_wait_free_cmd(cmd, &aborted, &tas); + +- ret = transport_release_cmd(cmd); ++ if (!aborted || tas) ++ ret = transport_put_cmd(cmd); + } else { + if (wait_for_tasks) +- transport_wait_for_tasks(cmd); ++ target_wait_free_cmd(cmd, &aborted, &tas); + /* + * Handle WRITE failure case where transport_generic_new_cmd() + * has already added se_cmd to state_list, but fabric has + * failed command before I/O submission. + */ +- if (cmd->state_active) { +- spin_lock_irqsave(&cmd->t_state_lock, flags); ++ if (cmd->state_active) + target_remove_from_state_list(cmd); +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); +- } + + if (cmd->se_lun) + transport_lun_remove_cmd(cmd); + +- ret = transport_put_cmd(cmd); ++ if (!aborted || tas) ++ ret = transport_put_cmd(cmd); ++ } ++ /* ++ * If the task has been internally aborted due to TMR ABORT_TASK ++ * or LUN_RESET, target_core_tmr.c is responsible for performing ++ * the remaining calls to target_put_sess_cmd(), and not the ++ * callers of this function. 
++ */ ++ if (aborted) { ++ pr_debug("Detected CMD_T_ABORTED for ITT: %u\n", ++ cmd->se_tfo->get_task_tag(cmd)); ++ wait_for_completion(&cmd->cmd_wait_comp); ++ cmd->se_tfo->release_cmd(cmd); ++ ret = 1; + } + return ret; + } +@@ -2366,24 +2383,44 @@ out: + } + EXPORT_SYMBOL(target_get_sess_cmd); + ++static void target_free_cmd_mem(struct se_cmd *cmd) ++{ ++ transport_free_pages(cmd); ++ ++ if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) ++ core_tmr_release_req(cmd->se_tmr_req); ++ if (cmd->t_task_cdb != cmd->__t_task_cdb) ++ kfree(cmd->t_task_cdb); ++} ++ + static void target_release_cmd_kref(struct kref *kref) + { + struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); + struct se_session *se_sess = se_cmd->se_sess; ++ bool fabric_stop; + + if (list_empty(&se_cmd->se_cmd_list)) { + spin_unlock(&se_sess->sess_cmd_lock); ++ target_free_cmd_mem(se_cmd); + se_cmd->se_tfo->release_cmd(se_cmd); + return; + } +- if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { ++ ++ spin_lock(&se_cmd->t_state_lock); ++ fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP); ++ spin_unlock(&se_cmd->t_state_lock); ++ ++ if (se_cmd->cmd_wait_set || fabric_stop) { ++ list_del_init(&se_cmd->se_cmd_list); + spin_unlock(&se_sess->sess_cmd_lock); ++ target_free_cmd_mem(se_cmd); + complete(&se_cmd->cmd_wait_comp); + return; + } +- list_del(&se_cmd->se_cmd_list); ++ list_del_init(&se_cmd->se_cmd_list); + spin_unlock(&se_sess->sess_cmd_lock); + ++ target_free_cmd_mem(se_cmd); + se_cmd->se_tfo->release_cmd(se_cmd); + } + +@@ -2394,6 +2431,7 @@ static void target_release_cmd_kref(struct kref *kref) + int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) + { + if (!se_sess) { ++ target_free_cmd_mem(se_cmd); + se_cmd->se_tfo->release_cmd(se_cmd); + return 1; + } +@@ -2411,6 +2449,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) + { + struct se_cmd *se_cmd; + unsigned long flags; ++ int rc; + + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); + if (se_sess->sess_tearing_down) { +@@ -2420,8 +2459,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) + se_sess->sess_tearing_down = 1; + list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); + +- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) +- se_cmd->cmd_wait_set = 1; ++ list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) { ++ rc = kref_get_unless_zero(&se_cmd->cmd_kref); ++ if (rc) { ++ se_cmd->cmd_wait_set = 1; ++ spin_lock(&se_cmd->t_state_lock); ++ se_cmd->transport_state |= CMD_T_FABRIC_STOP; ++ spin_unlock(&se_cmd->t_state_lock); ++ } ++ } + + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + } +@@ -2434,15 +2480,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess) + { + struct se_cmd *se_cmd, *tmp_cmd; + unsigned long flags; ++ bool tas; + + list_for_each_entry_safe(se_cmd, tmp_cmd, + &se_sess->sess_wait_list, se_cmd_list) { +- list_del(&se_cmd->se_cmd_list); ++ list_del_init(&se_cmd->se_cmd_list); + + pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" + " %d\n", se_cmd, se_cmd->t_state, + se_cmd->se_tfo->get_cmd_state(se_cmd)); + ++ spin_lock_irqsave(&se_cmd->t_state_lock, flags); ++ tas = (se_cmd->transport_state & CMD_T_TAS); ++ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); ++ ++ if (!target_put_sess_cmd(se_sess, se_cmd)) { ++ if (tas) ++ target_put_sess_cmd(se_sess, se_cmd); ++ } ++ + wait_for_completion(&se_cmd->cmd_wait_comp); + pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" + " fabric 
state: %d\n", se_cmd, se_cmd->t_state, +@@ -2485,34 +2541,38 @@ int transport_clear_lun_ref(struct se_lun *lun) + return 0; + } + +-/** +- * transport_wait_for_tasks - wait for completion to occur +- * @cmd: command to wait +- * +- * Called from frontend fabric context to wait for storage engine +- * to pause and/or release frontend generated struct se_cmd. +- */ +-bool transport_wait_for_tasks(struct se_cmd *cmd) ++static bool ++__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, ++ bool *aborted, bool *tas, unsigned long *flags) ++ __releases(&cmd->t_state_lock) ++ __acquires(&cmd->t_state_lock) + { +- unsigned long flags; + +- spin_lock_irqsave(&cmd->t_state_lock, flags); ++ assert_spin_locked(&cmd->t_state_lock); ++ WARN_ON_ONCE(!irqs_disabled()); ++ ++ if (fabric_stop) ++ cmd->transport_state |= CMD_T_FABRIC_STOP; ++ ++ if (cmd->transport_state & CMD_T_ABORTED) ++ *aborted = true; ++ ++ if (cmd->transport_state & CMD_T_TAS) ++ *tas = true; ++ + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && +- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) + return false; +- } + + if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && +- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) + return false; +- } + +- if (!(cmd->transport_state & CMD_T_ACTIVE)) { +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ if (!(cmd->transport_state & CMD_T_ACTIVE)) ++ return false; ++ ++ if (fabric_stop && *aborted) + return false; +- } + + cmd->transport_state |= CMD_T_STOP; + +@@ -2521,20 +2581,37 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) + cmd, cmd->se_tfo->get_task_tag(cmd), + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); + +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ spin_unlock_irqrestore(&cmd->t_state_lock, *flags); + + wait_for_completion(&cmd->t_transport_stop_comp); + +- spin_lock_irqsave(&cmd->t_state_lock, flags); ++ spin_lock_irqsave(&cmd->t_state_lock, *flags); + cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); + + pr_debug("wait_for_tasks: Stopped wait_for_completion(" + "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", + cmd->se_tfo->get_task_tag(cmd)); + ++ return true; ++} ++ ++/** ++ * transport_wait_for_tasks - wait for completion to occur ++ * @cmd: command to wait ++ * ++ * Called from frontend fabric context to wait for storage engine ++ * to pause and/or release frontend generated struct se_cmd. 
++ */ ++bool transport_wait_for_tasks(struct se_cmd *cmd) ++{ ++ unsigned long flags; ++ bool ret, aborted = false, tas = false; ++ ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + +- return true; ++ return ret; + } + EXPORT_SYMBOL(transport_wait_for_tasks); + +@@ -2820,24 +2897,51 @@ after_reason: + } + EXPORT_SYMBOL(transport_send_check_condition_and_sense); + +-int transport_check_aborted_status(struct se_cmd *cmd, int send_status) ++static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status) ++ __releases(&cmd->t_state_lock) ++ __acquires(&cmd->t_state_lock) + { ++ assert_spin_locked(&cmd->t_state_lock); ++ WARN_ON_ONCE(!irqs_disabled()); ++ + if (!(cmd->transport_state & CMD_T_ABORTED)) + return 0; + +- if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) ++ /* ++ * If cmd has been aborted but either no status is to be sent or it has ++ * already been sent, just return ++ */ ++ if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) { ++ if (send_status) ++ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; + return 1; ++ } + +- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n", +- cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); ++ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:" ++ " 0x%02x ITT: 0x%08x\n", cmd->t_task_cdb[0], ++ cmd->se_tfo->get_task_tag(cmd)); + +- cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; ++ cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; + cmd->scsi_status = SAM_STAT_TASK_ABORTED; + trace_target_cmd_complete(cmd); ++ ++ spin_unlock_irq(&cmd->t_state_lock); + cmd->se_tfo->queue_status(cmd); ++ spin_lock_irq(&cmd->t_state_lock); + + return 1; + } ++ ++int transport_check_aborted_status(struct se_cmd *cmd, int send_status) ++{ ++ int ret; ++ ++ spin_lock_irq(&cmd->t_state_lock); ++ ret = __transport_check_aborted_status(cmd, send_status); ++ spin_unlock_irq(&cmd->t_state_lock); ++ ++ return ret; ++} + EXPORT_SYMBOL(transport_check_aborted_status); + + void transport_send_task_abort(struct se_cmd *cmd) +@@ -2845,7 +2949,7 @@ void transport_send_task_abort(struct se_cmd *cmd) + unsigned long flags; + + spin_lock_irqsave(&cmd->t_state_lock, flags); +- if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) { ++ if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return; + } +@@ -2859,11 +2963,17 @@ void transport_send_task_abort(struct se_cmd *cmd) + */ + if (cmd->data_direction == DMA_TO_DEVICE) { + if (cmd->se_tfo->write_pending_status(cmd) != 0) { +- cmd->transport_state |= CMD_T_ABORTED; +- smp_mb__after_atomic_inc(); ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) { ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ goto send_abort; ++ } ++ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return; + } + } ++send_abort: + cmd->scsi_status = SAM_STAT_TASK_ABORTED; + + transport_lun_remove_cmd(cmd); +@@ -2881,8 +2991,17 @@ static void target_tmr_work(struct work_struct *work) + struct se_cmd *cmd = container_of(work, struct se_cmd, work); + struct se_device *dev = cmd->se_dev; + struct se_tmr_req *tmr = cmd->se_tmr_req; ++ unsigned long flags; + int ret; + ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ if (cmd->transport_state & CMD_T_ABORTED) { ++ tmr->response = 
TMR_FUNCTION_REJECTED; ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ goto check_stop; ++ } ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ + switch (tmr->function) { + case TMR_ABORT_TASK: + core_tmr_abort_task(dev, tmr, cmd->se_sess); +@@ -2910,9 +3029,17 @@ static void target_tmr_work(struct work_struct *work) + break; + } + ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ if (cmd->transport_state & CMD_T_ABORTED) { ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ goto check_stop; ++ } + cmd->t_state = TRANSPORT_ISTATE_PROCESSING; ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ + cmd->se_tfo->queue_tm_rsp(cmd); + ++check_stop: + transport_cmd_check_stop_to_fabric(cmd); + } + +diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h +index 752863acecb8..4f4b97161228 100644 +--- a/drivers/target/tcm_fc/tcm_fc.h ++++ b/drivers/target/tcm_fc/tcm_fc.h +@@ -163,6 +163,7 @@ int ft_write_pending_status(struct se_cmd *); + u32 ft_get_task_tag(struct se_cmd *); + int ft_get_cmd_state(struct se_cmd *); + void ft_queue_tm_resp(struct se_cmd *); ++void ft_aborted_task(struct se_cmd *); + + /* + * other internal functions. +diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c +index d22cdc77e9d4..f5fd515b2bee 100644 +--- a/drivers/target/tcm_fc/tfc_cmd.c ++++ b/drivers/target/tcm_fc/tfc_cmd.c +@@ -426,6 +426,11 @@ void ft_queue_tm_resp(struct se_cmd *se_cmd) + ft_send_resp_code(cmd, code); + } + ++void ft_aborted_task(struct se_cmd *se_cmd) ++{ ++ return; ++} ++ + static void ft_send_work(struct work_struct *work); + + /* +diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c +index e879da81ad93..b8b5a719a784 100644 +--- a/drivers/target/tcm_fc/tfc_conf.c ++++ b/drivers/target/tcm_fc/tfc_conf.c +@@ -536,6 +536,7 @@ static struct target_core_fabric_ops ft_fabric_ops = { + .queue_data_in = ft_queue_data_in, + .queue_status = ft_queue_status, + .queue_tm_rsp = ft_queue_tm_resp, ++ .aborted_task = ft_aborted_task, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c +diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c +index 39bd7ec8bf75..f66a14b6fc73 100644 +--- a/drivers/usb/chipidea/otg.c ++++ b/drivers/usb/chipidea/otg.c +@@ -96,7 +96,7 @@ static void ci_otg_work(struct work_struct *work) + int ci_hdrc_otg_init(struct ci_hdrc *ci) + { + INIT_WORK(&ci->work, ci_otg_work); +- ci->wq = create_singlethread_workqueue("ci_otg"); ++ ci->wq = create_freezable_workqueue("ci_otg"); + if (!ci->wq) { + dev_err(ci->dev, "can't create workqueue\n"); + return -ENODEV; +diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c +index 460c266b8e24..cdec2492ff40 100644 +--- a/drivers/usb/gadget/tcm_usb_gadget.c ++++ b/drivers/usb/gadget/tcm_usb_gadget.c +@@ -1471,6 +1471,11 @@ static void usbg_queue_tm_rsp(struct se_cmd *se_cmd) + { + } + ++static void usbg_aborted_task(struct se_cmd *se_cmd) ++{ ++ return; ++} ++ + static const char *usbg_check_wwn(const char *name) + { + const char *n; +@@ -1897,6 +1902,7 @@ static struct target_core_fabric_ops usbg_ops = { + .queue_data_in = usbg_send_read_response, + .queue_status = usbg_send_status_response, + .queue_tm_rsp = usbg_queue_tm_rsp, ++ .aborted_task = usbg_aborted_task, + .check_stop_free = usbg_check_stop_free, + + .fabric_make_wwn = usbg_make_tport, +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 21bf168981f9..922723edd6b0 100644 +--- 
a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -164,6 +164,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ + { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ + { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ ++ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ + { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 9bab34cf01d4..24366a2afea6 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -271,6 +271,7 @@ static void option_instat_callback(struct urb *urb); + #define TELIT_PRODUCT_UE910_V2 0x1012 + #define TELIT_PRODUCT_LE922_USBCFG0 0x1042 + #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 ++#define TELIT_PRODUCT_LE922_USBCFG5 0x1045 + #define TELIT_PRODUCT_LE920 0x1200 + #define TELIT_PRODUCT_LE910 0x1201 + +@@ -1140,6 +1141,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ ++ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */ ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), +@@ -1191,6 +1194,8 @@ static const struct usb_device_id option_ids[] = { + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3), + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), ++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), + .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), +diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c +index 6aeea1936aea..8f48f2bf34d4 100644 +--- a/drivers/vhost/scsi.c ++++ b/drivers/vhost/scsi.c +@@ -539,6 +539,11 @@ static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd) + return; + } + ++static void tcm_vhost_aborted_task(struct se_cmd *se_cmd) ++{ ++ return; ++} ++ + static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt) + { + vs->vs_events_nr--; +@@ -2173,6 +2178,7 @@ static struct target_core_fabric_ops tcm_vhost_ops = { + .queue_data_in = tcm_vhost_queue_data_in, + .queue_status = tcm_vhost_queue_status, + .queue_tm_rsp = tcm_vhost_queue_tm_rsp, ++ .aborted_task = tcm_vhost_aborted_task, + /* + * Setup callers for generic logic in target_core_fabric_configfs.c + */ +diff --git a/fs/bio.c b/fs/bio.c +index b2b1451912b5..6d8cf434bf94 100644 +--- a/fs/bio.c ++++ b/fs/bio.c +@@ -1096,9 +1096,12 @@ int bio_uncopy_user(struct bio *bio) + ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs, + bio_data_dir(bio) == READ, + 0, bmd->is_our_pages); +- else if (bmd->is_our_pages) +- bio_for_each_segment_all(bvec, bio, i) +- __free_page(bvec->bv_page); ++ else { ++ ret = -EINTR; ++ if (bmd->is_our_pages) ++ bio_for_each_segment_all(bvec, bio, i) ++ 
__free_page(bvec->bv_page); ++ } + } + kfree(bmd); + bio_put(bio); +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c +index f3264bd7a83d..3f709ab0223b 100644 +--- a/fs/cifs/cifssmb.c ++++ b/fs/cifs/cifssmb.c +@@ -1399,11 +1399,10 @@ openRetry: + * current bigbuf. + */ + static int +-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) ++discard_remaining_data(struct TCP_Server_Info *server) + { + unsigned int rfclen = get_rfc1002_length(server->smallbuf); + int remaining = rfclen + 4 - server->total_read; +- struct cifs_readdata *rdata = mid->callback_data; + + while (remaining > 0) { + int length; +@@ -1417,10 +1416,20 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) + remaining -= length; + } + +- dequeue_mid(mid, rdata->result); + return 0; + } + ++static int ++cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) ++{ ++ int length; ++ struct cifs_readdata *rdata = mid->callback_data; ++ ++ length = discard_remaining_data(server); ++ dequeue_mid(mid, rdata->result); ++ return length; ++} ++ + int + cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) + { +@@ -1449,6 +1458,12 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) + return length; + server->total_read += length; + ++ if (server->ops->is_status_pending && ++ server->ops->is_status_pending(buf, server, 0)) { ++ discard_remaining_data(server); ++ return -1; ++ } ++ + /* Was the SMB read successful? */ + rdata->result = server->ops->map_error(buf, false); + if (rdata->result != 0) { +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index 348792911e1f..ae375dff03da 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -1004,21 +1004,25 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp, + { + char *data_offset; + struct create_context *cc; +- unsigned int next = 0; ++ unsigned int next; ++ unsigned int remaining; + char *name; + + data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset); ++ remaining = le32_to_cpu(rsp->CreateContextsLength); + cc = (struct create_context *)data_offset; +- do { +- cc = (struct create_context *)((char *)cc + next); ++ while (remaining >= sizeof(struct create_context)) { + name = le16_to_cpu(cc->NameOffset) + (char *)cc; +- if (le16_to_cpu(cc->NameLength) != 4 || +- strncmp(name, "RqLs", 4)) { +- next = le32_to_cpu(cc->Next); +- continue; +- } +- return server->ops->parse_lease_buf(cc, epoch); +- } while (next != 0); ++ if (le16_to_cpu(cc->NameLength) == 4 && ++ strncmp(name, "RqLs", 4) == 0) ++ return server->ops->parse_lease_buf(cc, epoch); ++ ++ next = le32_to_cpu(cc->Next); ++ if (!next) ++ break; ++ remaining -= next; ++ cc = (struct create_context *)((char *)cc + next); ++ } + + return 0; + } +diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking +index 3ea36554107f..8918ac905a3b 100644 +--- a/fs/jffs2/README.Locking ++++ b/fs/jffs2/README.Locking +@@ -2,10 +2,6 @@ + JFFS2 LOCKING DOCUMENTATION + --------------------------- + +-At least theoretically, JFFS2 does not require the Big Kernel Lock +-(BKL), which was always helpfully obtained for it by Linux 2.4 VFS +-code. It has its own locking, as described below. +- + This document attempts to describe the existing locking rules for + JFFS2. It is not expected to remain perfectly up to date, but ought to + be fairly close. +@@ -69,6 +65,7 @@ Ordering constraints: + any f->sem held. + 2. Never attempt to lock two file mutexes in one thread. 
+ No ordering rules have been made for doing so. ++ 3. Never lock a page cache page with f->sem held. + + + erase_completion_lock spinlock +diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c +index a3750f902adc..c1f04947d7dc 100644 +--- a/fs/jffs2/build.c ++++ b/fs/jffs2/build.c +@@ -49,7 +49,8 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) + + + static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, +- struct jffs2_inode_cache *ic) ++ struct jffs2_inode_cache *ic, ++ int *dir_hardlinks) + { + struct jffs2_full_dirent *fd; + +@@ -68,19 +69,21 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, + dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", + fd->name, fd->ino, ic->ino); + jffs2_mark_node_obsolete(c, fd->raw); ++ /* Clear the ic/raw union so it doesn't cause problems later. */ ++ fd->ic = NULL; + continue; + } + ++ /* From this point, fd->raw is no longer used so we can set fd->ic */ ++ fd->ic = child_ic; ++ child_ic->pino_nlink++; ++ /* If we appear (at this stage) to have hard-linked directories, ++ * set a flag to trigger a scan later */ + if (fd->type == DT_DIR) { +- if (child_ic->pino_nlink) { +- JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", +- fd->name, fd->ino, ic->ino); +- /* TODO: What do we do about it? */ +- } else { +- child_ic->pino_nlink = ic->ino; +- } +- } else +- child_ic->pino_nlink++; ++ child_ic->flags |= INO_FLAGS_IS_DIR; ++ if (child_ic->pino_nlink > 1) ++ *dir_hardlinks = 1; ++ } + + dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino); + /* Can't free scan_dents so far. We might need them in pass 2 */ +@@ -94,8 +97,7 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, + */ + static int jffs2_build_filesystem(struct jffs2_sb_info *c) + { +- int ret; +- int i; ++ int ret, i, dir_hardlinks = 0; + struct jffs2_inode_cache *ic; + struct jffs2_full_dirent *fd; + struct jffs2_full_dirent *dead_fds = NULL; +@@ -119,7 +121,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + /* Now scan the directory tree, increasing nlink according to every dirent found. */ + for_each_inode(i, c, ic) { + if (ic->scan_dents) { +- jffs2_build_inode_pass1(c, ic); ++ jffs2_build_inode_pass1(c, ic, &dir_hardlinks); + cond_resched(); + } + } +@@ -155,6 +157,20 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + } + + dbg_fsbuild("pass 2a complete\n"); ++ ++ if (dir_hardlinks) { ++ /* If we detected directory hardlinks earlier, *hopefully* ++ * they are gone now because some of the links were from ++ * dead directories which still had some old dirents lying ++ * around and not yet garbage-collected, but which have ++ * been discarded above. So clear the pino_nlink field ++ * in each directory, so that the final scan below can ++ * print appropriate warnings. */ ++ for_each_inode(i, c, ic) { ++ if (ic->flags & INO_FLAGS_IS_DIR) ++ ic->pino_nlink = 0; ++ } ++ } + dbg_fsbuild("freeing temporary data structures\n"); + + /* Finally, we can scan again and free the dirent structs */ +@@ -162,6 +178,33 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + while(ic->scan_dents) { + fd = ic->scan_dents; + ic->scan_dents = fd->next; ++ /* We do use the pino_nlink field to count nlink of ++ * directories during fs build, so set it to the ++ * parent ino# now. Now that there's hopefully only ++ * one. 
*/ ++ if (fd->type == DT_DIR) { ++ if (!fd->ic) { ++ /* We'll have complained about it and marked the coresponding ++ raw node obsolete already. Just skip it. */ ++ continue; ++ } ++ ++ /* We *have* to have set this in jffs2_build_inode_pass1() */ ++ BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR)); ++ ++ /* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks ++ * is set. Otherwise, we know this should never trigger anyway, so ++ * we don't do the check. And ic->pino_nlink still contains the nlink ++ * value (which is 1). */ ++ if (dir_hardlinks && fd->ic->pino_nlink) { ++ JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n", ++ fd->name, fd->ino, ic->ino, fd->ic->pino_nlink); ++ /* Should we unlink it from its previous parent? */ ++ } ++ ++ /* For directories, ic->pino_nlink holds that parent inode # */ ++ fd->ic->pino_nlink = ic->ino; ++ } + jffs2_free_full_dirent(fd); + } + ic->scan_dents = NULL; +@@ -240,11 +283,7 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, + + /* Reduce nlink of the child. If it's now zero, stick it on the + dead_fds list to be cleaned up later. Else just free the fd */ +- +- if (fd->type == DT_DIR) +- child_ic->pino_nlink = 0; +- else +- child_ic->pino_nlink--; ++ child_ic->pino_nlink--; + + if (!child_ic->pino_nlink) { + dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n", +diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c +index 256cd19a3b78..d2570ae2e998 100644 +--- a/fs/jffs2/file.c ++++ b/fs/jffs2/file.c +@@ -139,39 +139,33 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + struct page *pg; + struct inode *inode = mapping->host; + struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); +- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); +- struct jffs2_raw_inode ri; +- uint32_t alloc_len = 0; + pgoff_t index = pos >> PAGE_CACHE_SHIFT; + uint32_t pageofs = index << PAGE_CACHE_SHIFT; + int ret = 0; + +- jffs2_dbg(1, "%s()\n", __func__); +- +- if (pageofs > inode->i_size) { +- ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, +- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); +- if (ret) +- return ret; +- } +- +- mutex_lock(&f->sem); + pg = grab_cache_page_write_begin(mapping, index, flags); +- if (!pg) { +- if (alloc_len) +- jffs2_complete_reservation(c); +- mutex_unlock(&f->sem); ++ if (!pg) + return -ENOMEM; +- } + *pagep = pg; + +- if (alloc_len) { ++ jffs2_dbg(1, "%s()\n", __func__); ++ ++ if (pageofs > inode->i_size) { + /* Make new hole frag from old EOF to new page */ ++ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); ++ struct jffs2_raw_inode ri; + struct jffs2_full_dnode *fn; ++ uint32_t alloc_len; + + jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", + (unsigned int)inode->i_size, pageofs); + ++ ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, ++ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); ++ if (ret) ++ goto out_page; ++ ++ mutex_lock(&f->sem); + memset(&ri, 0, sizeof(ri)); + + ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); +@@ -198,6 +192,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + if (IS_ERR(fn)) { + ret = PTR_ERR(fn); + jffs2_complete_reservation(c); ++ mutex_unlock(&f->sem); + goto out_page; + } + ret = jffs2_add_full_dnode_to_inode(c, f, fn); +@@ -212,10 +207,12 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + jffs2_mark_node_obsolete(c, fn->raw); + jffs2_free_full_dnode(fn); + 
jffs2_complete_reservation(c); ++ mutex_unlock(&f->sem); + goto out_page; + } + jffs2_complete_reservation(c); + inode->i_size = pageofs; ++ mutex_unlock(&f->sem); + } + + /* +@@ -224,18 +221,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + * case of a short-copy. + */ + if (!PageUptodate(pg)) { ++ mutex_lock(&f->sem); + ret = jffs2_do_readpage_nolock(inode, pg); ++ mutex_unlock(&f->sem); + if (ret) + goto out_page; + } +- mutex_unlock(&f->sem); + jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags); + return ret; + + out_page: + unlock_page(pg); + page_cache_release(pg); +- mutex_unlock(&f->sem); + return ret; + } + +diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c +index 5a2dec2b064c..95d5880a63ee 100644 +--- a/fs/jffs2/gc.c ++++ b/fs/jffs2/gc.c +@@ -1296,14 +1296,17 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era + BUG_ON(start > orig_start); + } + +- /* First, use readpage() to read the appropriate page into the page cache */ +- /* Q: What happens if we actually try to GC the _same_ page for which commit_write() +- * triggered garbage collection in the first place? +- * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the +- * page OK. We'll actually write it out again in commit_write, which is a little +- * suboptimal, but at least we're correct. +- */ ++ /* The rules state that we must obtain the page lock *before* f->sem, so ++ * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's ++ * actually going to *change* so we're safe; we only allow reading. ++ * ++ * It is important to note that jffs2_write_begin() will ensure that its ++ * page is marked Uptodate before allocating space. That means that if we ++ * end up here trying to GC the *same* page that jffs2_write_begin() is ++ * trying to write out, read_cache_page() will not deadlock. */ ++ mutex_unlock(&f->sem); + pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg); ++ mutex_lock(&f->sem); + + if (IS_ERR(pg_ptr)) { + pr_warn("read_cache_page() returned error: %ld\n", +diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h +index fa35ff79ab35..0637271f3770 100644 +--- a/fs/jffs2/nodelist.h ++++ b/fs/jffs2/nodelist.h +@@ -194,6 +194,7 @@ struct jffs2_inode_cache { + #define INO_STATE_CLEARING 6 /* In clear_inode() */ + + #define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */ ++#define INO_FLAGS_IS_DIR 0x02 /* is a directory */ + + #define RAWNODE_CLASS_INODE_CACHE 0 + #define RAWNODE_CLASS_XATTR_DATUM 1 +@@ -249,7 +250,10 @@ struct jffs2_readinode_info + + struct jffs2_full_dirent + { +- struct jffs2_raw_node_ref *raw; ++ union { ++ struct jffs2_raw_node_ref *raw; ++ struct jffs2_inode_cache *ic; /* Just during part of build */ ++ }; + struct jffs2_full_dirent *next; + uint32_t version; + uint32_t ino; /* == zero for unlink */ +diff --git a/fs/locks.c b/fs/locks.c +index 2c61c4e9368c..f3197830f07e 100644 +--- a/fs/locks.c ++++ b/fs/locks.c +@@ -2007,7 +2007,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, + goto out; + } + +-again: + error = flock_to_posix_lock(filp, file_lock, &flock); + if (error) + goto out; +@@ -2038,19 +2037,22 @@ again: + * Attempt to detect a close/fcntl race and recover by + * releasing the lock that was just acquired. + */ +- /* +- * we need that spin_lock here - it prevents reordering between +- * update of inode->i_flock and check for it done in close(). +- * rcu_read_lock() wouldn't do. 
+- */ +- spin_lock(¤t->files->file_lock); +- f = fcheck(fd); +- spin_unlock(¤t->files->file_lock); +- if (!error && f != filp && flock.l_type != F_UNLCK) { +- flock.l_type = F_UNLCK; +- goto again; ++ if (!error && file_lock->fl_type != F_UNLCK) { ++ /* ++ * We need that spin_lock here - it prevents reordering between ++ * update of inode->i_flock and check for it done in ++ * close(). rcu_read_lock() wouldn't do. ++ */ ++ spin_lock(¤t->files->file_lock); ++ f = fcheck(fd); ++ spin_unlock(¤t->files->file_lock); ++ if (f != filp) { ++ file_lock->fl_type = F_UNLCK; ++ error = do_lock_file_wait(filp, cmd, file_lock); ++ WARN_ON_ONCE(error); ++ error = -EBADF; ++ } + } +- + out: + locks_free_lock(file_lock); + return error; +@@ -2125,7 +2127,6 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, + goto out; + } + +-again: + error = flock64_to_posix_lock(filp, file_lock, &flock); + if (error) + goto out; +@@ -2156,14 +2157,22 @@ again: + * Attempt to detect a close/fcntl race and recover by + * releasing the lock that was just acquired. + */ +- spin_lock(¤t->files->file_lock); +- f = fcheck(fd); +- spin_unlock(¤t->files->file_lock); +- if (!error && f != filp && flock.l_type != F_UNLCK) { +- flock.l_type = F_UNLCK; +- goto again; ++ if (!error && file_lock->fl_type != F_UNLCK) { ++ /* ++ * We need that spin_lock here - it prevents reordering between ++ * update of inode->i_flock and check for it done in ++ * close(). rcu_read_lock() wouldn't do. ++ */ ++ spin_lock(¤t->files->file_lock); ++ f = fcheck(fd); ++ spin_unlock(¤t->files->file_lock); ++ if (f != filp) { ++ file_lock->fl_type = F_UNLCK; ++ error = do_lock_file_wait(filp, cmd, file_lock); ++ WARN_ON_ONCE(error); ++ error = -EBADF; ++ } + } +- + out: + locks_free_lock(file_lock); + return error; +diff --git a/include/linux/ata.h b/include/linux/ata.h +index f2f4d8da97c0..f7ff6554a354 100644 +--- a/include/linux/ata.h ++++ b/include/linux/ata.h +@@ -484,8 +484,8 @@ enum ata_tf_protocols { + }; + + enum ata_ioctls { +- ATA_IOC_GET_IO32 = 0x309, +- ATA_IOC_SET_IO32 = 0x324, ++ ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */ ++ ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */ + }; + + /* core structures */ +diff --git a/include/linux/libata.h b/include/linux/libata.h +index 189c9ff97b29..a445209be917 100644 +--- a/include/linux/libata.h ++++ b/include/linux/libata.h +@@ -712,7 +712,7 @@ struct ata_device { + union { + u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ + u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ +- }; ++ } ____cacheline_aligned; + + /* DEVSLP Timing Variables from Identify Device Data Log */ + u8 devslp_timing[ATA_LOG_DEVSLP_SIZE]; +diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h +index d1fb912740f3..0d7a62fea895 100644 +--- a/include/target/iscsi/iscsi_transport.h ++++ b/include/target/iscsi/iscsi_transport.h +@@ -21,6 +21,7 @@ struct iscsit_transport { + int (*iscsit_get_dataout)(struct iscsi_conn *, struct iscsi_cmd *, bool); + int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *); + int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *); ++ void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *); + }; + + static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd) +diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h +index f5915b39386a..522ae25a61a7 100644 +--- a/include/target/target_core_backend.h ++++ b/include/target/target_core_backend.h +@@ -94,4 +94,8 @@ 
sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *, + + void array_free(void *array, int n); + ++sector_t target_to_linux_sector(struct se_device *dev, sector_t lb); ++bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, ++ struct request_queue *q, int block_size); ++ + #endif /* TARGET_CORE_BACKEND_H */ +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h +index 42606764d830..3beb2fed86aa 100644 +--- a/include/target/target_core_base.h ++++ b/include/target/target_core_base.h +@@ -162,7 +162,7 @@ enum se_cmd_flags_table { + SCF_SENT_CHECK_CONDITION = 0x00000800, + SCF_OVERFLOW_BIT = 0x00001000, + SCF_UNDERFLOW_BIT = 0x00002000, +- SCF_SENT_DELAYED_TAS = 0x00004000, ++ SCF_SEND_DELAYED_TAS = 0x00004000, + SCF_ALUA_NON_OPTIMIZED = 0x00008000, + SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000, + SCF_ACK_KREF = 0x00040000, +@@ -528,6 +528,8 @@ struct se_cmd { + #define CMD_T_DEV_ACTIVE (1 << 7) + #define CMD_T_REQUEST_STOP (1 << 8) + #define CMD_T_BUSY (1 << 9) ++#define CMD_T_TAS (1 << 10) ++#define CMD_T_FABRIC_STOP (1 << 11) + spinlock_t t_state_lock; + struct completion t_transport_stop_comp; + +diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h +index 0218d689b3d7..1d1043644b9b 100644 +--- a/include/target/target_core_fabric.h ++++ b/include/target/target_core_fabric.h +@@ -62,6 +62,7 @@ struct target_core_fabric_ops { + int (*queue_data_in)(struct se_cmd *); + int (*queue_status)(struct se_cmd *); + void (*queue_tm_rsp)(struct se_cmd *); ++ void (*aborted_task)(struct se_cmd *); + /* + * fabric module calls for target_core_fabric_configfs.c + */ +diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c +index 2bb95a7a8809..c14565bde887 100644 +--- a/sound/core/control_compat.c ++++ b/sound/core/control_compat.c +@@ -170,6 +170,19 @@ struct snd_ctl_elem_value32 { + unsigned char reserved[128]; + }; + ++#ifdef CONFIG_X86_X32 ++/* x32 has a different alignment for 64bit values from ia32 */ ++struct snd_ctl_elem_value_x32 { ++ struct snd_ctl_elem_id id; ++ unsigned int indirect; /* bit-field causes misalignment */ ++ union { ++ s32 integer[128]; ++ unsigned char data[512]; ++ s64 integer64[64]; ++ } value; ++ unsigned char reserved[128]; ++}; ++#endif /* CONFIG_X86_X32 */ + + /* get the value type and count of the control */ + static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id, +@@ -219,9 +232,11 @@ static int get_elem_size(int type, int count) + + static int copy_ctl_value_from_user(struct snd_card *card, + struct snd_ctl_elem_value *data, +- struct snd_ctl_elem_value32 __user *data32, ++ void __user *userdata, ++ void __user *valuep, + int *typep, int *countp) + { ++ struct snd_ctl_elem_value32 __user *data32 = userdata; + int i, type, size; + int uninitialized_var(count); + unsigned int indirect; +@@ -239,8 +254,9 @@ static int copy_ctl_value_from_user(struct snd_card *card, + if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || + type == SNDRV_CTL_ELEM_TYPE_INTEGER) { + for (i = 0; i < count; i++) { ++ s32 __user *intp = valuep; + int val; +- if (get_user(val, &data32->value.integer[i])) ++ if (get_user(val, &intp[i])) + return -EFAULT; + data->value.integer.value[i] = val; + } +@@ -250,8 +266,7 @@ static int copy_ctl_value_from_user(struct snd_card *card, + printk(KERN_ERR "snd_ioctl32_ctl_elem_value: unknown type %d\n", type); + return -EINVAL; + } +- if (copy_from_user(data->value.bytes.data, +- data32->value.data, size)) ++ if 
(copy_from_user(data->value.bytes.data, valuep, size)) + return -EFAULT; + } + +@@ -261,7 +276,8 @@ static int copy_ctl_value_from_user(struct snd_card *card, + } + + /* restore the value to 32bit */ +-static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32, ++static int copy_ctl_value_to_user(void __user *userdata, ++ void __user *valuep, + struct snd_ctl_elem_value *data, + int type, int count) + { +@@ -270,22 +286,22 @@ static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32, + if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || + type == SNDRV_CTL_ELEM_TYPE_INTEGER) { + for (i = 0; i < count; i++) { ++ s32 __user *intp = valuep; + int val; + val = data->value.integer.value[i]; +- if (put_user(val, &data32->value.integer[i])) ++ if (put_user(val, &intp[i])) + return -EFAULT; + } + } else { + size = get_elem_size(type, count); +- if (copy_to_user(data32->value.data, +- data->value.bytes.data, size)) ++ if (copy_to_user(valuep, data->value.bytes.data, size)) + return -EFAULT; + } + return 0; + } + +-static int snd_ctl_elem_read_user_compat(struct snd_card *card, +- struct snd_ctl_elem_value32 __user *data32) ++static int ctl_elem_read_user(struct snd_card *card, ++ void __user *userdata, void __user *valuep) + { + struct snd_ctl_elem_value *data; + int err, type, count; +@@ -294,7 +310,9 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card, + if (data == NULL) + return -ENOMEM; + +- if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0) ++ err = copy_ctl_value_from_user(card, data, userdata, valuep, ++ &type, &count); ++ if (err < 0) + goto error; + + snd_power_lock(card); +@@ -303,14 +321,15 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card, + err = snd_ctl_elem_read(card, data); + snd_power_unlock(card); + if (err >= 0) +- err = copy_ctl_value_to_user(data32, data, type, count); ++ err = copy_ctl_value_to_user(userdata, valuep, data, ++ type, count); + error: + kfree(data); + return err; + } + +-static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, +- struct snd_ctl_elem_value32 __user *data32) ++static int ctl_elem_write_user(struct snd_ctl_file *file, ++ void __user *userdata, void __user *valuep) + { + struct snd_ctl_elem_value *data; + struct snd_card *card = file->card; +@@ -320,7 +339,9 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, + if (data == NULL) + return -ENOMEM; + +- if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0) ++ err = copy_ctl_value_from_user(card, data, userdata, valuep, ++ &type, &count); ++ if (err < 0) + goto error; + + snd_power_lock(card); +@@ -329,12 +350,39 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, + err = snd_ctl_elem_write(card, file, data); + snd_power_unlock(card); + if (err >= 0) +- err = copy_ctl_value_to_user(data32, data, type, count); ++ err = copy_ctl_value_to_user(userdata, valuep, data, ++ type, count); + error: + kfree(data); + return err; + } + ++static int snd_ctl_elem_read_user_compat(struct snd_card *card, ++ struct snd_ctl_elem_value32 __user *data32) ++{ ++ return ctl_elem_read_user(card, data32, &data32->value); ++} ++ ++static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, ++ struct snd_ctl_elem_value32 __user *data32) ++{ ++ return ctl_elem_write_user(file, data32, &data32->value); ++} ++ ++#ifdef CONFIG_X86_X32 ++static int snd_ctl_elem_read_user_x32(struct snd_card *card, ++ struct snd_ctl_elem_value_x32 __user *data32) ++{ ++ return 
ctl_elem_read_user(card, data32, &data32->value); ++} ++ ++static int snd_ctl_elem_write_user_x32(struct snd_ctl_file *file, ++ struct snd_ctl_elem_value_x32 __user *data32) ++{ ++ return ctl_elem_write_user(file, data32, &data32->value); ++} ++#endif /* CONFIG_X86_X32 */ ++ + /* add or replace a user control */ + static int snd_ctl_elem_add_compat(struct snd_ctl_file *file, + struct snd_ctl_elem_info32 __user *data32, +@@ -393,6 +441,10 @@ enum { + SNDRV_CTL_IOCTL_ELEM_WRITE32 = _IOWR('U', 0x13, struct snd_ctl_elem_value32), + SNDRV_CTL_IOCTL_ELEM_ADD32 = _IOWR('U', 0x17, struct snd_ctl_elem_info32), + SNDRV_CTL_IOCTL_ELEM_REPLACE32 = _IOWR('U', 0x18, struct snd_ctl_elem_info32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_CTL_IOCTL_ELEM_READ_X32 = _IOWR('U', 0x12, struct snd_ctl_elem_value_x32), ++ SNDRV_CTL_IOCTL_ELEM_WRITE_X32 = _IOWR('U', 0x13, struct snd_ctl_elem_value_x32), ++#endif /* CONFIG_X86_X32 */ + }; + + static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -431,6 +483,12 @@ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, uns + return snd_ctl_elem_add_compat(ctl, argp, 0); + case SNDRV_CTL_IOCTL_ELEM_REPLACE32: + return snd_ctl_elem_add_compat(ctl, argp, 1); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_CTL_IOCTL_ELEM_READ_X32: ++ return snd_ctl_elem_read_user_x32(ctl->card, argp); ++ case SNDRV_CTL_IOCTL_ELEM_WRITE_X32: ++ return snd_ctl_elem_write_user_x32(ctl, argp); ++#endif /* CONFIG_X86_X32 */ + } + + down_read(&snd_ioctl_rwsem); +diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c +index 5268c1f58c25..09a89094dcf7 100644 +--- a/sound/core/rawmidi_compat.c ++++ b/sound/core/rawmidi_compat.c +@@ -94,9 +94,58 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile, + return 0; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has 64bit timespec and 64bit alignment */ ++struct snd_rawmidi_status_x32 { ++ s32 stream; ++ u32 rsvd; /* alignment */ ++ struct timespec tstamp; ++ u32 avail; ++ u32 xruns; ++ unsigned char reserved[16]; ++} __attribute__((packed)); ++ ++#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst)) ++ ++static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile, ++ struct snd_rawmidi_status_x32 __user *src) ++{ ++ int err; ++ struct snd_rawmidi_status status; ++ ++ if (rfile->output == NULL) ++ return -EINVAL; ++ if (get_user(status.stream, &src->stream)) ++ return -EFAULT; ++ ++ switch (status.stream) { ++ case SNDRV_RAWMIDI_STREAM_OUTPUT: ++ err = snd_rawmidi_output_status(rfile->output, &status); ++ break; ++ case SNDRV_RAWMIDI_STREAM_INPUT: ++ err = snd_rawmidi_input_status(rfile->input, &status); ++ break; ++ default: ++ return -EINVAL; ++ } ++ if (err < 0) ++ return err; ++ ++ if (put_timespec(&status.tstamp, &src->tstamp) || ++ put_user(status.avail, &src->avail) || ++ put_user(status.xruns, &src->xruns)) ++ return -EFAULT; ++ ++ return 0; ++} ++#endif /* CONFIG_X86_X32 */ ++ + enum { + SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32), + SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_RAWMIDI_IOCTL_STATUS_X32 = _IOWR('W', 0x20, struct snd_rawmidi_status_x32), ++#endif /* CONFIG_X86_X32 */ + }; + + static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -115,6 +164,10 @@ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsign + return snd_rawmidi_ioctl_params_compat(rfile, argp); + 
case SNDRV_RAWMIDI_IOCTL_STATUS32: + return snd_rawmidi_ioctl_status_compat(rfile, argp); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_RAWMIDI_IOCTL_STATUS_X32: ++ return snd_rawmidi_ioctl_status_x32(rfile, argp); ++#endif /* CONFIG_X86_X32 */ + } + return -ENOIOCTLCMD; + } +diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c +index 8d4d5e853efe..ab774954c985 100644 +--- a/sound/core/seq/oss/seq_oss.c ++++ b/sound/core/seq/oss/seq_oss.c +@@ -150,8 +150,6 @@ odev_release(struct inode *inode, struct file *file) + if ((dp = file->private_data) == NULL) + return 0; + +- snd_seq_oss_drain_write(dp); +- + mutex_lock(®ister_mutex); + snd_seq_oss_release(dp); + mutex_unlock(®ister_mutex); +diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h +index c0154a959d55..2464112b08ad 100644 +--- a/sound/core/seq/oss/seq_oss_device.h ++++ b/sound/core/seq/oss/seq_oss_device.h +@@ -131,7 +131,6 @@ int snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int co + unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait); + + void snd_seq_oss_reset(struct seq_oss_devinfo *dp); +-void snd_seq_oss_drain_write(struct seq_oss_devinfo *dp); + + /* */ + void snd_seq_oss_process_queue(struct seq_oss_devinfo *dp, abstime_t time); +diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c +index b3f39b5ed742..f9e09e458227 100644 +--- a/sound/core/seq/oss/seq_oss_init.c ++++ b/sound/core/seq/oss/seq_oss_init.c +@@ -457,23 +457,6 @@ snd_seq_oss_release(struct seq_oss_devinfo *dp) + + + /* +- * Wait until the queue is empty (if we don't have nonblock) +- */ +-void +-snd_seq_oss_drain_write(struct seq_oss_devinfo *dp) +-{ +- if (! dp->timer->running) +- return; +- if (is_write_mode(dp->file_mode) && !is_nonblock_mode(dp->file_mode) && +- dp->writeq) { +- debug_printk(("syncing..\n")); +- while (snd_seq_oss_writeq_sync(dp->writeq)) +- ; +- } +-} +- +- +-/* + * reset sequencer devices + */ + void +diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c +index e05802ae6e1b..2e908225d754 100644 +--- a/sound/core/timer_compat.c ++++ b/sound/core/timer_compat.c +@@ -70,13 +70,14 @@ static int snd_timer_user_status_compat(struct file *file, + struct snd_timer_status32 __user *_status) + { + struct snd_timer_user *tu; +- struct snd_timer_status status; ++ struct snd_timer_status32 status; + + tu = file->private_data; + if (snd_BUG_ON(!tu->timeri)) + return -ENXIO; + memset(&status, 0, sizeof(status)); +- status.tstamp = tu->tstamp; ++ status.tstamp.tv_sec = tu->tstamp.tv_sec; ++ status.tstamp.tv_nsec = tu->tstamp.tv_nsec; + status.resolution = snd_timer_resolution(tu->timeri); + status.lost = tu->timeri->lost; + status.overrun = tu->overrun; +@@ -88,12 +89,21 @@ static int snd_timer_user_status_compat(struct file *file, + return 0; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has the same struct as x86-64 */ ++#define snd_timer_user_status_x32(file, s) \ ++ snd_timer_user_status(file, s) ++#endif /* CONFIG_X86_X32 */ ++ + /* + */ + + enum { + SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32), + SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_TIMER_IOCTL_STATUS_X32 = _IOW('T', 0x14, struct snd_timer_status), ++#endif /* CONFIG_X86_X32 */ + }; + + static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -122,6 +132,10 @@ static long snd_timer_user_ioctl_compat(struct file *file, 
unsigned int cmd, uns + return snd_timer_user_info_compat(file, argp); + case SNDRV_TIMER_IOCTL_STATUS32: + return snd_timer_user_status_compat(file, argp); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_TIMER_IOCTL_STATUS_X32: ++ return snd_timer_user_status_x32(file, argp); ++#endif /* CONFIG_X86_X32 */ + } + return -ENOIOCTLCMD; + } +diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c +index bd90c80bb494..4bc0d66377d6 100644 +--- a/sound/pci/rme9652/hdsp.c ++++ b/sound/pci/rme9652/hdsp.c +@@ -2917,7 +2917,7 @@ static int snd_hdsp_get_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl + { + struct hdsp *hdsp = snd_kcontrol_chip(kcontrol); + +- ucontrol->value.enumerated.item[0] = hdsp_dds_offset(hdsp); ++ ucontrol->value.integer.value[0] = hdsp_dds_offset(hdsp); + return 0; + } + +@@ -2929,7 +2929,7 @@ static int snd_hdsp_put_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl + + if (!snd_hdsp_use_is_exclusive(hdsp)) + return -EBUSY; +- val = ucontrol->value.enumerated.item[0]; ++ val = ucontrol->value.integer.value[0]; + spin_lock_irq(&hdsp->lock); + if (val != hdsp_dds_offset(hdsp)) + change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 1 : 0; +diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c +index 21167503a3f9..dec173081798 100644 +--- a/sound/pci/rme9652/hdspm.c ++++ b/sound/pci/rme9652/hdspm.c +@@ -1602,6 +1602,9 @@ static void hdspm_set_dds_value(struct hdspm *hdspm, int rate) + { + u64 n; + ++ if (snd_BUG_ON(rate <= 0)) ++ return; ++ + if (rate >= 112000) + rate /= 4; + else if (rate >= 56000) +@@ -2224,6 +2227,8 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm) + } else { + /* slave mode, return external sample rate */ + rate = hdspm_external_sample_rate(hdspm); ++ if (!rate) ++ rate = hdspm->system_sample_rate; + } + } + +@@ -2269,8 +2274,11 @@ static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol, + ucontrol) + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); ++ int rate = ucontrol->value.integer.value[0]; + +- hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]); ++ if (rate < 27000 || rate > 207000) ++ return -EINVAL; ++ hdspm_set_dds_value(hdspm, ucontrol->value.integer.value[0]); + return 0; + } + +@@ -4469,7 +4477,7 @@ static int snd_hdspm_get_tco_word_term(struct snd_kcontrol *kcontrol, + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); + +- ucontrol->value.enumerated.item[0] = hdspm->tco->term; ++ ucontrol->value.integer.value[0] = hdspm->tco->term; + + return 0; + } +@@ -4480,8 +4488,8 @@ static int snd_hdspm_put_tco_word_term(struct snd_kcontrol *kcontrol, + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); + +- if (hdspm->tco->term != ucontrol->value.enumerated.item[0]) { +- hdspm->tco->term = ucontrol->value.enumerated.item[0]; ++ if (hdspm->tco->term != ucontrol->value.integer.value[0]) { ++ hdspm->tco->term = ucontrol->value.integer.value[0]; + + hdspm_tco_write(hdspm); + diff --git a/patch/kernel/cubox-default/patch-3.14.64-65.patch b/patch/kernel/cubox-default/patch-3.14.64-65.patch new file mode 100644 index 000000000..73548c75e --- /dev/null +++ b/patch/kernel/cubox-default/patch-3.14.64-65.patch @@ -0,0 +1,1518 @@ +diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt +index c477af086e65..686a64bba775 100644 +--- a/Documentation/filesystems/efivarfs.txt ++++ b/Documentation/filesystems/efivarfs.txt +@@ -14,3 +14,10 @@ filesystem. 
+ efivarfs is typically mounted like this, + + mount -t efivarfs none /sys/firmware/efi/efivars ++ ++Due to the presence of numerous firmware bugs where removing non-standard ++UEFI variables causes the system firmware to fail to POST, efivarfs ++files that are not well-known standardized variables are created ++as immutable files. This doesn't prevent removal - "chattr -i" will work - ++but it does prevent this kind of failure from being accomplished ++accidentally. +diff --git a/Makefile b/Makefile +index de41fa82652f..a19c22a77728 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 64 ++SUBLEVEL = 65 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c +index 12664c130d73..78a859ec4946 100644 +--- a/arch/powerpc/kernel/module_64.c ++++ b/arch/powerpc/kernel/module_64.c +@@ -202,7 +202,7 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) + if (syms[i].st_shndx == SHN_UNDEF) { + char *name = strtab + syms[i].st_name; + if (name[0] == '.') +- memmove(name, name+1, strlen(name)); ++ syms[i].st_name++; + } + } + } +diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S +index b3bad672e5d9..87fa70f8472a 100644 +--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S ++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S +@@ -1148,6 +1148,20 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) + std r6, VCPU_ACOP(r9) + stw r7, VCPU_GUEST_PID(r9) + std r8, VCPU_WORT(r9) ++ /* ++ * Restore various registers to 0, where non-zero values ++ * set by the guest could disrupt the host. ++ */ ++ li r0, 0 ++ mtspr SPRN_IAMR, r0 ++ mtspr SPRN_CIABR, r0 ++ mtspr SPRN_DAWRX, r0 ++ mtspr SPRN_TCSCR, r0 ++ mtspr SPRN_WORT, r0 ++ /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ ++ li r0, 1 ++ sldi r0, r0, 31 ++ mtspr SPRN_MMCRS, r0 + 8: + + /* Save and reset AMR and UAMOR before turning on the MMU */ +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 80c22a3ca688..b6fc5fc64cef 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -1555,6 +1555,13 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, + return; + } + break; ++ case MSR_IA32_PEBS_ENABLE: ++ /* PEBS needs a quiescent period after being disabled (to write ++ * a record). Disabling PEBS through VMX MSR swapping doesn't ++ * provide that period, so a CPU could write host's record into ++ * guest's memory. 
++ */ ++ wrmsrl(MSR_IA32_PEBS_ENABLE, 0); + } + + for (i = 0; i < m->nr; ++i) +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 1777f89875fb..8db66424be97 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -1991,6 +1991,8 @@ static void accumulate_steal_time(struct kvm_vcpu *vcpu) + + static void record_steal_time(struct kvm_vcpu *vcpu) + { ++ accumulate_steal_time(vcpu); ++ + if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) + return; + +@@ -2123,12 +2125,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + if (!(data & KVM_MSR_ENABLED)) + break; + +- vcpu->arch.st.last_steal = current->sched_info.run_delay; +- +- preempt_disable(); +- accumulate_steal_time(vcpu); +- preempt_enable(); +- + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + + break; +@@ -2818,7 +2814,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) + vcpu->cpu = cpu; + } + +- accumulate_steal_time(vcpu); + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + } + +diff --git a/crypto/af_alg.c b/crypto/af_alg.c +index 1971f3ccb09a..eeb2fb239934 100644 +--- a/crypto/af_alg.c ++++ b/crypto/af_alg.c +@@ -125,23 +125,6 @@ int af_alg_release(struct socket *sock) + } + EXPORT_SYMBOL_GPL(af_alg_release); + +-void af_alg_release_parent(struct sock *sk) +-{ +- struct alg_sock *ask = alg_sk(sk); +- bool last; +- +- sk = ask->parent; +- ask = alg_sk(sk); +- +- lock_sock(sk); +- last = !--ask->refcnt; +- release_sock(sk); +- +- if (last) +- sock_put(sk); +-} +-EXPORT_SYMBOL_GPL(af_alg_release_parent); +- + static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + { + struct sock *sk = sock->sk; +@@ -149,7 +132,6 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + struct sockaddr_alg *sa = (void *)uaddr; + const struct af_alg_type *type; + void *private; +- int err; + + if (sock->state == SS_CONNECTED) + return -EINVAL; +@@ -175,22 +157,16 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + return PTR_ERR(private); + } + +- err = -EBUSY; + lock_sock(sk); +- if (ask->refcnt) +- goto unlock; + + swap(ask->type, type); + swap(ask->private, private); + +- err = 0; +- +-unlock: + release_sock(sk); + + alg_do_release(type, private); + +- return err; ++ return 0; + } + + static int alg_setkey(struct sock *sk, char __user *ukey, +@@ -223,15 +199,11 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + const struct af_alg_type *type; +- int err = -EBUSY; ++ int err = -ENOPROTOOPT; + + lock_sock(sk); +- if (ask->refcnt) +- goto unlock; +- + type = ask->type; + +- err = -ENOPROTOOPT; + if (level != SOL_ALG || !type) + goto unlock; + +@@ -280,8 +252,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) + + sk2->sk_family = PF_ALG; + +- if (!ask->refcnt++) +- sock_hold(sk); ++ sock_hold(sk); + alg_sk(sk2)->parent = sk; + alg_sk(sk2)->type = type; + +diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c +index 3dc248239197..f04ce007bf7f 100644 +--- a/drivers/firmware/efi/efivars.c ++++ b/drivers/firmware/efi/efivars.c +@@ -219,7 +219,8 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) + } + + if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 || +- efivar_validate(new_var, new_var->Data, new_var->DataSize) == false) { ++ efivar_validate(new_var->VendorGuid, new_var->VariableName, ++ new_var->Data, new_var->DataSize) == false) { + printk(KERN_ERR "efivars: Malformed 
variable content\n"); + return -EINVAL; + } +@@ -334,7 +335,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, + return -EACCES; + + if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 || +- efivar_validate(new_var, new_var->Data, new_var->DataSize) == false) { ++ efivar_validate(new_var->VendorGuid, new_var->VariableName, ++ new_var->Data, new_var->DataSize) == false) { + printk(KERN_ERR "efivars: Malformed variable content\n"); + return -EINVAL; + } +@@ -409,35 +411,27 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) + { + int i, short_name_size; + char *short_name; +- unsigned long variable_name_size; +- efi_char16_t *variable_name; +- +- variable_name = new_var->var.VariableName; +- variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t); ++ unsigned long utf8_name_size; ++ efi_char16_t *variable_name = new_var->var.VariableName; + + /* +- * Length of the variable bytes in ASCII, plus the '-' separator, ++ * Length of the variable bytes in UTF8, plus the '-' separator, + * plus the GUID, plus trailing NUL + */ +- short_name_size = variable_name_size / sizeof(efi_char16_t) +- + 1 + EFI_VARIABLE_GUID_LEN + 1; +- +- short_name = kzalloc(short_name_size, GFP_KERNEL); ++ utf8_name_size = ucs2_utf8size(variable_name); ++ short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1; + ++ short_name = kmalloc(short_name_size, GFP_KERNEL); + if (!short_name) + return 1; + +- /* Convert Unicode to normal chars (assume top bits are 0), +- ala UTF-8 */ +- for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) { +- short_name[i] = variable_name[i] & 0xFF; +- } ++ ucs2_as_utf8(short_name, variable_name, short_name_size); ++ + /* This is ugly, but necessary to separate one vendor's + private variables from another's. 
*/ +- +- *(short_name + strlen(short_name)) = '-'; ++ short_name[utf8_name_size] = '-'; + efi_guid_unparse(&new_var->var.VendorGuid, +- short_name + strlen(short_name)); ++ short_name + utf8_name_size + 1); + + new_var->kobj.kset = efivars_kset; + +diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c +index e6125522860a..4e2f46938bf0 100644 +--- a/drivers/firmware/efi/vars.c ++++ b/drivers/firmware/efi/vars.c +@@ -42,7 +42,7 @@ DECLARE_WORK(efivar_work, NULL); + EXPORT_SYMBOL_GPL(efivar_work); + + static bool +-validate_device_path(struct efi_variable *var, int match, u8 *buffer, ++validate_device_path(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + struct efi_generic_dev_path *node; +@@ -75,7 +75,7 @@ validate_device_path(struct efi_variable *var, int match, u8 *buffer, + } + + static bool +-validate_boot_order(struct efi_variable *var, int match, u8 *buffer, ++validate_boot_order(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + /* An array of 16-bit integers */ +@@ -86,18 +86,18 @@ validate_boot_order(struct efi_variable *var, int match, u8 *buffer, + } + + static bool +-validate_load_option(struct efi_variable *var, int match, u8 *buffer, ++validate_load_option(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + u16 filepathlength; + int i, desclength = 0, namelen; + +- namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName)); ++ namelen = ucs2_strnlen(var_name, EFI_VAR_NAME_LEN); + + /* Either "Boot" or "Driver" followed by four digits of hex */ + for (i = match; i < match+4; i++) { +- if (var->VariableName[i] > 127 || +- hex_to_bin(var->VariableName[i] & 0xff) < 0) ++ if (var_name[i] > 127 || ++ hex_to_bin(var_name[i] & 0xff) < 0) + return true; + } + +@@ -132,12 +132,12 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer, + /* + * And, finally, check the filepath + */ +- return validate_device_path(var, match, buffer + desclength + 6, ++ return validate_device_path(var_name, match, buffer + desclength + 6, + filepathlength); + } + + static bool +-validate_uint16(struct efi_variable *var, int match, u8 *buffer, ++validate_uint16(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + /* A single 16-bit integer */ +@@ -148,7 +148,7 @@ validate_uint16(struct efi_variable *var, int match, u8 *buffer, + } + + static bool +-validate_ascii_string(struct efi_variable *var, int match, u8 *buffer, ++validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + int i; +@@ -165,67 +165,133 @@ validate_ascii_string(struct efi_variable *var, int match, u8 *buffer, + } + + struct variable_validate { ++ efi_guid_t vendor; + char *name; +- bool (*validate)(struct efi_variable *var, int match, u8 *data, ++ bool (*validate)(efi_char16_t *var_name, int match, u8 *data, + unsigned long len); + }; + ++/* ++ * This is the list of variables we need to validate, as well as the ++ * whitelist for what we think is safe not to default to immutable. ++ * ++ * If it has a validate() method that's not NULL, it'll go into the ++ * validation routine. If not, it is assumed valid, but still used for ++ * whitelisting. ++ * ++ * Note that it's sorted by {vendor,name}, but globbed names must come after ++ * any other name with the same prefix. 
++ */ + static const struct variable_validate variable_validate[] = { +- { "BootNext", validate_uint16 }, +- { "BootOrder", validate_boot_order }, +- { "DriverOrder", validate_boot_order }, +- { "Boot*", validate_load_option }, +- { "Driver*", validate_load_option }, +- { "ConIn", validate_device_path }, +- { "ConInDev", validate_device_path }, +- { "ConOut", validate_device_path }, +- { "ConOutDev", validate_device_path }, +- { "ErrOut", validate_device_path }, +- { "ErrOutDev", validate_device_path }, +- { "Timeout", validate_uint16 }, +- { "Lang", validate_ascii_string }, +- { "PlatformLang", validate_ascii_string }, +- { "", NULL }, ++ { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 }, ++ { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option }, ++ { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string }, ++ { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL }, ++ { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 }, ++ { LINUX_EFI_CRASH_GUID, "*", NULL }, ++ { NULL_GUID, "", NULL }, + }; + ++static bool ++variable_matches(const char *var_name, size_t len, const char *match_name, ++ int *match) ++{ ++ for (*match = 0; ; (*match)++) { ++ char c = match_name[*match]; ++ char u = var_name[*match]; ++ ++ /* Wildcard in the matching name means we've matched */ ++ if (c == '*') ++ return true; ++ ++ /* Case sensitive match */ ++ if (!c && *match == len) ++ return true; ++ ++ if (c != u) ++ return false; ++ ++ if (!c) ++ return true; ++ } ++ return true; ++} ++ + bool +-efivar_validate(struct efi_variable *var, u8 *data, unsigned long len) ++efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, ++ unsigned long data_size) + { + int i; +- u16 *unicode_name = var->VariableName; ++ unsigned long utf8_size; ++ u8 *utf8_name; + +- for (i = 0; variable_validate[i].validate != NULL; i++) { +- const char *name = variable_validate[i].name; +- int match; ++ utf8_size = ucs2_utf8size(var_name); ++ utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL); ++ if (!utf8_name) ++ return false; + +- for (match = 0; ; match++) { +- char c = name[match]; +- u16 u = unicode_name[match]; ++ ucs2_as_utf8(utf8_name, var_name, utf8_size); ++ utf8_name[utf8_size] = '\0'; + +- /* All special variables are plain ascii */ +- if (u > 127) +- return true; ++ for (i = 0; variable_validate[i].name[0] != '\0'; i++) { ++ const char *name = variable_validate[i].name; ++ int match = 0; + +- /* Wildcard in the matching name means we've matched */ +- if (c == '*') +- return variable_validate[i].validate(var, +- match, data, len); ++ if (efi_guidcmp(vendor, variable_validate[i].vendor)) ++ continue; + +- /* Case sensitive match */ +- if (c != u) ++ if (variable_matches(utf8_name, utf8_size+1, name, &match)) { ++ if (variable_validate[i].validate == NULL) + break; +- +- /* Reached the end of the string while matching */ +- if (!c) +- return 
variable_validate[i].validate(var, +- match, data, len); ++ kfree(utf8_name); ++ return variable_validate[i].validate(var_name, match, ++ data, data_size); + } + } +- ++ kfree(utf8_name); + return true; + } + EXPORT_SYMBOL_GPL(efivar_validate); + ++bool ++efivar_variable_is_removable(efi_guid_t vendor, const char *var_name, ++ size_t len) ++{ ++ int i; ++ bool found = false; ++ int match = 0; ++ ++ /* ++ * Check if our variable is in the validated variables list ++ */ ++ for (i = 0; variable_validate[i].name[0] != '\0'; i++) { ++ if (efi_guidcmp(variable_validate[i].vendor, vendor)) ++ continue; ++ ++ if (variable_matches(var_name, len, ++ variable_validate[i].name, &match)) { ++ found = true; ++ break; ++ } ++ } ++ ++ /* ++ * If it's in our list, it is removable. ++ */ ++ return found; ++} ++EXPORT_SYMBOL_GPL(efivar_variable_is_removable); ++ + static efi_status_t + check_var_size(u32 attributes, unsigned long size) + { +@@ -805,7 +871,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, + + *set = false; + +- if (efivar_validate(&entry->var, data, *size) == false) ++ if (efivar_validate(*vendor, name, data, *size) == false) + return -EINVAL; + + /* +diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c +index 8dd524f32284..08f105a06fbf 100644 +--- a/fs/efivarfs/file.c ++++ b/fs/efivarfs/file.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + + #include "internal.h" + +@@ -108,9 +109,79 @@ out_free: + return size; + } + ++static int ++efivarfs_ioc_getxflags(struct file *file, void __user *arg) ++{ ++ struct inode *inode = file->f_mapping->host; ++ unsigned int i_flags; ++ unsigned int flags = 0; ++ ++ i_flags = inode->i_flags; ++ if (i_flags & S_IMMUTABLE) ++ flags |= FS_IMMUTABLE_FL; ++ ++ if (copy_to_user(arg, &flags, sizeof(flags))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ++efivarfs_ioc_setxflags(struct file *file, void __user *arg) ++{ ++ struct inode *inode = file->f_mapping->host; ++ unsigned int flags; ++ unsigned int i_flags = 0; ++ int error; ++ ++ if (!inode_owner_or_capable(inode)) ++ return -EACCES; ++ ++ if (copy_from_user(&flags, arg, sizeof(flags))) ++ return -EFAULT; ++ ++ if (flags & ~FS_IMMUTABLE_FL) ++ return -EOPNOTSUPP; ++ ++ if (!capable(CAP_LINUX_IMMUTABLE)) ++ return -EPERM; ++ ++ if (flags & FS_IMMUTABLE_FL) ++ i_flags |= S_IMMUTABLE; ++ ++ ++ error = mnt_want_write_file(file); ++ if (error) ++ return error; ++ ++ mutex_lock(&inode->i_mutex); ++ inode->i_flags &= ~S_IMMUTABLE; ++ inode->i_flags |= i_flags; ++ mutex_unlock(&inode->i_mutex); ++ ++ mnt_drop_write_file(file); ++ ++ return 0; ++} ++ ++long ++efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p) ++{ ++ void __user *arg = (void __user *)p; ++ ++ switch (cmd) { ++ case FS_IOC_GETFLAGS: ++ return efivarfs_ioc_getxflags(file, arg); ++ case FS_IOC_SETFLAGS: ++ return efivarfs_ioc_setxflags(file, arg); ++ } ++ ++ return -ENOTTY; ++} ++ + const struct file_operations efivarfs_file_operations = { + .open = simple_open, + .read = efivarfs_file_read, + .write = efivarfs_file_write, + .llseek = no_llseek, ++ .unlocked_ioctl = efivarfs_file_ioctl, + }; +diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c +index 07ab49745e31..7e7318f10575 100644 +--- a/fs/efivarfs/inode.c ++++ b/fs/efivarfs/inode.c +@@ -15,7 +15,8 @@ + #include "internal.h" + + struct inode *efivarfs_get_inode(struct super_block *sb, +- const struct inode *dir, int mode, dev_t dev) ++ const struct inode *dir, int mode, ++ dev_t dev, bool is_removable) + { + struct inode 
*inode = new_inode(sb); + +@@ -23,6 +24,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb, + inode->i_ino = get_next_ino(); + inode->i_mode = mode; + inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; ++ inode->i_flags = is_removable ? 0 : S_IMMUTABLE; + switch (mode & S_IFMT) { + case S_IFREG: + inode->i_fop = &efivarfs_file_operations; +@@ -102,22 +104,17 @@ static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid) + static int efivarfs_create(struct inode *dir, struct dentry *dentry, + umode_t mode, bool excl) + { +- struct inode *inode; ++ struct inode *inode = NULL; + struct efivar_entry *var; + int namelen, i = 0, err = 0; ++ bool is_removable = false; + + if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len)) + return -EINVAL; + +- inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0); +- if (!inode) +- return -ENOMEM; +- + var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL); +- if (!var) { +- err = -ENOMEM; +- goto out; +- } ++ if (!var) ++ return -ENOMEM; + + /* length of the variable name itself: remove GUID and separator */ + namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1; +@@ -125,6 +122,16 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry, + efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1, + &var->var.VendorGuid); + ++ if (efivar_variable_is_removable(var->var.VendorGuid, ++ dentry->d_name.name, namelen)) ++ is_removable = true; ++ ++ inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable); ++ if (!inode) { ++ err = -ENOMEM; ++ goto out; ++ } ++ + for (i = 0; i < namelen; i++) + var->var.VariableName[i] = dentry->d_name.name[i]; + +@@ -138,7 +145,8 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry, + out: + if (err) { + kfree(var); +- iput(inode); ++ if (inode) ++ iput(inode); + } + return err; + } +diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h +index b5ff16addb7c..b4505188e799 100644 +--- a/fs/efivarfs/internal.h ++++ b/fs/efivarfs/internal.h +@@ -15,7 +15,8 @@ extern const struct file_operations efivarfs_file_operations; + extern const struct inode_operations efivarfs_dir_inode_operations; + extern bool efivarfs_valid_name(const char *str, int len); + extern struct inode *efivarfs_get_inode(struct super_block *sb, +- const struct inode *dir, int mode, dev_t dev); ++ const struct inode *dir, int mode, dev_t dev, ++ bool is_removable); + + extern struct list_head efivarfs_list; + +diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c +index becc725a1953..a3a30f84f917 100644 +--- a/fs/efivarfs/super.c ++++ b/fs/efivarfs/super.c +@@ -118,8 +118,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, + struct dentry *dentry, *root = sb->s_root; + unsigned long size = 0; + char *name; +- int len, i; ++ int len; + int err = -ENOMEM; ++ bool is_removable = false; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) +@@ -128,15 +129,17 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, + memcpy(entry->var.VariableName, name16, name_size); + memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); + +- len = ucs2_strlen(entry->var.VariableName); ++ len = ucs2_utf8size(entry->var.VariableName); + + /* name, plus '-', plus GUID, plus NUL*/ + name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL); + if (!name) + goto fail; + +- for (i = 0; i < len; i++) +- name[i] = entry->var.VariableName[i] & 0xFF; ++ ucs2_as_utf8(name, entry->var.VariableName, len); ++ ++ if 
(efivar_variable_is_removable(entry->var.VendorGuid, name, len)) ++ is_removable = true; + + name[len] = '-'; + +@@ -144,7 +147,8 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, + + name[len + EFI_VARIABLE_GUID_LEN+1] = '\0'; + +- inode = efivarfs_get_inode(sb, root->d_inode, S_IFREG | 0644, 0); ++ inode = efivarfs_get_inode(sb, root->d_inode, S_IFREG | 0644, 0, ++ is_removable); + if (!inode) + goto fail_name; + +@@ -200,7 +204,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent) + sb->s_d_op = &efivarfs_d_ops; + sb->s_time_gran = 1; + +- inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0); ++ inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true); + if (!inode) + return -ENOMEM; + inode->i_op = &efivarfs_dir_inode_operations; +diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h +index 2f38daaab3d7..d61c11170213 100644 +--- a/include/crypto/if_alg.h ++++ b/include/crypto/if_alg.h +@@ -30,8 +30,6 @@ struct alg_sock { + + struct sock *parent; + +- unsigned int refcnt; +- + const struct af_alg_type *type; + void *private; + }; +@@ -66,7 +64,6 @@ int af_alg_register_type(const struct af_alg_type *type); + int af_alg_unregister_type(const struct af_alg_type *type); + + int af_alg_release(struct socket *sock); +-void af_alg_release_parent(struct sock *sk); + int af_alg_accept(struct sock *sk, struct socket *newsock); + + int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len, +@@ -83,6 +80,11 @@ static inline struct alg_sock *alg_sk(struct sock *sk) + return (struct alg_sock *)sk; + } + ++static inline void af_alg_release_parent(struct sock *sk) ++{ ++ sock_put(alg_sk(sk)->parent); ++} ++ + static inline void af_alg_init_completion(struct af_alg_completion *completion) + { + init_completion(&completion->completion); +diff --git a/include/linux/efi.h b/include/linux/efi.h +index 0a819e7a60c9..b39a4651360a 100644 +--- a/include/linux/efi.h ++++ b/include/linux/efi.h +@@ -792,8 +792,10 @@ struct efivars { + * and we use a page for reading/writing. + */ + ++#define EFI_VAR_NAME_LEN 1024 ++ + struct efi_variable { +- efi_char16_t VariableName[1024/sizeof(efi_char16_t)]; ++ efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)]; + efi_guid_t VendorGuid; + unsigned long DataSize; + __u8 Data[1024]; +@@ -864,7 +866,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *), + struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, + struct list_head *head, bool remove); + +-bool efivar_validate(struct efi_variable *var, u8 *data, unsigned long len); ++bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, ++ unsigned long data_size); ++bool efivar_variable_is_removable(efi_guid_t vendor, const char *name, ++ size_t len); + + extern struct work_struct efivar_work; + void efivar_run_worker(void); +diff --git a/include/linux/module.h b/include/linux/module.h +index eaf60ff9ba94..adea1d663a06 100644 +--- a/include/linux/module.h ++++ b/include/linux/module.h +@@ -227,6 +227,12 @@ struct module_ref { + unsigned long decs; + } __attribute((aligned(2 * sizeof(unsigned long)))); + ++struct mod_kallsyms { ++ Elf_Sym *symtab; ++ unsigned int num_symtab; ++ char *strtab; ++}; ++ + struct module { + enum module_state state; + +@@ -314,14 +320,9 @@ struct module { + #endif + + #ifdef CONFIG_KALLSYMS +- /* +- * We keep the symbol and string tables for kallsyms. 
+- * The core_* fields below are temporary, loader-only (they +- * could really be discarded after module init). +- */ +- Elf_Sym *symtab, *core_symtab; +- unsigned int num_symtab, core_num_syms; +- char *strtab, *core_strtab; ++ /* Protected by RCU and/or module_mutex: use rcu_dereference() */ ++ struct mod_kallsyms *kallsyms; ++ struct mod_kallsyms core_kallsyms; + + /* Section attributes */ + struct module_sect_attrs *sect_attrs; +diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h +index 97c8689c7e51..326f8b238c31 100644 +--- a/include/linux/tracepoint.h ++++ b/include/linux/tracepoint.h +@@ -129,9 +129,6 @@ static inline void tracepoint_synchronize_unregister(void) + void *it_func; \ + void *__data; \ + \ +- if (!cpu_online(raw_smp_processor_id())) \ +- return; \ +- \ + if (!(cond)) \ + return; \ + prercu; \ +@@ -265,15 +262,19 @@ static inline void tracepoint_synchronize_unregister(void) + * "void *__data, proto" as the callback prototype. + */ + #define DECLARE_TRACE_NOARGS(name) \ +- __DECLARE_TRACE(name, void, , 1, void *__data, __data) ++ __DECLARE_TRACE(name, void, , \ ++ cpu_online(raw_smp_processor_id()), \ ++ void *__data, __data) + + #define DECLARE_TRACE(name, proto, args) \ +- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1, \ +- PARAMS(void *__data, proto), \ +- PARAMS(__data, args)) ++ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ ++ cpu_online(raw_smp_processor_id()), \ ++ PARAMS(void *__data, proto), \ ++ PARAMS(__data, args)) + + #define DECLARE_TRACE_CONDITION(name, proto, args, cond) \ +- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \ ++ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ ++ cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \ + PARAMS(void *__data, proto), \ + PARAMS(__data, args)) + +diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h +index cbb20afdbc01..bb679b48f408 100644 +--- a/include/linux/ucs2_string.h ++++ b/include/linux/ucs2_string.h +@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s); + unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength); + int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len); + ++unsigned long ucs2_utf8size(const ucs2_char_t *src); ++unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, ++ unsigned long maxlength); ++ + #endif /* _LINUX_UCS2_STRING_H_ */ +diff --git a/kernel/module.c b/kernel/module.c +index e40617fac8ad..3a311a1d26d7 100644 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -180,6 +180,9 @@ struct load_info { + struct _ddebug *debug; + unsigned int num_debug; + bool sig_ok; ++#ifdef CONFIG_KALLSYMS ++ unsigned long mod_kallsyms_init_off; ++#endif + struct { + unsigned int sym, str, mod, vers, info, pcpu; + } index; +@@ -2320,8 +2323,20 @@ static void layout_symtab(struct module *mod, struct load_info *info) + strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, + info->index.str) | INIT_OFFSET_MASK; + pr_debug("\t%s\n", info->secstrings + strsect->sh_name); ++ ++ /* We'll tack temporary mod_kallsyms on the end. */ ++ mod->init_size = ALIGN(mod->init_size, ++ __alignof__(struct mod_kallsyms)); ++ info->mod_kallsyms_init_off = mod->init_size; ++ mod->init_size += sizeof(struct mod_kallsyms); ++ mod->init_size = debug_align(mod->init_size); + } + ++/* ++ * We use the full symtab and strtab which layout_symtab arranged to ++ * be appended to the init section. Later we switch to the cut-down ++ * core-only ones. 
++ */ + static void add_kallsyms(struct module *mod, const struct load_info *info) + { + unsigned int i, ndst; +@@ -2330,28 +2345,33 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) + char *s; + Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; + +- mod->symtab = (void *)symsec->sh_addr; +- mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym); ++ /* Set up to point into init section. */ ++ mod->kallsyms = mod->module_init + info->mod_kallsyms_init_off; ++ ++ mod->kallsyms->symtab = (void *)symsec->sh_addr; ++ mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym); + /* Make sure we get permanent strtab: don't use info->strtab. */ +- mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr; ++ mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr; + + /* Set types up while we still have access to sections. */ +- for (i = 0; i < mod->num_symtab; i++) +- mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); +- +- mod->core_symtab = dst = mod->module_core + info->symoffs; +- mod->core_strtab = s = mod->module_core + info->stroffs; +- src = mod->symtab; +- for (ndst = i = 0; i < mod->num_symtab; i++) { ++ for (i = 0; i < mod->kallsyms->num_symtab; i++) ++ mod->kallsyms->symtab[i].st_info ++ = elf_type(&mod->kallsyms->symtab[i], info); ++ ++ /* Now populate the cut down core kallsyms for after init. */ ++ mod->core_kallsyms.symtab = dst = mod->module_core + info->symoffs; ++ mod->core_kallsyms.strtab = s = mod->module_core + info->stroffs; ++ src = mod->kallsyms->symtab; ++ for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) { + if (i == 0 || + is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { + dst[ndst] = src[i]; +- dst[ndst++].st_name = s - mod->core_strtab; +- s += strlcpy(s, &mod->strtab[src[i].st_name], ++ dst[ndst++].st_name = s - mod->core_kallsyms.strtab; ++ s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name], + KSYM_NAME_LEN) + 1; + } + } +- mod->core_num_syms = ndst; ++ mod->core_kallsyms.num_symtab = ndst; + } + #else + static inline void layout_symtab(struct module *mod, struct load_info *info) +@@ -3089,9 +3109,8 @@ static int do_init_module(struct module *mod) + module_put(mod); + trim_init_extable(mod); + #ifdef CONFIG_KALLSYMS +- mod->num_symtab = mod->core_num_syms; +- mod->symtab = mod->core_symtab; +- mod->strtab = mod->core_strtab; ++ /* Switch to core kallsyms now init is done: kallsyms may be walking! */ ++ rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms); + #endif + unset_module_init_ro_nx(mod); + module_free(mod, mod->module_init); +@@ -3381,9 +3400,9 @@ static inline int is_arm_mapping_symbol(const char *str) + && (str[2] == '\0' || str[2] == '.'); + } + +-static const char *symname(struct module *mod, unsigned int symnum) ++static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum) + { +- return mod->strtab + mod->symtab[symnum].st_name; ++ return kallsyms->strtab + kallsyms->symtab[symnum].st_name; + } + + static const char *get_ksymbol(struct module *mod, +@@ -3393,6 +3412,7 @@ static const char *get_ksymbol(struct module *mod, + { + unsigned int i, best = 0; + unsigned long nextval; ++ struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + + /* At worse, next value is at end of module */ + if (within_module_init(addr, mod)) +@@ -3402,32 +3422,32 @@ static const char *get_ksymbol(struct module *mod, + + /* Scan for closest preceding symbol, and next symbol. (ELF + starts real symbols at 1). 
*/ +- for (i = 1; i < mod->num_symtab; i++) { +- if (mod->symtab[i].st_shndx == SHN_UNDEF) ++ for (i = 1; i < kallsyms->num_symtab; i++) { ++ if (kallsyms->symtab[i].st_shndx == SHN_UNDEF) + continue; + + /* We ignore unnamed symbols: they're uninformative + * and inserted at a whim. */ +- if (*symname(mod, i) == '\0' +- || is_arm_mapping_symbol(symname(mod, i))) ++ if (*symname(kallsyms, i) == '\0' ++ || is_arm_mapping_symbol(symname(kallsyms, i))) + continue; + +- if (mod->symtab[i].st_value <= addr +- && mod->symtab[i].st_value > mod->symtab[best].st_value) ++ if (kallsyms->symtab[i].st_value <= addr ++ && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value) + best = i; +- if (mod->symtab[i].st_value > addr +- && mod->symtab[i].st_value < nextval) +- nextval = mod->symtab[i].st_value; ++ if (kallsyms->symtab[i].st_value > addr ++ && kallsyms->symtab[i].st_value < nextval) ++ nextval = kallsyms->symtab[i].st_value; + } + + if (!best) + return NULL; + + if (size) +- *size = nextval - mod->symtab[best].st_value; ++ *size = nextval - kallsyms->symtab[best].st_value; + if (offset) +- *offset = addr - mod->symtab[best].st_value; +- return symname(mod, best); ++ *offset = addr - kallsyms->symtab[best].st_value; ++ return symname(kallsyms, best); + } + + /* For kallsyms to ask for address resolution. NULL means not found. Careful +@@ -3523,18 +3543,21 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + + preempt_disable(); + list_for_each_entry_rcu(mod, &modules, list) { ++ struct mod_kallsyms *kallsyms; ++ + if (mod->state == MODULE_STATE_UNFORMED) + continue; +- if (symnum < mod->num_symtab) { +- *value = mod->symtab[symnum].st_value; +- *type = mod->symtab[symnum].st_info; +- strlcpy(name, symname(mod, symnum), KSYM_NAME_LEN); ++ kallsyms = rcu_dereference_sched(mod->kallsyms); ++ if (symnum < kallsyms->num_symtab) { ++ *value = kallsyms->symtab[symnum].st_value; ++ *type = kallsyms->symtab[symnum].st_info; ++ strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN); + strlcpy(module_name, mod->name, MODULE_NAME_LEN); + *exported = is_exported(name, *value, mod); + preempt_enable(); + return 0; + } +- symnum -= mod->num_symtab; ++ symnum -= kallsyms->num_symtab; + } + preempt_enable(); + return -ERANGE; +@@ -3543,11 +3566,12 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + static unsigned long mod_find_symname(struct module *mod, const char *name) + { + unsigned int i; ++ struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + +- for (i = 0; i < mod->num_symtab; i++) +- if (strcmp(name, symname(mod, i)) == 0 && +- mod->symtab[i].st_info != 'U') +- return mod->symtab[i].st_value; ++ for (i = 0; i < kallsyms->num_symtab; i++) ++ if (strcmp(name, symname(kallsyms, i)) == 0 && ++ kallsyms->symtab[i].st_info != 'U') ++ return kallsyms->symtab[i].st_value; + return 0; + } + +@@ -3584,11 +3608,14 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + int ret; + + list_for_each_entry(mod, &modules, list) { ++ /* We hold module_mutex: no need for rcu_dereference_sched */ ++ struct mod_kallsyms *kallsyms = mod->kallsyms; ++ + if (mod->state == MODULE_STATE_UNFORMED) + continue; +- for (i = 0; i < mod->num_symtab; i++) { +- ret = fn(data, symname(mod, i), +- mod, mod->symtab[i].st_value); ++ for (i = 0; i < kallsyms->num_symtab; i++) { ++ ret = fn(data, symname(kallsyms, i), ++ mod, kallsyms->symtab[i].st_value); + if (ret != 0) + return ret; + } +diff --git a/lib/ucs2_string.c 
b/lib/ucs2_string.c +index 6f500ef2301d..f0b323abb4c6 100644 +--- a/lib/ucs2_string.c ++++ b/lib/ucs2_string.c +@@ -49,3 +49,65 @@ ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len) + } + } + EXPORT_SYMBOL(ucs2_strncmp); ++ ++unsigned long ++ucs2_utf8size(const ucs2_char_t *src) ++{ ++ unsigned long i; ++ unsigned long j = 0; ++ ++ for (i = 0; i < ucs2_strlen(src); i++) { ++ u16 c = src[i]; ++ ++ if (c >= 0x800) ++ j += 3; ++ else if (c >= 0x80) ++ j += 2; ++ else ++ j += 1; ++ } ++ ++ return j; ++} ++EXPORT_SYMBOL(ucs2_utf8size); ++ ++/* ++ * copy at most maxlength bytes of whole utf8 characters to dest from the ++ * ucs2 string src. ++ * ++ * The return value is the number of characters copied, not including the ++ * final NUL character. ++ */ ++unsigned long ++ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength) ++{ ++ unsigned int i; ++ unsigned long j = 0; ++ unsigned long limit = ucs2_strnlen(src, maxlength); ++ ++ for (i = 0; maxlength && i < limit; i++) { ++ u16 c = src[i]; ++ ++ if (c >= 0x800) { ++ if (maxlength < 3) ++ break; ++ maxlength -= 3; ++ dest[j++] = 0xe0 | (c & 0xf000) >> 12; ++ dest[j++] = 0x80 | (c & 0x0fc0) >> 6; ++ dest[j++] = 0x80 | (c & 0x003f); ++ } else if (c >= 0x80) { ++ if (maxlength < 2) ++ break; ++ maxlength -= 2; ++ dest[j++] = 0xc0 | (c & 0x7c0) >> 6; ++ dest[j++] = 0x80 | (c & 0x03f); ++ } else { ++ maxlength -= 1; ++ dest[j++] = c & 0x7f; ++ } ++ } ++ if (maxlength) ++ dest[j] = '\0'; ++ return j; ++} ++EXPORT_SYMBOL(ucs2_as_utf8); +diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c +index 31bf2586fb84..864408026202 100644 +--- a/net/mac80211/agg-rx.c ++++ b/net/mac80211/agg-rx.c +@@ -290,7 +290,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, + } + + /* prepare A-MPDU MLME for Rx aggregation */ +- tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL); ++ tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL); + if (!tid_agg_rx) + goto end; + +diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c +index c1b5b73c5b91..c3111e2b6fa0 100644 +--- a/net/mac80211/rc80211_minstrel_ht.c ++++ b/net/mac80211/rc80211_minstrel_ht.c +@@ -463,7 +463,7 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb) + if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO) + return; + +- ieee80211_start_tx_ba_session(pubsta, tid, 5000); ++ ieee80211_start_tx_ba_session(pubsta, tid, 0); + } + + static void +diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c +index c8717c1d082e..87dd619fb2e9 100644 +--- a/net/wireless/wext-core.c ++++ b/net/wireless/wext-core.c +@@ -342,6 +342,39 @@ static const int compat_event_type_size[] = { + + /* IW event code */ + ++static void wireless_nlevent_flush(void) ++{ ++ struct sk_buff *skb; ++ struct net *net; ++ ++ ASSERT_RTNL(); ++ ++ for_each_net(net) { ++ while ((skb = skb_dequeue(&net->wext_nlevents))) ++ rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, ++ GFP_KERNEL); ++ } ++} ++ ++static int wext_netdev_notifier_call(struct notifier_block *nb, ++ unsigned long state, void *ptr) ++{ ++ /* ++ * When a netdev changes state in any way, flush all pending messages ++ * to avoid them going out in a strange order, e.g. RTM_NEWLINK after ++ * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close() ++ * or similar - all of which could otherwise happen due to delays from ++ * schedule_work(). 
++ */ ++ wireless_nlevent_flush(); ++ ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block wext_netdev_notifier = { ++ .notifier_call = wext_netdev_notifier_call, ++}; ++ + static int __net_init wext_pernet_init(struct net *net) + { + skb_queue_head_init(&net->wext_nlevents); +@@ -360,7 +393,12 @@ static struct pernet_operations wext_pernet_ops = { + + static int __init wireless_nlevent_init(void) + { +- return register_pernet_subsys(&wext_pernet_ops); ++ int err = register_pernet_subsys(&wext_pernet_ops); ++ ++ if (err) ++ return err; ++ ++ return register_netdevice_notifier(&wext_netdev_notifier); + } + + subsys_initcall(wireless_nlevent_init); +@@ -368,17 +406,8 @@ subsys_initcall(wireless_nlevent_init); + /* Process events generated by the wireless layer or the driver. */ + static void wireless_nlevent_process(struct work_struct *work) + { +- struct sk_buff *skb; +- struct net *net; +- + rtnl_lock(); +- +- for_each_net(net) { +- while ((skb = skb_dequeue(&net->wext_nlevents))) +- rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, +- GFP_KERNEL); +- } +- ++ wireless_nlevent_flush(); + rtnl_unlock(); + } + +diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c +index d4248e00160e..4f736197f6ee 100644 +--- a/sound/soc/codecs/wm8958-dsp2.c ++++ b/sound/soc/codecs/wm8958-dsp2.c +@@ -459,7 +459,7 @@ static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +@@ -549,7 +549,7 @@ static int wm8958_put_vss_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +@@ -582,7 +582,7 @@ static int wm8958_put_vss_hpf_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +@@ -749,7 +749,7 @@ static int wm8958_put_enh_eq_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c +index 66aec0c3b7bb..355b07e4fe2a 100644 +--- a/sound/soc/codecs/wm8994.c ++++ b/sound/soc/codecs/wm8994.c +@@ -360,7 +360,7 @@ static int wm8994_put_drc_enum(struct snd_kcontrol *kcontrol, + struct wm8994 *control = wm8994->wm8994; + struct wm8994_pdata *pdata = &control->pdata; + int drc = wm8994_get_drc(kcontrol->id.name); +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + + if (drc < 0) + return drc; +@@ -467,7 +467,7 @@ static int 
wm8994_put_retune_mobile_enum(struct snd_kcontrol *kcontrol, + struct wm8994 *control = wm8994->wm8994; + struct wm8994_pdata *pdata = &control->pdata; + int block = wm8994_get_retune_mobile_block(kcontrol->id.name); +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + + if (block < 0) + return block; +diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh +index 77edcdcc016b..057278448515 100644 +--- a/tools/testing/selftests/efivarfs/efivarfs.sh ++++ b/tools/testing/selftests/efivarfs/efivarfs.sh +@@ -88,7 +88,11 @@ test_delete() + exit 1 + fi + +- rm $file ++ rm $file 2>/dev/null ++ if [ $? -ne 0 ]; then ++ chattr -i $file ++ rm $file ++ fi + + if [ -e $file ]; then + echo "$file couldn't be deleted" >&2 +@@ -111,6 +115,7 @@ test_zero_size_delete() + exit 1 + fi + ++ chattr -i $file + printf "$attrs" > $file + + if [ -e $file ]; then +@@ -141,7 +146,11 @@ test_valid_filenames() + echo "$file could not be created" >&2 + ret=1 + else +- rm $file ++ rm $file 2>/dev/null ++ if [ $? -ne 0 ]; then ++ chattr -i $file ++ rm $file ++ fi + fi + done + +@@ -174,7 +183,11 @@ test_invalid_filenames() + + if [ -e $file ]; then + echo "Creating $file should have failed" >&2 +- rm $file ++ rm $file 2>/dev/null ++ if [ $? -ne 0 ]; then ++ chattr -i $file ++ rm $file ++ fi + ret=1 + fi + done +diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c +index 8c0764407b3c..4af74f733036 100644 +--- a/tools/testing/selftests/efivarfs/open-unlink.c ++++ b/tools/testing/selftests/efivarfs/open-unlink.c +@@ -1,10 +1,68 @@ ++#include + #include + #include + #include + #include ++#include + #include + #include + #include ++#include ++ ++static int set_immutable(const char *path, int immutable) ++{ ++ unsigned int flags; ++ int fd; ++ int rc; ++ int error; ++ ++ fd = open(path, O_RDONLY); ++ if (fd < 0) ++ return fd; ++ ++ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags); ++ if (rc < 0) { ++ error = errno; ++ close(fd); ++ errno = error; ++ return rc; ++ } ++ ++ if (immutable) ++ flags |= FS_IMMUTABLE_FL; ++ else ++ flags &= ~FS_IMMUTABLE_FL; ++ ++ rc = ioctl(fd, FS_IOC_SETFLAGS, &flags); ++ error = errno; ++ close(fd); ++ errno = error; ++ return rc; ++} ++ ++static int get_immutable(const char *path) ++{ ++ unsigned int flags; ++ int fd; ++ int rc; ++ int error; ++ ++ fd = open(path, O_RDONLY); ++ if (fd < 0) ++ return fd; ++ ++ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags); ++ if (rc < 0) { ++ error = errno; ++ close(fd); ++ errno = error; ++ return rc; ++ } ++ close(fd); ++ if (flags & FS_IMMUTABLE_FL) ++ return 1; ++ return 0; ++} + + int main(int argc, char **argv) + { +@@ -27,7 +85,7 @@ int main(int argc, char **argv) + buf[4] = 0; + + /* create a test variable */ +- fd = open(path, O_WRONLY | O_CREAT); ++ fd = open(path, O_WRONLY | O_CREAT, 0600); + if (fd < 0) { + perror("open(O_WRONLY)"); + return EXIT_FAILURE; +@@ -41,6 +99,18 @@ int main(int argc, char **argv) + + close(fd); + ++ rc = get_immutable(path); ++ if (rc < 0) { ++ perror("ioctl(FS_IOC_GETFLAGS)"); ++ return EXIT_FAILURE; ++ } else if (rc) { ++ rc = set_immutable(path, 0); ++ if (rc < 0) { ++ perror("ioctl(FS_IOC_SETFLAGS)"); ++ return EXIT_FAILURE; ++ } ++ } ++ + fd = open(path, O_RDONLY); + if (fd < 0) { + perror("open"); diff --git a/patch/kernel/marvell-default/patch-3.10.96-97.patch b/patch/kernel/marvell-default/patch-3.10.96-97.patch new file mode 100644 index 000000000..487d76eb1 --- /dev/null +++ 
b/patch/kernel/marvell-default/patch-3.10.96-97.patch @@ -0,0 +1,2341 @@ +diff --git a/Makefile b/Makefile +index c88ea5d8d19c..f26470169c70 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 10 +-SUBLEVEL = 96 ++SUBLEVEL = 97 + EXTRAVERSION = + NAME = TOSSUG Baby Fish + +diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h +index 294d251ca7b2..2ae13ce592e8 100644 +--- a/arch/parisc/include/uapi/asm/mman.h ++++ b/arch/parisc/include/uapi/asm/mman.h +@@ -46,16 +46,6 @@ + #define MADV_DONTFORK 10 /* don't inherit across fork */ + #define MADV_DOFORK 11 /* do inherit across fork */ + +-/* The range 12-64 is reserved for page size specification. */ +-#define MADV_4K_PAGES 12 /* Use 4K pages */ +-#define MADV_16K_PAGES 14 /* Use 16K pages */ +-#define MADV_64K_PAGES 16 /* Use 64K pages */ +-#define MADV_256K_PAGES 18 /* Use 256K pages */ +-#define MADV_1M_PAGES 20 /* Use 1 Megabyte pages */ +-#define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */ +-#define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */ +-#define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */ +- + #define MADV_MERGEABLE 65 /* KSM may merge identical pages */ + #define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */ + +diff --git a/arch/parisc/include/uapi/asm/siginfo.h b/arch/parisc/include/uapi/asm/siginfo.h +index d7034728f377..1c75565d984b 100644 +--- a/arch/parisc/include/uapi/asm/siginfo.h ++++ b/arch/parisc/include/uapi/asm/siginfo.h +@@ -1,6 +1,10 @@ + #ifndef _PARISC_SIGINFO_H + #define _PARISC_SIGINFO_H + ++#if defined(__LP64__) ++#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) ++#endif ++ + #include + + #undef NSIGTRAP +diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c +index 940188d1942c..ae9aa83854c0 100644 +--- a/arch/parisc/kernel/signal.c ++++ b/arch/parisc/kernel/signal.c +@@ -449,6 +449,55 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, + regs->gr[28]); + } + ++/* ++ * Check how the syscall number gets loaded into %r20 within ++ * the delay branch in userspace and adjust as needed. ++ */ ++ ++static void check_syscallno_in_delay_branch(struct pt_regs *regs) ++{ ++ u32 opcode, source_reg; ++ u32 __user *uaddr; ++ int err; ++ ++ /* Usually we don't have to restore %r20 (the system call number) ++ * because it gets loaded in the delay slot of the branch external ++ * instruction via the ldi instruction. ++ * In some cases a register-to-register copy instruction might have ++ * been used instead, in which case we need to copy the syscall ++ * number into the source register before returning to userspace. ++ */ ++ ++ /* A syscall is just a branch, so all we have to do is fiddle the ++ * return pointer so that the ble instruction gets executed again. 
++ */ ++ regs->gr[31] -= 8; /* delayed branching */ ++ ++ /* Get assembler opcode of code in delay branch */ ++ uaddr = (unsigned int *) ((regs->gr[31] & ~3) + 4); ++ err = get_user(opcode, uaddr); ++ if (err) ++ return; ++ ++ /* Check if delay branch uses "ldi int,%r20" */ ++ if ((opcode & 0xffff0000) == 0x34140000) ++ return; /* everything ok, just return */ ++ ++ /* Check if delay branch uses "nop" */ ++ if (opcode == INSN_NOP) ++ return; ++ ++ /* Check if delay branch uses "copy %rX,%r20" */ ++ if ((opcode & 0xffe0ffff) == 0x08000254) { ++ source_reg = (opcode >> 16) & 31; ++ regs->gr[source_reg] = regs->gr[20]; ++ return; ++ } ++ ++ pr_warn("syscall restart: %s (pid %d): unexpected opcode 0x%08x\n", ++ current->comm, task_pid_nr(current), opcode); ++} ++ + static inline void + syscall_restart(struct pt_regs *regs, struct k_sigaction *ka) + { +@@ -471,10 +520,7 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka) + } + /* fallthrough */ + case -ERESTARTNOINTR: +- /* A syscall is just a branch, so all +- * we have to do is fiddle the return pointer. +- */ +- regs->gr[31] -= 8; /* delayed branching */ ++ check_syscallno_in_delay_branch(regs); + break; + } + } +@@ -523,15 +569,9 @@ insert_restart_trampoline(struct pt_regs *regs) + } + case -ERESTARTNOHAND: + case -ERESTARTSYS: +- case -ERESTARTNOINTR: { +- /* Hooray for delayed branching. We don't +- * have to restore %r20 (the system call +- * number) because it gets loaded in the delay +- * slot of the branch external instruction. +- */ +- regs->gr[31] -= 8; ++ case -ERESTARTNOINTR: ++ check_syscallno_in_delay_branch(regs); + return; +- } + default: + break; + } +diff --git a/arch/sh/include/uapi/asm/unistd_64.h b/arch/sh/include/uapi/asm/unistd_64.h +index e6820c86e8c7..47ebd5b5ed55 100644 +--- a/arch/sh/include/uapi/asm/unistd_64.h ++++ b/arch/sh/include/uapi/asm/unistd_64.h +@@ -278,7 +278,7 @@ + #define __NR_fsetxattr 256 + #define __NR_getxattr 257 + #define __NR_lgetxattr 258 +-#define __NR_fgetxattr 269 ++#define __NR_fgetxattr 259 + #define __NR_listxattr 260 + #define __NR_llistxattr 261 + #define __NR_flistxattr 262 +diff --git a/crypto/af_alg.c b/crypto/af_alg.c +index 6ef6e2ad344e..0ca108f3c840 100644 +--- a/crypto/af_alg.c ++++ b/crypto/af_alg.c +@@ -125,6 +125,23 @@ int af_alg_release(struct socket *sock) + } + EXPORT_SYMBOL_GPL(af_alg_release); + ++void af_alg_release_parent(struct sock *sk) ++{ ++ struct alg_sock *ask = alg_sk(sk); ++ bool last; ++ ++ sk = ask->parent; ++ ask = alg_sk(sk); ++ ++ lock_sock(sk); ++ last = !--ask->refcnt; ++ release_sock(sk); ++ ++ if (last) ++ sock_put(sk); ++} ++EXPORT_SYMBOL_GPL(af_alg_release_parent); ++ + static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + { + struct sock *sk = sock->sk; +@@ -132,6 +149,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + struct sockaddr_alg *sa = (void *)uaddr; + const struct af_alg_type *type; + void *private; ++ int err; + + if (sock->state == SS_CONNECTED) + return -EINVAL; +@@ -157,16 +175,22 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + return PTR_ERR(private); + } + ++ err = -EBUSY; + lock_sock(sk); ++ if (ask->refcnt) ++ goto unlock; + + swap(ask->type, type); + swap(ask->private, private); + ++ err = 0; ++ ++unlock: + release_sock(sk); + + alg_do_release(type, private); + +- return 0; ++ return err; + } + + static int alg_setkey(struct sock *sk, char __user *ukey, +@@ -199,11 +223,15 @@ static int alg_setsockopt(struct socket *sock, 
int level, int optname, + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + const struct af_alg_type *type; +- int err = -ENOPROTOOPT; ++ int err = -EBUSY; + + lock_sock(sk); ++ if (ask->refcnt) ++ goto unlock; ++ + type = ask->type; + ++ err = -ENOPROTOOPT; + if (level != SOL_ALG || !type) + goto unlock; + +@@ -247,14 +275,13 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) + security_sk_clone(sk, sk2); + + err = type->accept(ask->private, sk2); +- if (err) { +- sk_free(sk2); ++ if (err) + goto unlock; +- } + + sk2->sk_family = PF_ALG; + +- sock_hold(sk); ++ if (!ask->refcnt++) ++ sock_hold(sk); + alg_sk(sk2)->parent = sk; + alg_sk(sk2)->type = type; + +diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c +index 850246206b12..c542c0d88afd 100644 +--- a/crypto/algif_hash.c ++++ b/crypto/algif_hash.c +@@ -51,7 +51,8 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock, + + lock_sock(sk); + if (!ctx->more) { +- err = crypto_ahash_init(&ctx->req); ++ err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req), ++ &ctx->completion); + if (err) + goto unlock; + } +@@ -131,6 +132,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page, + } else { + if (!ctx->more) { + err = crypto_ahash_init(&ctx->req); ++ err = af_alg_wait_for_completion(err, &ctx->completion); + if (err) + goto unlock; + } +@@ -192,9 +194,14 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags) + struct sock *sk2; + struct alg_sock *ask2; + struct hash_ctx *ctx2; ++ bool more; + int err; + +- err = crypto_ahash_export(req, state); ++ lock_sock(sk); ++ more = ctx->more; ++ err = more ? crypto_ahash_export(req, state) : 0; ++ release_sock(sk); ++ + if (err) + return err; + +@@ -205,7 +212,10 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags) + sk2 = newsock->sk; + ask2 = alg_sk(sk2); + ctx2 = ask2->private; +- ctx2->more = 1; ++ ctx2->more = more; ++ ++ if (!more) ++ return err; + + err = crypto_ahash_import(&ctx2->req, state); + if (err) { +diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c +index c7666f401381..a3dfc0d83107 100644 +--- a/crypto/crypto_user.c ++++ b/crypto/crypto_user.c +@@ -477,6 +477,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) + if (link->dump == NULL) + return -EINVAL; + ++ down_read(&crypto_alg_sem); + list_for_each_entry(alg, &crypto_alg_list, cra_list) + dump_alloc += CRYPTO_REPORT_MAXSIZE; + +@@ -486,8 +487,11 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) + .done = link->done, + .min_dump_alloc = dump_alloc, + }; +- return netlink_dump_start(crypto_nlsk, skb, nlh, &c); ++ err = netlink_dump_start(crypto_nlsk, skb, nlh, &c); + } ++ up_read(&crypto_alg_sem); ++ ++ return err; + } + + err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX, +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c +index 9064a2f2760c..cb106934bf1c 100644 +--- a/drivers/ata/ahci.c ++++ b/drivers/ata/ahci.c +@@ -247,6 +247,26 @@ static const struct pci_device_id ahci_pci_tbl[] = { + { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ + { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ + { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ ++ { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */ ++ { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */ ++ { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */ ++ { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */ ++ { 
PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */ ++ { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */ ++ { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */ ++ { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */ ++ { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */ ++ { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */ ++ { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */ ++ { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */ ++ { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */ ++ { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */ ++ { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */ ++ { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */ ++ { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */ ++ { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */ ++ { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */ ++ { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ + { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */ + { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ +diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c +index cf5f35877559..d04f5c8dbbdc 100644 +--- a/drivers/ata/libahci.c ++++ b/drivers/ata/libahci.c +@@ -486,8 +486,8 @@ void ahci_save_initial_config(struct device *dev, + } + } + +- /* fabricate port_map from cap.nr_ports */ +- if (!port_map) { ++ /* fabricate port_map from cap.nr_ports for < AHCI 1.3 */ ++ if (!port_map && vers < 0x10300) { + port_map = (1 << ahci_nr_ports(cap)) - 1; + dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map); + +@@ -1244,6 +1244,15 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp, + ata_tf_to_fis(tf, pmp, is_cmd, fis); + ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12)); + ++ /* set port value for softreset of Port Multiplier */ ++ if (pp->fbs_enabled && pp->fbs_last_dev != pmp) { ++ tmp = readl(port_mmio + PORT_FBS); ++ tmp &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC); ++ tmp |= pmp << PORT_FBS_DEV_OFFSET; ++ writel(tmp, port_mmio + PORT_FBS); ++ pp->fbs_last_dev = pmp; ++ } ++ + /* issue & wait */ + writel(1, port_mmio + PORT_CMD_ISSUE); + +diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c +index a6524c3efdf7..ce854bbd33ef 100644 +--- a/drivers/char/tpm/tpm_ibmvtpm.c ++++ b/drivers/char/tpm/tpm_ibmvtpm.c +@@ -529,7 +529,7 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq, + } + ibmvtpm->rtce_size = be16_to_cpu(crq->len); + ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size, +- GFP_KERNEL); ++ GFP_ATOMIC); + if (!ibmvtpm->rtce_buf) { + dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n"); + return; +diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c +index de904e6a4ab7..5da58e3899eb 100644 +--- a/drivers/hid/usbhid/hid-core.c ++++ b/drivers/hid/usbhid/hid-core.c +@@ -490,8 +490,6 @@ static void hid_ctrl(struct urb *urb) + struct usbhid_device *usbhid = hid->driver_data; + int unplug = 0, status = urb->status; + +- spin_lock(&usbhid->lock); +- + switch (status) { + case 0: /* success */ + if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_IN) +@@ -511,6 +509,8 @@ static void hid_ctrl(struct urb *urb) + hid_warn(urb->dev, "ctrl urb status %d received\n", status); + } + ++ spin_lock(&usbhid->lock); ++ + if (unplug) { + usbhid->ctrltail = usbhid->ctrlhead; + } else { +diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c +index 0ba21b0f3972..eb7ddb20fd48 
100644 +--- a/drivers/md/dm-mpath.c ++++ b/drivers/md/dm-mpath.c +@@ -1608,11 +1608,8 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, + /* + * Only pass ioctls through if the device sizes match exactly. + */ +- if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) { +- int err = scsi_verify_blk_ioctl(NULL, cmd); +- if (err) +- r = err; +- } ++ if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) ++ r = scsi_verify_blk_ioctl(NULL, cmd); + + if (r == -ENOTCONN && !fatal_signal_pending(current)) + queue_work(kmultipathd, &m->process_queued_ios); +diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c +index b53669404cb5..6d7f4d950b8f 100644 +--- a/drivers/md/persistent-data/dm-btree.c ++++ b/drivers/md/persistent-data/dm-btree.c +@@ -455,8 +455,10 @@ static int btree_split_sibling(struct shadow_spine *s, dm_block_t root, + + r = insert_at(sizeof(__le64), pn, parent_index + 1, + le64_to_cpu(rn->keys[0]), &location); +- if (r) ++ if (r) { ++ unlock_block(s->info, right); + return r; ++ } + + if (key < le64_to_cpu(rn->keys[0])) { + unlock_block(s->info, right); +diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c +index dbcdfbf8aed0..11b0ef3a2858 100644 +--- a/drivers/media/pci/saa7134/saa7134-alsa.c ++++ b/drivers/media/pci/saa7134/saa7134-alsa.c +@@ -1145,6 +1145,8 @@ static int alsa_device_init(struct saa7134_dev *dev) + + static int alsa_device_exit(struct saa7134_dev *dev) + { ++ if (!snd_saa7134_cards[dev->nr]) ++ return 1; + + snd_card_free(snd_saa7134_cards[dev->nr]); + snd_saa7134_cards[dev->nr] = NULL; +@@ -1194,7 +1196,8 @@ static void saa7134_alsa_exit(void) + int idx; + + for (idx = 0; idx < SNDRV_CARDS; idx++) { +- snd_card_free(snd_saa7134_cards[idx]); ++ if (snd_saa7134_cards[idx]) ++ snd_card_free(snd_saa7134_cards[idx]); + } + + saa7134_dmasound_init = NULL; +diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +index e2b0a0969ebb..35fb8f0cb539 100644 +--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c ++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +@@ -264,7 +264,7 @@ static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_ + + struct v4l2_standard32 { + __u32 index; +- __u32 id[2]; /* __u64 would get the alignment wrong */ ++ compat_u64 id; + __u8 name[24]; + struct v4l2_fract frameperiod; /* Frames, not fields */ + __u32 framelines; +@@ -284,7 +284,7 @@ static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 + { + if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) || + put_user(kp->index, &up->index) || +- copy_to_user(up->id, &kp->id, sizeof(__u64)) || ++ put_user(kp->id, &up->id) || + copy_to_user(up->name, kp->name, 24) || + copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) || + put_user(kp->framelines, &up->framelines) || +@@ -576,10 +576,10 @@ struct v4l2_input32 { + __u32 type; /* Type of input */ + __u32 audioset; /* Associated audios (bitfield) */ + __u32 tuner; /* Associated tuner */ +- v4l2_std_id std; ++ compat_u64 std; + __u32 status; + __u32 reserved[4]; +-} __attribute__ ((packed)); ++}; + + /* The 64-bit v4l2_input struct has extra padding at the end of the struct. + Otherwise it is identical to the 32-bit version. 
*/ +@@ -719,6 +719,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext + struct v4l2_event32 { + __u32 type; + union { ++ compat_s64 value64; + __u8 data[64]; + } u; + __u32 pending; +diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c +index fd56f2563201..297fbc59a800 100644 +--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c ++++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c +@@ -117,7 +117,8 @@ static void vb2_dc_prepare(void *buf_priv) + if (!sgt || buf->db_attach) + return; + +- dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir); ++ dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents, ++ buf->dma_dir); + } + + static void vb2_dc_finish(void *buf_priv) +@@ -129,7 +130,7 @@ static void vb2_dc_finish(void *buf_priv) + if (!sgt || buf->db_attach) + return; + +- dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir); ++ dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir); + } + + /*********************************************/ +diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c +index 301493382cd0..f8013c1d8cd5 100644 +--- a/drivers/mtd/mtdpart.c ++++ b/drivers/mtd/mtdpart.c +@@ -634,8 +634,10 @@ int add_mtd_partitions(struct mtd_info *master, + + for (i = 0; i < nbparts; i++) { + slave = allocate_partition(master, parts + i, i, cur_offset); +- if (IS_ERR(slave)) ++ if (IS_ERR(slave)) { ++ del_mtd_partitions(master); + return PTR_ERR(slave); ++ } + + mutex_lock(&mtd_partitions_mutex); + list_add(&slave->list, &mtd_partitions); +diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h +index af7d9f9b3b4d..beed58b0c795 100644 +--- a/drivers/net/wireless/ti/wlcore/io.h ++++ b/drivers/net/wireless/ti/wlcore/io.h +@@ -203,19 +203,23 @@ static inline int __must_check wlcore_write_reg(struct wl1271 *wl, int reg, + + static inline void wl1271_power_off(struct wl1271 *wl) + { +- int ret; ++ int ret = 0; + + if (!test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags)) + return; + +- ret = wl->if_ops->power(wl->dev, false); ++ if (wl->if_ops->power) ++ ret = wl->if_ops->power(wl->dev, false); + if (!ret) + clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); + } + + static inline int wl1271_power_on(struct wl1271 *wl) + { +- int ret = wl->if_ops->power(wl->dev, true); ++ int ret = 0; ++ ++ if (wl->if_ops->power) ++ ret = wl->if_ops->power(wl->dev, true); + if (ret == 0) + set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); + +diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c +index e26447832683..bfb57e671034 100644 +--- a/drivers/net/wireless/ti/wlcore/spi.c ++++ b/drivers/net/wireless/ti/wlcore/spi.c +@@ -72,7 +72,10 @@ + */ + #define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE) + +-#define WSPI_MAX_NUM_OF_CHUNKS (SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) ++/* Maximum number of SPI write chunks */ ++#define WSPI_MAX_NUM_OF_CHUNKS \ ++ ((SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) + 1) ++ + + struct wl12xx_spi_glue { + struct device *dev; +@@ -270,9 +273,10 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr, + void *buf, size_t len, bool fixed) + { + struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); +- struct spi_transfer t[2 * (WSPI_MAX_NUM_OF_CHUNKS + 1)]; ++ /* SPI write buffers - 2 for each chunk */ ++ struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS]; + struct spi_message m; +- u32 commands[WSPI_MAX_NUM_OF_CHUNKS]; ++ u32 commands[WSPI_MAX_NUM_OF_CHUNKS]; /* 1 
command per chunk */ + u32 *cmd; + u32 chunk_len; + int i; +diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c +index 157a57309601..4ef0dbdcace1 100644 +--- a/drivers/remoteproc/remoteproc_debugfs.c ++++ b/drivers/remoteproc/remoteproc_debugfs.c +@@ -156,7 +156,7 @@ rproc_recovery_write(struct file *filp, const char __user *user_buf, + char buf[10]; + int ret; + +- if (count > sizeof(buf)) ++ if (count < 1 || count > sizeof(buf)) + return count; + + ret = copy_from_user(buf, user_buf, count); +diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c +index 380387a47b1d..462af46ceee7 100644 +--- a/drivers/spi/spi-atmel.c ++++ b/drivers/spi/spi-atmel.c +@@ -594,7 +594,8 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master, + + *plen = len; + +- if (atmel_spi_dma_slave_config(as, &slave_config, 8)) ++ if (atmel_spi_dma_slave_config(as, &slave_config, ++ xfer->bits_per_word)) + goto err_exit; + + /* Send both scatterlists */ +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c +index 7c159634aaae..cc80ab14aa32 100644 +--- a/drivers/spi/spi.c ++++ b/drivers/spi/spi.c +@@ -1047,7 +1047,7 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size) + master->bus_num = -1; + master->num_chipselect = 1; + master->dev.class = &spi_master_class; +- master->dev.parent = get_device(dev); ++ master->dev.parent = dev; + spi_master_set_devdata(master, &master[1]); + + return master; +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c +index 2967b6eb4c70..8977eaf24d9f 100644 +--- a/drivers/tty/tty_io.c ++++ b/drivers/tty/tty_io.c +@@ -2576,6 +2576,28 @@ static int tiocsetd(struct tty_struct *tty, int __user *p) + } + + /** ++ * tiocgetd - get line discipline ++ * @tty: tty device ++ * @p: pointer to user data ++ * ++ * Retrieves the line discipline id directly from the ldisc. 
++ * ++ * Locking: waits for ldisc reference (in case the line discipline ++ * is changing or the tty is being hungup) ++ */ ++ ++static int tiocgetd(struct tty_struct *tty, int __user *p) ++{ ++ struct tty_ldisc *ld; ++ int ret; ++ ++ ld = tty_ldisc_ref_wait(tty); ++ ret = put_user(ld->ops->num, p); ++ tty_ldisc_deref(ld); ++ return ret; ++} ++ ++/** + * send_break - performed time break + * @tty: device to break on + * @duration: timeout in mS +@@ -2789,7 +2811,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + case TIOCGSID: + return tiocgsid(tty, real_tty, p); + case TIOCGETD: +- return put_user(tty->ldisc->ops->num, (int __user *)p); ++ return tiocgetd(tty, p); + case TIOCSETD: + return tiocsetd(tty, p); + case TIOCVHANGUP: +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index 4dc18615cd0f..9dd6fa3a1260 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -4788,6 +4788,9 @@ static int __init xhci_hcd_init(void) + { + int retval; + ++ if (usb_disabled()) ++ return -ENODEV; ++ + retval = xhci_register_pci(); + if (retval < 0) { + printk(KERN_DEBUG "Problem registering PCI driver."); +@@ -4816,9 +4819,6 @@ static int __init xhci_hcd_init(void) + /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ + BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); + +- if (usb_disabled()) +- return -ENODEV; +- + return 0; + unreg_pci: + xhci_unregister_pci(); +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 72c14d7d604f..89ba7cfba5bc 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -98,6 +98,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */ + { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */ + { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */ ++ { USB_DEVICE(0x10C4, 0x81D7) }, /* IAI Corp. 
RCB-CV-USB USB to RS485 Adaptor */ + { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */ + { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */ + { USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */ +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index 514f3117ee2b..4e865664699b 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -840,6 +840,7 @@ static struct usb_device_id id_table_combined [] = { + { USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, ++ { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_SCU18) }, + { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, + + /* Papouch devices based on FTDI chip */ +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index bfb0ecd98808..3eff1d6a2b17 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -615,6 +615,7 @@ + */ + #define RATOC_VENDOR_ID 0x0584 + #define RATOC_PRODUCT_ID_USB60F 0xb020 ++#define RATOC_PRODUCT_ID_SCU18 0xb03a + + /* + * Infineon Technologies +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index bdbe642e6569..81f6a572f016 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -269,6 +269,8 @@ static void option_instat_callback(struct urb *urb); + #define TELIT_PRODUCT_CC864_SINGLE 0x1006 + #define TELIT_PRODUCT_DE910_DUAL 0x1010 + #define TELIT_PRODUCT_UE910_V2 0x1012 ++#define TELIT_PRODUCT_LE922_USBCFG0 0x1042 ++#define TELIT_PRODUCT_LE922_USBCFG3 0x1043 + #define TELIT_PRODUCT_LE920 0x1200 + #define TELIT_PRODUCT_LE910 0x1201 + +@@ -623,6 +625,16 @@ static const struct option_blacklist_info telit_le920_blacklist = { + .reserved = BIT(1) | BIT(5), + }; + ++static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = { ++ .sendsetup = BIT(2), ++ .reserved = BIT(0) | BIT(1) | BIT(3), ++}; ++ ++static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = { ++ .sendsetup = BIT(0), ++ .reserved = BIT(1) | BIT(2) | BIT(3), ++}; ++ + static const struct usb_device_id option_ids[] = { + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, +@@ -1168,6 +1180,10 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) }, ++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0), ++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, ++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3), ++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), + .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), +@@ -1679,7 +1695,7 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, +- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) }, ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { 
USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, +diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c +index 4cc84c0c990d..0a7c68fa5e5e 100644 +--- a/drivers/usb/serial/ti_usb_3410_5052.c ++++ b/drivers/usb/serial/ti_usb_3410_5052.c +@@ -158,7 +158,7 @@ static unsigned int product_5052_count; + /* the array dimension is the number of default entries plus */ + /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */ + /* null entry */ +-static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = { ++static struct usb_device_id ti_id_table_3410[16+TI_EXTRA_VID_PID_COUNT+1] = { + { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, + { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, + { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, +@@ -184,7 +184,7 @@ static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = { + { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, + }; + +-static struct usb_device_id ti_id_table_combined[19+2*TI_EXTRA_VID_PID_COUNT+1] = { ++static struct usb_device_id ti_id_table_combined[20+2*TI_EXTRA_VID_PID_COUNT+1] = { + { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, + { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, + { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, +diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c +index 727905de0ba4..605068e6acf2 100644 +--- a/drivers/usb/serial/visor.c ++++ b/drivers/usb/serial/visor.c +@@ -551,6 +551,11 @@ static int treo_attach(struct usb_serial *serial) + (serial->num_interrupt_in == 0)) + return 0; + ++ if (serial->num_bulk_in < 2 || serial->num_interrupt_in < 2) { ++ dev_err(&serial->interface->dev, "missing endpoints\n"); ++ return -ENODEV; ++ } ++ + /* + * It appears that Treos and Kyoceras want to use the + * 1st bulk in endpoint to communicate with the 2nd bulk out endpoint, +@@ -604,8 +609,10 @@ static int clie_5_attach(struct usb_serial *serial) + */ + + /* some sanity check */ +- if (serial->num_ports < 2) +- return -1; ++ if (serial->num_bulk_out < 2) { ++ dev_err(&serial->interface->dev, "missing bulk out endpoints\n"); ++ return -ENODEV; ++ } + + /* port 0 now uses the modified endpoint Address */ + port = serial->port[0]; +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c +index 618bcc84a09e..948e6f21b594 100644 +--- a/fs/binfmt_elf.c ++++ b/fs/binfmt_elf.c +@@ -682,16 +682,16 @@ static int load_elf_binary(struct linux_binprm *bprm) + */ + would_dump(bprm, interpreter); + +- retval = kernel_read(interpreter, 0, bprm->buf, +- BINPRM_BUF_SIZE); +- if (retval != BINPRM_BUF_SIZE) { ++ /* Get the exec headers */ ++ retval = kernel_read(interpreter, 0, ++ (void *)&loc->interp_elf_ex, ++ sizeof(loc->interp_elf_ex)); ++ if (retval != sizeof(loc->interp_elf_ex)) { + if (retval >= 0) + retval = -EIO; + goto out_free_dentry; + } + +- /* Get the exec headers */ +- loc->interp_elf_ex = *((struct elfhdr *)bprm->buf); + break; + } + elf_ppnt++; +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h +index 2a71466b0115..6f74b8919237 100644 +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -26,6 +26,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -728,19 +729,55 @@ struct move_extent { + <= (EXT4_GOOD_OLD_INODE_SIZE + \ + (einode)->i_extra_isize)) \ + ++/* ++ * We use an encoding that preserves the times for extra epoch "00": ++ * ++ * extra msb of adjust for signed ++ * epoch 32-bit 32-bit tv_sec to ++ * bits time decoded 64-bit tv_sec 64-bit tv_sec valid time range ++ * 0 0 
1 -0x80000000..-0x00000001 0x000000000 1901-12-13..1969-12-31 ++ * 0 0 0 0x000000000..0x07fffffff 0x000000000 1970-01-01..2038-01-19 ++ * 0 1 1 0x080000000..0x0ffffffff 0x100000000 2038-01-19..2106-02-07 ++ * 0 1 0 0x100000000..0x17fffffff 0x100000000 2106-02-07..2174-02-25 ++ * 1 0 1 0x180000000..0x1ffffffff 0x200000000 2174-02-25..2242-03-16 ++ * 1 0 0 0x200000000..0x27fffffff 0x200000000 2242-03-16..2310-04-04 ++ * 1 1 1 0x280000000..0x2ffffffff 0x300000000 2310-04-04..2378-04-22 ++ * 1 1 0 0x300000000..0x37fffffff 0x300000000 2378-04-22..2446-05-10 ++ * ++ * Note that previous versions of the kernel on 64-bit systems would ++ * incorrectly use extra epoch bits 1,1 for dates between 1901 and ++ * 1970. e2fsck will correct this, assuming that it is run on the ++ * affected filesystem before 2242. ++ */ ++ + static inline __le32 ext4_encode_extra_time(struct timespec *time) + { +- return cpu_to_le32((sizeof(time->tv_sec) > 4 ? +- (time->tv_sec >> 32) & EXT4_EPOCH_MASK : 0) | +- ((time->tv_nsec << EXT4_EPOCH_BITS) & EXT4_NSEC_MASK)); ++ u32 extra = sizeof(time->tv_sec) > 4 ? ++ ((time->tv_sec - (s32)time->tv_sec) >> 32) & EXT4_EPOCH_MASK : 0; ++ return cpu_to_le32(extra | (time->tv_nsec << EXT4_EPOCH_BITS)); + } + + static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra) + { +- if (sizeof(time->tv_sec) > 4) +- time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) +- << 32; +- time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS; ++ if (unlikely(sizeof(time->tv_sec) > 4 && ++ (extra & cpu_to_le32(EXT4_EPOCH_MASK)))) { ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0) ++ /* Handle legacy encoding of pre-1970 dates with epoch ++ * bits 1,1. We assume that by kernel version 4.20, ++ * everyone will have run fsck over the affected ++ * filesystems to correct the problem. (This ++ * backwards compatibility may be removed before this ++ * time, at the discretion of the ext4 developers.) ++ */ ++ u64 extra_bits = le32_to_cpu(extra) & EXT4_EPOCH_MASK; ++ if (extra_bits == 3 && ((time->tv_sec) & 0x80000000) != 0) ++ extra_bits = 0; ++ time->tv_sec += extra_bits << 32; ++#else ++ time->tv_sec += (u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) << 32; ++#endif ++ } ++ time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS; + } + + #define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \ +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c +index a69bd74ed390..fa7d2e668c3a 100644 +--- a/fs/ext4/resize.c ++++ b/fs/ext4/resize.c +@@ -1025,7 +1025,7 @@ exit_free: + * do not copy the full number of backups at this time. The resize + * which changed s_groups_count will backup again. 
+ */ +-static void update_backups(struct super_block *sb, int blk_off, char *data, ++static void update_backups(struct super_block *sb, sector_t blk_off, char *data, + int size, int meta_bg) + { + struct ext4_sb_info *sbi = EXT4_SB(sb); +@@ -1050,7 +1050,7 @@ static void update_backups(struct super_block *sb, int blk_off, char *data, + group = ext4_list_backups(sb, &three, &five, &seven); + last = sbi->s_groups_count; + } else { +- group = ext4_meta_bg_first_group(sb, group) + 1; ++ group = ext4_get_group_number(sb, blk_off) + 1; + last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2); + } + +diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c +index e028b8eb1c40..0912b90e05bc 100644 +--- a/fs/fscache/netfs.c ++++ b/fs/fscache/netfs.c +@@ -45,9 +45,6 @@ int __fscache_register_netfs(struct fscache_netfs *netfs) + netfs->primary_index->parent = &fscache_fsdef_index; + netfs->primary_index->netfs_data = netfs; + +- atomic_inc(&netfs->primary_index->parent->usage); +- atomic_inc(&netfs->primary_index->parent->n_children); +- + spin_lock_init(&netfs->primary_index->lock); + INIT_HLIST_HEAD(&netfs->primary_index->backing_objects); + +@@ -60,6 +57,9 @@ int __fscache_register_netfs(struct fscache_netfs *netfs) + goto already_registered; + } + ++ atomic_inc(&netfs->primary_index->parent->usage); ++ atomic_inc(&netfs->primary_index->parent->n_children); ++ + list_add(&netfs->link, &fscache_netfs_list); + ret = 0; + +@@ -70,8 +70,7 @@ already_registered: + up_write(&fscache_addremove_sem); + + if (ret < 0) { +- netfs->primary_index->parent = NULL; +- __fscache_cookie_put(netfs->primary_index); ++ kmem_cache_free(fscache_cookie_jar, netfs->primary_index); + netfs->primary_index = NULL; + } + +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c +index ec34e11d6854..21b828c713cc 100644 +--- a/fs/jbd2/transaction.c ++++ b/fs/jbd2/transaction.c +@@ -1936,6 +1936,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh, + + if (!buffer_dirty(bh)) { + /* bdflush has written it. We can drop it now */ ++ __jbd2_journal_remove_checkpoint(jh); + goto zap_buffer; + } + +@@ -1965,6 +1966,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh, + /* The orphan record's transaction has + * committed. We can cleanse this buffer */ + clear_buffer_jbddirty(bh); ++ __jbd2_journal_remove_checkpoint(jh); + goto zap_buffer; + } + } +diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c +index 2c119d5d04c9..d084200dbc4e 100644 +--- a/fs/ocfs2/dlm/dlmmaster.c ++++ b/fs/ocfs2/dlm/dlmmaster.c +@@ -2456,6 +2456,11 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm, + spin_lock(&dlm->master_lock); + ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, + namelen, target, dlm->node_num); ++ /* get an extra reference on the mle. ++ * otherwise the assert_master from the new ++ * master will destroy this. ++ */ ++ dlm_get_mle_inuse(mle); + spin_unlock(&dlm->master_lock); + spin_unlock(&dlm->spinlock); + +@@ -2491,6 +2496,7 @@ fail: + if (mle_added) { + dlm_mle_detach_hb_events(dlm, mle); + dlm_put_mle(mle); ++ dlm_put_mle_inuse(mle); + } else if (mle) { + kmem_cache_free(dlm_mle_cache, mle); + mle = NULL; +@@ -2508,17 +2514,6 @@ fail: + * ensure that all assert_master work is flushed. */ + flush_workqueue(dlm->dlm_worker); + +- /* get an extra reference on the mle. +- * otherwise the assert_master from the new +- * master will destroy this. 
+- * also, make sure that all callers of dlm_get_mle +- * take both dlm->spinlock and dlm->master_lock */ +- spin_lock(&dlm->spinlock); +- spin_lock(&dlm->master_lock); +- dlm_get_mle_inuse(mle); +- spin_unlock(&dlm->master_lock); +- spin_unlock(&dlm->spinlock); +- + /* notify new node and send all lock state */ + /* call send_one_lockres with migration flag. + * this serves as notice to the target node that a +@@ -3246,6 +3241,15 @@ top: + mle->new_master != dead_node) + continue; + ++ if (mle->new_master == dead_node && mle->inuse) { ++ mlog(ML_NOTICE, "%s: target %u died during " ++ "migration from %u, the MLE is " ++ "still keep used, ignore it!\n", ++ dlm->name, dead_node, ++ mle->master); ++ continue; ++ } ++ + /* If we have reached this point, this mle needs to be + * removed from the list and freed. */ + dlm_clean_migration_mle(dlm, mle); +diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c +index 9bd981cd3142..01c69f24e416 100644 +--- a/fs/ocfs2/dlm/dlmrecovery.c ++++ b/fs/ocfs2/dlm/dlmrecovery.c +@@ -2326,6 +2326,8 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) + break; + } + } ++ dlm_lockres_clear_refmap_bit(dlm, res, ++ dead_node); + spin_unlock(&res->spinlock); + continue; + } +diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c +index c327d4ee1235..7b3792e5844a 100644 +--- a/fs/sysv/inode.c ++++ b/fs/sysv/inode.c +@@ -161,14 +161,8 @@ void sysv_set_inode(struct inode *inode, dev_t rdev) + inode->i_fop = &sysv_dir_operations; + inode->i_mapping->a_ops = &sysv_aops; + } else if (S_ISLNK(inode->i_mode)) { +- if (inode->i_blocks) { +- inode->i_op = &sysv_symlink_inode_operations; +- inode->i_mapping->a_ops = &sysv_aops; +- } else { +- inode->i_op = &sysv_fast_symlink_inode_operations; +- nd_terminate_link(SYSV_I(inode)->i_data, inode->i_size, +- sizeof(SYSV_I(inode)->i_data) - 1); +- } ++ inode->i_op = &sysv_symlink_inode_operations; ++ inode->i_mapping->a_ops = &sysv_aops; + } else + init_special_inode(inode, inode->i_mode, rdev); + } +diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h +index d61c11170213..2f38daaab3d7 100644 +--- a/include/crypto/if_alg.h ++++ b/include/crypto/if_alg.h +@@ -30,6 +30,8 @@ struct alg_sock { + + struct sock *parent; + ++ unsigned int refcnt; ++ + const struct af_alg_type *type; + void *private; + }; +@@ -64,6 +66,7 @@ int af_alg_register_type(const struct af_alg_type *type); + int af_alg_unregister_type(const struct af_alg_type *type); + + int af_alg_release(struct socket *sock); ++void af_alg_release_parent(struct sock *sk); + int af_alg_accept(struct sock *sk, struct socket *newsock); + + int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len, +@@ -80,11 +83,6 @@ static inline struct alg_sock *alg_sk(struct sock *sk) + return (struct alg_sock *)sk; + } + +-static inline void af_alg_release_parent(struct sock *sk) +-{ +- sock_put(alg_sk(sk)->parent); +-} +- + static inline void af_alg_init_completion(struct af_alg_completion *completion) + { + init_completion(&completion->completion); +diff --git a/include/linux/signal.h b/include/linux/signal.h +index 2ac423bdb676..53944e50e421 100644 +--- a/include/linux/signal.h ++++ b/include/linux/signal.h +@@ -247,7 +247,6 @@ extern int sigprocmask(int, sigset_t *, sigset_t *); + extern void set_current_blocked(sigset_t *); + extern void __set_current_blocked(const sigset_t *); + extern int show_unhandled_signals; +-extern int sigsuspend(sigset_t *); + + struct sigaction { + #ifndef __ARCH_HAS_IRIX_SIGACTION +diff --git 
a/kernel/signal.c b/kernel/signal.c +index 2e51bcbea1e3..4d1f7fa3138d 100644 +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -3551,7 +3551,7 @@ SYSCALL_DEFINE0(pause) + + #endif + +-int sigsuspend(sigset_t *set) ++static int sigsuspend(sigset_t *set) + { + current->saved_sigmask = current->blocked; + set_current_blocked(set); +diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h +index 49b582a225b0..b9897e2be404 100644 +--- a/scripts/recordmcount.h ++++ b/scripts/recordmcount.h +@@ -377,7 +377,7 @@ static void nop_mcount(Elf_Shdr const *const relhdr, + + if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) { + if (make_nop) +- ret = make_nop((void *)ehdr, shdr->sh_offset + relp->r_offset); ++ ret = make_nop((void *)ehdr, _w(shdr->sh_offset) + _w(relp->r_offset)); + if (warn_on_notrace_sect && !once) { + printf("Section %s has mcount callers being ignored\n", + txtname); +diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c +index 3fdf998ad057..572f95175e97 100644 +--- a/sound/core/compress_offload.c ++++ b/sound/core/compress_offload.c +@@ -44,6 +44,13 @@ + #include + #include + ++/* struct snd_compr_codec_caps overflows the ioctl bit size for some ++ * architectures, so we need to disable the relevant ioctls. ++ */ ++#if _IOC_SIZEBITS < 14 ++#define COMPR_CODEC_CAPS_OVERFLOW ++#endif ++ + /* TODO: + * - add substream support for multiple devices in case of + * SND_DYNAMIC_MINORS is not used +@@ -427,6 +434,7 @@ out: + return retval; + } + ++#ifndef COMPR_CODEC_CAPS_OVERFLOW + static int + snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg) + { +@@ -450,6 +458,7 @@ out: + kfree(caps); + return retval; + } ++#endif /* !COMPR_CODEC_CAPS_OVERFLOW */ + + /* revisit this with snd_pcm_preallocate_xxx */ + static int snd_compr_allocate_buffer(struct snd_compr_stream *stream, +@@ -791,9 +800,11 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg) + case _IOC_NR(SNDRV_COMPRESS_GET_CAPS): + retval = snd_compr_get_caps(stream, arg); + break; ++#ifndef COMPR_CODEC_CAPS_OVERFLOW + case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS): + retval = snd_compr_get_codec_caps(stream, arg); + break; ++#endif + case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS): + retval = snd_compr_set_params(stream, arg); + break; +diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c +index 4c1cc51772e6..7417f96cea6e 100644 +--- a/sound/core/oss/pcm_oss.c ++++ b/sound/core/oss/pcm_oss.c +@@ -834,7 +834,8 @@ static int choose_rate(struct snd_pcm_substream *substream, + return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL); + } + +-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream) ++static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream, ++ bool trylock) + { + struct snd_pcm_runtime *runtime = substream->runtime; + struct snd_pcm_hw_params *params, *sparams; +@@ -848,7 +849,10 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream) + struct snd_mask sformat_mask; + struct snd_mask mask; + +- if (mutex_lock_interruptible(&runtime->oss.params_lock)) ++ if (trylock) { ++ if (!(mutex_trylock(&runtime->oss.params_lock))) ++ return -EAGAIN; ++ } else if (mutex_lock_interruptible(&runtime->oss.params_lock)) + return -EINTR; + sw_params = kmalloc(sizeof(*sw_params), GFP_KERNEL); + params = kmalloc(sizeof(*params), GFP_KERNEL); +@@ -1091,7 +1095,7 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil + if (asubstream == NULL) 
+ asubstream = substream; + if (substream->runtime->oss.params) { +- err = snd_pcm_oss_change_params(substream); ++ err = snd_pcm_oss_change_params(substream, false); + if (err < 0) + return err; + } +@@ -1130,7 +1134,7 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream) + return 0; + runtime = substream->runtime; + if (runtime->oss.params) { +- err = snd_pcm_oss_change_params(substream); ++ err = snd_pcm_oss_change_params(substream, false); + if (err < 0) + return err; + } +@@ -2168,7 +2172,7 @@ static int snd_pcm_oss_get_space(struct snd_pcm_oss_file *pcm_oss_file, int stre + runtime = substream->runtime; + + if (runtime->oss.params && +- (err = snd_pcm_oss_change_params(substream)) < 0) ++ (err = snd_pcm_oss_change_params(substream, false)) < 0) + return err; + + info.fragsize = runtime->oss.period_bytes; +@@ -2804,7 +2808,12 @@ static int snd_pcm_oss_mmap(struct file *file, struct vm_area_struct *area) + return -EIO; + + if (runtime->oss.params) { +- if ((err = snd_pcm_oss_change_params(substream)) < 0) ++ /* use mutex_trylock() for params_lock for avoiding a deadlock ++ * between mmap_sem and params_lock taken by ++ * copy_from/to_user() in snd_pcm_oss_write/read() ++ */ ++ err = snd_pcm_oss_change_params(substream, true); ++ if (err < 0) + return err; + } + #ifdef CONFIG_SND_PCM_OSS_PLUGINS +diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c +index 7b596b5751db..500765f20843 100644 +--- a/sound/core/rawmidi.c ++++ b/sound/core/rawmidi.c +@@ -934,31 +934,36 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream, + unsigned long flags; + long result = 0, count1; + struct snd_rawmidi_runtime *runtime = substream->runtime; ++ unsigned long appl_ptr; + ++ spin_lock_irqsave(&runtime->lock, flags); + while (count > 0 && runtime->avail) { + count1 = runtime->buffer_size - runtime->appl_ptr; + if (count1 > count) + count1 = count; +- spin_lock_irqsave(&runtime->lock, flags); + if (count1 > (int)runtime->avail) + count1 = runtime->avail; ++ ++ /* update runtime->appl_ptr before unlocking for userbuf */ ++ appl_ptr = runtime->appl_ptr; ++ runtime->appl_ptr += count1; ++ runtime->appl_ptr %= runtime->buffer_size; ++ runtime->avail -= count1; ++ + if (kernelbuf) +- memcpy(kernelbuf + result, runtime->buffer + runtime->appl_ptr, count1); ++ memcpy(kernelbuf + result, runtime->buffer + appl_ptr, count1); + if (userbuf) { + spin_unlock_irqrestore(&runtime->lock, flags); + if (copy_to_user(userbuf + result, +- runtime->buffer + runtime->appl_ptr, count1)) { ++ runtime->buffer + appl_ptr, count1)) { + return result > 0 ? 
result : -EFAULT; + } + spin_lock_irqsave(&runtime->lock, flags); + } +- runtime->appl_ptr += count1; +- runtime->appl_ptr %= runtime->buffer_size; +- runtime->avail -= count1; +- spin_unlock_irqrestore(&runtime->lock, flags); + result += count1; + count -= count1; + } ++ spin_unlock_irqrestore(&runtime->lock, flags); + return result; + } + +@@ -1161,8 +1166,9 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream, + unsigned long flags; + long count1, result; + struct snd_rawmidi_runtime *runtime = substream->runtime; ++ unsigned long appl_ptr; + +- if (snd_BUG_ON(!kernelbuf && !userbuf)) ++ if (!kernelbuf && !userbuf) + return -EINVAL; + if (snd_BUG_ON(!runtime->buffer)) + return -EINVAL; +@@ -1181,12 +1187,19 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream, + count1 = count; + if (count1 > (long)runtime->avail) + count1 = runtime->avail; ++ ++ /* update runtime->appl_ptr before unlocking for userbuf */ ++ appl_ptr = runtime->appl_ptr; ++ runtime->appl_ptr += count1; ++ runtime->appl_ptr %= runtime->buffer_size; ++ runtime->avail -= count1; ++ + if (kernelbuf) +- memcpy(runtime->buffer + runtime->appl_ptr, ++ memcpy(runtime->buffer + appl_ptr, + kernelbuf + result, count1); + else if (userbuf) { + spin_unlock_irqrestore(&runtime->lock, flags); +- if (copy_from_user(runtime->buffer + runtime->appl_ptr, ++ if (copy_from_user(runtime->buffer + appl_ptr, + userbuf + result, count1)) { + spin_lock_irqsave(&runtime->lock, flags); + result = result > 0 ? result : -EFAULT; +@@ -1194,9 +1207,6 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream, + } + spin_lock_irqsave(&runtime->lock, flags); + } +- runtime->appl_ptr += count1; +- runtime->appl_ptr %= runtime->buffer_size; +- runtime->avail -= count1; + result += count1; + count -= count1; + } +diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c +index c5b773a1eea9..4a09c3085ca4 100644 +--- a/sound/core/seq/oss/seq_oss_synth.c ++++ b/sound/core/seq/oss/seq_oss_synth.c +@@ -310,7 +310,7 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp) + struct seq_oss_synth *rec; + struct seq_oss_synthinfo *info; + +- if (snd_BUG_ON(dp->max_synthdev >= SNDRV_SEQ_OSS_MAX_SYNTH_DEVS)) ++ if (snd_BUG_ON(dp->max_synthdev > SNDRV_SEQ_OSS_MAX_SYNTH_DEVS)) + return; + for (i = 0; i < dp->max_synthdev; i++) { + info = &dp->synths[i]; +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c +index ecfbf5f39d38..08865dcbf5f1 100644 +--- a/sound/core/seq/seq_clientmgr.c ++++ b/sound/core/seq/seq_clientmgr.c +@@ -678,6 +678,9 @@ static int deliver_to_subscribers(struct snd_seq_client *client, + else + down_read(&grp->list_mutex); + list_for_each_entry(subs, &grp->list_head, src_list) { ++ /* both ports ready? 
*/ ++ if (atomic_read(&subs->ref_count) != 2) ++ continue; + event->dest = subs->info.dest; + if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP) + /* convert time according to flag with subscription */ +diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c +index 9516e5ce3aad..67c91d226552 100644 +--- a/sound/core/seq/seq_ports.c ++++ b/sound/core/seq/seq_ports.c +@@ -175,10 +175,6 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client, + } + + /* */ +-enum group_type { +- SRC_LIST, DEST_LIST +-}; +- + static int subscribe_port(struct snd_seq_client *client, + struct snd_seq_client_port *port, + struct snd_seq_port_subs_info *grp, +@@ -205,6 +201,20 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr, + return NULL; + } + ++static void delete_and_unsubscribe_port(struct snd_seq_client *client, ++ struct snd_seq_client_port *port, ++ struct snd_seq_subscribers *subs, ++ bool is_src, bool ack); ++ ++static inline struct snd_seq_subscribers * ++get_subscriber(struct list_head *p, bool is_src) ++{ ++ if (is_src) ++ return list_entry(p, struct snd_seq_subscribers, src_list); ++ else ++ return list_entry(p, struct snd_seq_subscribers, dest_list); ++} ++ + /* + * remove all subscribers on the list + * this is called from port_delete, for each src and dest list. +@@ -212,7 +222,7 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr, + static void clear_subscriber_list(struct snd_seq_client *client, + struct snd_seq_client_port *port, + struct snd_seq_port_subs_info *grp, +- int grptype) ++ int is_src) + { + struct list_head *p, *n; + +@@ -221,15 +231,13 @@ static void clear_subscriber_list(struct snd_seq_client *client, + struct snd_seq_client *c; + struct snd_seq_client_port *aport; + +- if (grptype == SRC_LIST) { +- subs = list_entry(p, struct snd_seq_subscribers, src_list); ++ subs = get_subscriber(p, is_src); ++ if (is_src) + aport = get_client_port(&subs->info.dest, &c); +- } else { +- subs = list_entry(p, struct snd_seq_subscribers, dest_list); ++ else + aport = get_client_port(&subs->info.sender, &c); +- } +- list_del(p); +- unsubscribe_port(client, port, grp, &subs->info, 0); ++ delete_and_unsubscribe_port(client, port, subs, is_src, false); ++ + if (!aport) { + /* looks like the connected port is being deleted. + * we decrease the counter, and when both ports are deleted +@@ -237,21 +245,14 @@ static void clear_subscriber_list(struct snd_seq_client *client, + */ + if (atomic_dec_and_test(&subs->ref_count)) + kfree(subs); +- } else { +- /* ok we got the connected port */ +- struct snd_seq_port_subs_info *agrp; +- agrp = (grptype == SRC_LIST) ? 
&aport->c_dest : &aport->c_src; +- down_write(&agrp->list_mutex); +- if (grptype == SRC_LIST) +- list_del(&subs->dest_list); +- else +- list_del(&subs->src_list); +- up_write(&agrp->list_mutex); +- unsubscribe_port(c, aport, agrp, &subs->info, 1); +- kfree(subs); +- snd_seq_port_unlock(aport); +- snd_seq_client_unlock(c); ++ continue; + } ++ ++ /* ok we got the connected port */ ++ delete_and_unsubscribe_port(c, aport, subs, !is_src, true); ++ kfree(subs); ++ snd_seq_port_unlock(aport); ++ snd_seq_client_unlock(c); + } + } + +@@ -264,8 +265,8 @@ static int port_delete(struct snd_seq_client *client, + snd_use_lock_sync(&port->use_lock); + + /* clear subscribers info */ +- clear_subscriber_list(client, port, &port->c_src, SRC_LIST); +- clear_subscriber_list(client, port, &port->c_dest, DEST_LIST); ++ clear_subscriber_list(client, port, &port->c_src, true); ++ clear_subscriber_list(client, port, &port->c_dest, false); + + if (port->private_free) + port->private_free(port->private_data); +@@ -484,85 +485,120 @@ static int match_subs_info(struct snd_seq_port_subscribe *r, + return 0; + } + +- +-/* connect two ports */ +-int snd_seq_port_connect(struct snd_seq_client *connector, +- struct snd_seq_client *src_client, +- struct snd_seq_client_port *src_port, +- struct snd_seq_client *dest_client, +- struct snd_seq_client_port *dest_port, +- struct snd_seq_port_subscribe *info) ++static int check_and_subscribe_port(struct snd_seq_client *client, ++ struct snd_seq_client_port *port, ++ struct snd_seq_subscribers *subs, ++ bool is_src, bool exclusive, bool ack) + { +- struct snd_seq_port_subs_info *src = &src_port->c_src; +- struct snd_seq_port_subs_info *dest = &dest_port->c_dest; +- struct snd_seq_subscribers *subs, *s; +- int err, src_called = 0; +- unsigned long flags; +- int exclusive; ++ struct snd_seq_port_subs_info *grp; ++ struct list_head *p; ++ struct snd_seq_subscribers *s; ++ int err; + +- subs = kzalloc(sizeof(*subs), GFP_KERNEL); +- if (! subs) +- return -ENOMEM; +- +- subs->info = *info; +- atomic_set(&subs->ref_count, 2); +- +- down_write(&src->list_mutex); +- down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING); +- +- exclusive = info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE ? 1 : 0; ++ grp = is_src ? &port->c_src : &port->c_dest; + err = -EBUSY; ++ down_write(&grp->list_mutex); + if (exclusive) { +- if (! list_empty(&src->list_head) || ! 
list_empty(&dest->list_head)) ++ if (!list_empty(&grp->list_head)) + goto __error; + } else { +- if (src->exclusive || dest->exclusive) ++ if (grp->exclusive) + goto __error; + /* check whether already exists */ +- list_for_each_entry(s, &src->list_head, src_list) { +- if (match_subs_info(info, &s->info)) +- goto __error; +- } +- list_for_each_entry(s, &dest->list_head, dest_list) { +- if (match_subs_info(info, &s->info)) ++ list_for_each(p, &grp->list_head) { ++ s = get_subscriber(p, is_src); ++ if (match_subs_info(&subs->info, &s->info)) + goto __error; + } + } + +- if ((err = subscribe_port(src_client, src_port, src, info, +- connector->number != src_client->number)) < 0) +- goto __error; +- src_called = 1; +- +- if ((err = subscribe_port(dest_client, dest_port, dest, info, +- connector->number != dest_client->number)) < 0) ++ err = subscribe_port(client, port, grp, &subs->info, ack); ++ if (err < 0) { ++ grp->exclusive = 0; + goto __error; ++ } + + /* add to list */ +- write_lock_irqsave(&src->list_lock, flags); +- // write_lock(&dest->list_lock); // no other lock yet +- list_add_tail(&subs->src_list, &src->list_head); +- list_add_tail(&subs->dest_list, &dest->list_head); +- // write_unlock(&dest->list_lock); // no other lock yet +- write_unlock_irqrestore(&src->list_lock, flags); ++ write_lock_irq(&grp->list_lock); ++ if (is_src) ++ list_add_tail(&subs->src_list, &grp->list_head); ++ else ++ list_add_tail(&subs->dest_list, &grp->list_head); ++ grp->exclusive = exclusive; ++ atomic_inc(&subs->ref_count); ++ write_unlock_irq(&grp->list_lock); ++ err = 0; ++ ++ __error: ++ up_write(&grp->list_mutex); ++ return err; ++} + +- src->exclusive = dest->exclusive = exclusive; ++static void delete_and_unsubscribe_port(struct snd_seq_client *client, ++ struct snd_seq_client_port *port, ++ struct snd_seq_subscribers *subs, ++ bool is_src, bool ack) ++{ ++ struct snd_seq_port_subs_info *grp; ++ ++ grp = is_src ? 
&port->c_src : &port->c_dest; ++ down_write(&grp->list_mutex); ++ write_lock_irq(&grp->list_lock); ++ if (is_src) ++ list_del(&subs->src_list); ++ else ++ list_del(&subs->dest_list); ++ grp->exclusive = 0; ++ write_unlock_irq(&grp->list_lock); ++ up_write(&grp->list_mutex); ++ ++ unsubscribe_port(client, port, grp, &subs->info, ack); ++} ++ ++/* connect two ports */ ++int snd_seq_port_connect(struct snd_seq_client *connector, ++ struct snd_seq_client *src_client, ++ struct snd_seq_client_port *src_port, ++ struct snd_seq_client *dest_client, ++ struct snd_seq_client_port *dest_port, ++ struct snd_seq_port_subscribe *info) ++{ ++ struct snd_seq_subscribers *subs; ++ bool exclusive; ++ int err; ++ ++ subs = kzalloc(sizeof(*subs), GFP_KERNEL); ++ if (!subs) ++ return -ENOMEM; ++ ++ subs->info = *info; ++ atomic_set(&subs->ref_count, 0); ++ INIT_LIST_HEAD(&subs->src_list); ++ INIT_LIST_HEAD(&subs->dest_list); ++ ++ exclusive = !!(info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE); ++ ++ err = check_and_subscribe_port(src_client, src_port, subs, true, ++ exclusive, ++ connector->number != src_client->number); ++ if (err < 0) ++ goto error; ++ err = check_and_subscribe_port(dest_client, dest_port, subs, false, ++ exclusive, ++ connector->number != dest_client->number); ++ if (err < 0) ++ goto error_dest; + +- up_write(&dest->list_mutex); +- up_write(&src->list_mutex); + return 0; + +- __error: +- if (src_called) +- unsubscribe_port(src_client, src_port, src, info, +- connector->number != src_client->number); ++ error_dest: ++ delete_and_unsubscribe_port(src_client, src_port, subs, true, ++ connector->number != src_client->number); ++ error: + kfree(subs); +- up_write(&dest->list_mutex); +- up_write(&src->list_mutex); + return err; + } + +- + /* remove the connection */ + int snd_seq_port_disconnect(struct snd_seq_client *connector, + struct snd_seq_client *src_client, +@@ -572,37 +608,28 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector, + struct snd_seq_port_subscribe *info) + { + struct snd_seq_port_subs_info *src = &src_port->c_src; +- struct snd_seq_port_subs_info *dest = &dest_port->c_dest; + struct snd_seq_subscribers *subs; + int err = -ENOENT; +- unsigned long flags; + + down_write(&src->list_mutex); +- down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING); +- + /* look for the connection */ + list_for_each_entry(subs, &src->list_head, src_list) { + if (match_subs_info(info, &subs->info)) { +- write_lock_irqsave(&src->list_lock, flags); +- // write_lock(&dest->list_lock); // no lock yet +- list_del(&subs->src_list); +- list_del(&subs->dest_list); +- // write_unlock(&dest->list_lock); +- write_unlock_irqrestore(&src->list_lock, flags); +- src->exclusive = dest->exclusive = 0; +- unsubscribe_port(src_client, src_port, src, info, +- connector->number != src_client->number); +- unsubscribe_port(dest_client, dest_port, dest, info, +- connector->number != dest_client->number); +- kfree(subs); ++ atomic_dec(&subs->ref_count); /* mark as not ready */ + err = 0; + break; + } + } +- +- up_write(&dest->list_mutex); + up_write(&src->list_mutex); +- return err; ++ if (err < 0) ++ return err; ++ ++ delete_and_unsubscribe_port(src_client, src_port, subs, true, ++ connector->number != src_client->number); ++ delete_and_unsubscribe_port(dest_client, dest_port, subs, false, ++ connector->number != dest_client->number); ++ kfree(subs); ++ return 0; + } + + +diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c +index 24d44b2f61ac..6ec30a98a92a 100644 +--- 
a/sound/core/seq/seq_timer.c ++++ b/sound/core/seq/seq_timer.c +@@ -92,6 +92,9 @@ void snd_seq_timer_delete(struct snd_seq_timer **tmr) + + void snd_seq_timer_defaults(struct snd_seq_timer * tmr) + { ++ unsigned long flags; ++ ++ spin_lock_irqsave(&tmr->lock, flags); + /* setup defaults */ + tmr->ppq = 96; /* 96 PPQ */ + tmr->tempo = 500000; /* 120 BPM */ +@@ -107,21 +110,25 @@ void snd_seq_timer_defaults(struct snd_seq_timer * tmr) + tmr->preferred_resolution = seq_default_timer_resolution; + + tmr->skew = tmr->skew_base = SKEW_BASE; ++ spin_unlock_irqrestore(&tmr->lock, flags); + } + +-void snd_seq_timer_reset(struct snd_seq_timer * tmr) ++static void seq_timer_reset(struct snd_seq_timer *tmr) + { +- unsigned long flags; +- +- spin_lock_irqsave(&tmr->lock, flags); +- + /* reset time & songposition */ + tmr->cur_time.tv_sec = 0; + tmr->cur_time.tv_nsec = 0; + + tmr->tick.cur_tick = 0; + tmr->tick.fraction = 0; ++} ++ ++void snd_seq_timer_reset(struct snd_seq_timer *tmr) ++{ ++ unsigned long flags; + ++ spin_lock_irqsave(&tmr->lock, flags); ++ seq_timer_reset(tmr); + spin_unlock_irqrestore(&tmr->lock, flags); + } + +@@ -140,8 +147,11 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri, + tmr = q->timer; + if (tmr == NULL) + return; +- if (!tmr->running) ++ spin_lock_irqsave(&tmr->lock, flags); ++ if (!tmr->running) { ++ spin_unlock_irqrestore(&tmr->lock, flags); + return; ++ } + + resolution *= ticks; + if (tmr->skew != tmr->skew_base) { +@@ -150,8 +160,6 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri, + (((resolution & 0xffff) * tmr->skew) >> 16); + } + +- spin_lock_irqsave(&tmr->lock, flags); +- + /* update timer */ + snd_seq_inc_time_nsec(&tmr->cur_time, resolution); + +@@ -298,26 +306,30 @@ int snd_seq_timer_open(struct snd_seq_queue *q) + t->callback = snd_seq_timer_interrupt; + t->callback_data = q; + t->flags |= SNDRV_TIMER_IFLG_AUTO; ++ spin_lock_irq(&tmr->lock); + tmr->timeri = t; ++ spin_unlock_irq(&tmr->lock); + return 0; + } + + int snd_seq_timer_close(struct snd_seq_queue *q) + { + struct snd_seq_timer *tmr; ++ struct snd_timer_instance *t; + + tmr = q->timer; + if (snd_BUG_ON(!tmr)) + return -EINVAL; +- if (tmr->timeri) { +- snd_timer_stop(tmr->timeri); +- snd_timer_close(tmr->timeri); +- tmr->timeri = NULL; +- } ++ spin_lock_irq(&tmr->lock); ++ t = tmr->timeri; ++ tmr->timeri = NULL; ++ spin_unlock_irq(&tmr->lock); ++ if (t) ++ snd_timer_close(t); + return 0; + } + +-int snd_seq_timer_stop(struct snd_seq_timer * tmr) ++static int seq_timer_stop(struct snd_seq_timer *tmr) + { + if (! tmr->timeri) + return -EINVAL; +@@ -328,6 +340,17 @@ int snd_seq_timer_stop(struct snd_seq_timer * tmr) + return 0; + } + ++int snd_seq_timer_stop(struct snd_seq_timer *tmr) ++{ ++ unsigned long flags; ++ int err; ++ ++ spin_lock_irqsave(&tmr->lock, flags); ++ err = seq_timer_stop(tmr); ++ spin_unlock_irqrestore(&tmr->lock, flags); ++ return err; ++} ++ + static int initialize_timer(struct snd_seq_timer *tmr) + { + struct snd_timer *t; +@@ -360,13 +383,13 @@ static int initialize_timer(struct snd_seq_timer *tmr) + return 0; + } + +-int snd_seq_timer_start(struct snd_seq_timer * tmr) ++static int seq_timer_start(struct snd_seq_timer *tmr) + { + if (! 
tmr->timeri) + return -EINVAL; + if (tmr->running) +- snd_seq_timer_stop(tmr); +- snd_seq_timer_reset(tmr); ++ seq_timer_stop(tmr); ++ seq_timer_reset(tmr); + if (initialize_timer(tmr) < 0) + return -EINVAL; + snd_timer_start(tmr->timeri, tmr->ticks); +@@ -375,14 +398,25 @@ int snd_seq_timer_start(struct snd_seq_timer * tmr) + return 0; + } + +-int snd_seq_timer_continue(struct snd_seq_timer * tmr) ++int snd_seq_timer_start(struct snd_seq_timer *tmr) ++{ ++ unsigned long flags; ++ int err; ++ ++ spin_lock_irqsave(&tmr->lock, flags); ++ err = seq_timer_start(tmr); ++ spin_unlock_irqrestore(&tmr->lock, flags); ++ return err; ++} ++ ++static int seq_timer_continue(struct snd_seq_timer *tmr) + { + if (! tmr->timeri) + return -EINVAL; + if (tmr->running) + return -EBUSY; + if (! tmr->initialized) { +- snd_seq_timer_reset(tmr); ++ seq_timer_reset(tmr); + if (initialize_timer(tmr) < 0) + return -EINVAL; + } +@@ -392,11 +426,24 @@ int snd_seq_timer_continue(struct snd_seq_timer * tmr) + return 0; + } + ++int snd_seq_timer_continue(struct snd_seq_timer *tmr) ++{ ++ unsigned long flags; ++ int err; ++ ++ spin_lock_irqsave(&tmr->lock, flags); ++ err = seq_timer_continue(tmr); ++ spin_unlock_irqrestore(&tmr->lock, flags); ++ return err; ++} ++ + /* return current 'real' time. use timeofday() to get better granularity. */ + snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr) + { + snd_seq_real_time_t cur_time; ++ unsigned long flags; + ++ spin_lock_irqsave(&tmr->lock, flags); + cur_time = tmr->cur_time; + if (tmr->running) { + struct timeval tm; +@@ -412,7 +459,7 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr) + } + snd_seq_sanity_real_time(&cur_time); + } +- ++ spin_unlock_irqrestore(&tmr->lock, flags); + return cur_time; + } + +diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c +index 4b50e604276d..0fa691e01384 100644 +--- a/sound/core/seq/seq_virmidi.c ++++ b/sound/core/seq/seq_virmidi.c +@@ -254,9 +254,13 @@ static int snd_virmidi_output_open(struct snd_rawmidi_substream *substream) + */ + static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream) + { ++ struct snd_virmidi_dev *rdev = substream->rmidi->private_data; + struct snd_virmidi *vmidi = substream->runtime->private_data; +- snd_midi_event_free(vmidi->parser); ++ ++ write_lock_irq(&rdev->filelist_lock); + list_del(&vmidi->list); ++ write_unlock_irq(&rdev->filelist_lock); ++ snd_midi_event_free(vmidi->parser); + substream->runtime->private_data = NULL; + kfree(vmidi); + return 0; +diff --git a/sound/core/timer.c b/sound/core/timer.c +index 4e436fe53afa..d90d8f4b85fe 100644 +--- a/sound/core/timer.c ++++ b/sound/core/timer.c +@@ -300,8 +300,7 @@ int snd_timer_open(struct snd_timer_instance **ti, + return 0; + } + +-static int _snd_timer_stop(struct snd_timer_instance *timeri, +- int keep_flag, int event); ++static int _snd_timer_stop(struct snd_timer_instance *timeri, int event); + + /* + * close a timer instance +@@ -343,7 +342,7 @@ int snd_timer_close(struct snd_timer_instance *timeri) + spin_unlock_irq(&timer->lock); + mutex_lock(®ister_mutex); + list_del(&timeri->open_list); +- if (timer && list_empty(&timer->open_list_head) && ++ if (list_empty(&timer->open_list_head) && + timer->hw.close) + timer->hw.close(timer); + /* remove slave links */ +@@ -415,7 +414,7 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event) + spin_lock_irqsave(&timer->lock, flags); + list_for_each_entry(ts, &ti->slave_active_head, active_list) + if 
(ts->ccallback) +- ts->ccallback(ti, event + 100, &tstamp, resolution); ++ ts->ccallback(ts, event + 100, &tstamp, resolution); + spin_unlock_irqrestore(&timer->lock, flags); + } + +@@ -444,6 +443,10 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri) + unsigned long flags; + + spin_lock_irqsave(&slave_active_lock, flags); ++ if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) { ++ spin_unlock_irqrestore(&slave_active_lock, flags); ++ return -EBUSY; ++ } + timeri->flags |= SNDRV_TIMER_IFLG_RUNNING; + if (timeri->master && timeri->timer) { + spin_lock(&timeri->timer->lock); +@@ -468,23 +471,30 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks) + return -EINVAL; + if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { + result = snd_timer_start_slave(timeri); +- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); ++ if (result >= 0) ++ snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); + return result; + } + timer = timeri->timer; + if (timer == NULL) + return -EINVAL; + spin_lock_irqsave(&timer->lock, flags); ++ if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING | ++ SNDRV_TIMER_IFLG_START)) { ++ result = -EBUSY; ++ goto unlock; ++ } + timeri->ticks = timeri->cticks = ticks; + timeri->pticks = 0; + result = snd_timer_start1(timer, timeri, ticks); ++ unlock: + spin_unlock_irqrestore(&timer->lock, flags); +- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); ++ if (result >= 0) ++ snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); + return result; + } + +-static int _snd_timer_stop(struct snd_timer_instance * timeri, +- int keep_flag, int event) ++static int _snd_timer_stop(struct snd_timer_instance *timeri, int event) + { + struct snd_timer *timer; + unsigned long flags; +@@ -493,19 +503,30 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri, + return -ENXIO; + + if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { +- if (!keep_flag) { +- spin_lock_irqsave(&slave_active_lock, flags); +- timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING; +- list_del_init(&timeri->ack_list); +- list_del_init(&timeri->active_list); ++ spin_lock_irqsave(&slave_active_lock, flags); ++ if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) { + spin_unlock_irqrestore(&slave_active_lock, flags); ++ return -EBUSY; + } ++ if (timeri->timer) ++ spin_lock(&timeri->timer->lock); ++ timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING; ++ list_del_init(&timeri->ack_list); ++ list_del_init(&timeri->active_list); ++ if (timeri->timer) ++ spin_unlock(&timeri->timer->lock); ++ spin_unlock_irqrestore(&slave_active_lock, flags); + goto __end; + } + timer = timeri->timer; + if (!timer) + return -EINVAL; + spin_lock_irqsave(&timer->lock, flags); ++ if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING | ++ SNDRV_TIMER_IFLG_START))) { ++ spin_unlock_irqrestore(&timer->lock, flags); ++ return -EBUSY; ++ } + list_del_init(&timeri->ack_list); + list_del_init(&timeri->active_list); + if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) && +@@ -520,9 +541,7 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri, + } + } + } +- if (!keep_flag) +- timeri->flags &= +- ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START); ++ timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START); + spin_unlock_irqrestore(&timer->lock, flags); + __end: + if (event != SNDRV_TIMER_EVENT_RESOLUTION) +@@ -541,7 +560,7 @@ int snd_timer_stop(struct snd_timer_instance *timeri) + unsigned long flags; + int err; + +- err = _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_STOP); ++ err = _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_STOP); + if (err 
< 0) + return err; + timer = timeri->timer; +@@ -571,10 +590,15 @@ int snd_timer_continue(struct snd_timer_instance *timeri) + if (! timer) + return -EINVAL; + spin_lock_irqsave(&timer->lock, flags); ++ if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) { ++ result = -EBUSY; ++ goto unlock; ++ } + if (!timeri->cticks) + timeri->cticks = 1; + timeri->pticks = 0; + result = snd_timer_start1(timer, timeri, timer->sticks); ++ unlock: + spin_unlock_irqrestore(&timer->lock, flags); + snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE); + return result; +@@ -585,7 +609,7 @@ int snd_timer_continue(struct snd_timer_instance *timeri) + */ + int snd_timer_pause(struct snd_timer_instance * timeri) + { +- return _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_PAUSE); ++ return _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_PAUSE); + } + + /* +@@ -702,8 +726,8 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left) + ti->cticks = ti->ticks; + } else { + ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING; +- if (--timer->running) +- list_del_init(&ti->active_list); ++ --timer->running; ++ list_del_init(&ti->active_list); + } + if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) || + (ti->flags & SNDRV_TIMER_IFLG_FAST)) +diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c +index fd798f753609..982a2c2faf24 100644 +--- a/sound/drivers/dummy.c ++++ b/sound/drivers/dummy.c +@@ -109,6 +109,9 @@ struct dummy_timer_ops { + snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *); + }; + ++#define get_dummy_ops(substream) \ ++ (*(const struct dummy_timer_ops **)(substream)->runtime->private_data) ++ + struct dummy_model { + const char *name; + int (*playback_constraints)(struct snd_pcm_runtime *runtime); +@@ -137,7 +140,6 @@ struct snd_dummy { + int iobox; + struct snd_kcontrol *cd_volume_ctl; + struct snd_kcontrol *cd_switch_ctl; +- const struct dummy_timer_ops *timer_ops; + }; + + /* +@@ -231,6 +233,8 @@ struct dummy_model *dummy_models[] = { + */ + + struct dummy_systimer_pcm { ++ /* ops must be the first item */ ++ const struct dummy_timer_ops *timer_ops; + spinlock_t lock; + struct timer_list timer; + unsigned long base_time; +@@ -368,6 +372,8 @@ static struct dummy_timer_ops dummy_systimer_ops = { + */ + + struct dummy_hrtimer_pcm { ++ /* ops must be the first item */ ++ const struct dummy_timer_ops *timer_ops; + ktime_t base_time; + ktime_t period_time; + atomic_t running; +@@ -494,31 +500,25 @@ static struct dummy_timer_ops dummy_hrtimer_ops = { + + static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd) + { +- struct snd_dummy *dummy = snd_pcm_substream_chip(substream); +- + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: +- return dummy->timer_ops->start(substream); ++ return get_dummy_ops(substream)->start(substream); + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_SUSPEND: +- return dummy->timer_ops->stop(substream); ++ return get_dummy_ops(substream)->stop(substream); + } + return -EINVAL; + } + + static int dummy_pcm_prepare(struct snd_pcm_substream *substream) + { +- struct snd_dummy *dummy = snd_pcm_substream_chip(substream); +- +- return dummy->timer_ops->prepare(substream); ++ return get_dummy_ops(substream)->prepare(substream); + } + + static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream) + { +- struct snd_dummy *dummy = snd_pcm_substream_chip(substream); +- +- return dummy->timer_ops->pointer(substream); ++ return get_dummy_ops(substream)->pointer(substream); + } + + static struct snd_pcm_hardware 
dummy_pcm_hardware = { +@@ -564,17 +564,19 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream) + struct snd_dummy *dummy = snd_pcm_substream_chip(substream); + struct dummy_model *model = dummy->model; + struct snd_pcm_runtime *runtime = substream->runtime; ++ const struct dummy_timer_ops *ops; + int err; + +- dummy->timer_ops = &dummy_systimer_ops; ++ ops = &dummy_systimer_ops; + #ifdef CONFIG_HIGH_RES_TIMERS + if (hrtimer) +- dummy->timer_ops = &dummy_hrtimer_ops; ++ ops = &dummy_hrtimer_ops; + #endif + +- err = dummy->timer_ops->create(substream); ++ err = ops->create(substream); + if (err < 0) + return err; ++ get_dummy_ops(substream) = ops; + + runtime->hw = dummy->pcm_hw; + if (substream->pcm->device & 1) { +@@ -596,7 +598,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream) + err = model->capture_constraints(substream->runtime); + } + if (err < 0) { +- dummy->timer_ops->free(substream); ++ get_dummy_ops(substream)->free(substream); + return err; + } + return 0; +@@ -604,8 +606,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream) + + static int dummy_pcm_close(struct snd_pcm_substream *substream) + { +- struct snd_dummy *dummy = snd_pcm_substream_chip(substream); +- dummy->timer_ops->free(substream); ++ get_dummy_ops(substream)->free(substream); + return 0; + } + +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 69a2aafb0b0f..babbf238a648 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -2188,6 +2188,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { + SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), + SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), + SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), ++ SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP), + + /* All Apple entries are in codec SSIDs */ + SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF), +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c +index 02d26915b61d..c6d408c819b1 100644 +--- a/sound/soc/soc-pcm.c ++++ b/sound/soc/soc-pcm.c +@@ -1248,7 +1248,8 @@ static int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream) + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) && + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) && + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) && +- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP)) ++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) && ++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND)) + continue; + + dev_dbg(be->dev, "ASoC: hw_free BE %s\n", +diff --git a/sound/usb/midi.c b/sound/usb/midi.c +index dabbe05d17f5..37ecba340876 100644 +--- a/sound/usb/midi.c ++++ b/sound/usb/midi.c +@@ -2291,7 +2291,6 @@ int snd_usbmidi_create(struct snd_card *card, + else + err = snd_usbmidi_create_endpoints(umidi, endpoints); + if (err < 0) { +- snd_usbmidi_free(umidi); + return err; + } + +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c +index 94cd28c2bd8d..44550a4cf893 100644 +--- a/sound/usb/quirks.c ++++ b/sound/usb/quirks.c +@@ -900,8 +900,12 @@ void snd_usb_set_interface_quirk(struct usb_device *dev) + * "Playback Design" products need a 50ms delay after setting the + * USB interface. + */ +- if (le16_to_cpu(dev->descriptor.idVendor) == 0x23ba) ++ switch (le16_to_cpu(dev->descriptor.idVendor)) { ++ case 0x23ba: /* Playback Design */ ++ case 0x0644: /* TEAC Corp. 
*/ + mdelay(50); ++ break; ++ } + } + + void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, +@@ -916,6 +920,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, + (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) + mdelay(20); + ++ /* ++ * "TEAC Corp." products need a 20ms delay after each ++ * class compliant request ++ */ ++ if ((le16_to_cpu(dev->descriptor.idVendor) == 0x0644) && ++ (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) ++ mdelay(20); ++ + /* Marantz/Denon devices with USB DAC functionality need a delay + * after each class compliant request + */ diff --git a/patch/kernel/marvell-default/patch-3.10.97-98.patch b/patch/kernel/marvell-default/patch-3.10.97-98.patch new file mode 100644 index 000000000..332f8fede --- /dev/null +++ b/patch/kernel/marvell-default/patch-3.10.97-98.patch @@ -0,0 +1,1792 @@ +diff --git a/Makefile b/Makefile +index f26470169c70..dadd1edc6f84 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 10 +-SUBLEVEL = 97 ++SUBLEVEL = 98 + EXTRAVERSION = + NAME = TOSSUG Baby Fish + +diff --git a/arch/arm/common/icst.c b/arch/arm/common/icst.c +index 2dc6da70ae59..d7ed252708c5 100644 +--- a/arch/arm/common/icst.c ++++ b/arch/arm/common/icst.c +@@ -16,7 +16,7 @@ + */ + #include + #include +- ++#include + #include + + /* +@@ -29,7 +29,11 @@ EXPORT_SYMBOL(icst525_s2div); + + unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco) + { +- return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]); ++ u64 dividend = p->ref * 2 * (u64)(vco.v + 8); ++ u32 divisor = (vco.r + 2) * p->s2div[vco.s]; ++ ++ do_div(dividend, divisor); ++ return (unsigned long)dividend; + } + + EXPORT_SYMBOL(icst_hz); +@@ -58,6 +62,7 @@ icst_hz_to_vco(const struct icst_params *p, unsigned long freq) + + if (f > p->vco_min && f <= p->vco_max) + break; ++ i++; + } while (i < 8); + + if (i >= 8) +diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c +index 0392112a5d70..a5ecef7188ba 100644 +--- a/arch/m32r/kernel/setup.c ++++ b/arch/m32r/kernel/setup.c +@@ -81,7 +81,10 @@ static struct resource code_resource = { + }; + + unsigned long memory_start; ++EXPORT_SYMBOL(memory_start); ++ + unsigned long memory_end; ++EXPORT_SYMBOL(memory_end); + + void __init setup_arch(char **); + int get_cpuinfo(char *); +diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h +index c48a95035a77..4dde707a6ff7 100644 +--- a/arch/x86/include/asm/segment.h ++++ b/arch/x86/include/asm/segment.h +@@ -212,8 +212,19 @@ + #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) + + #ifdef __KERNEL__ ++ ++/* ++ * early_idt_handler_array is an array of entry points referenced in the ++ * early IDT. For simplicity, it's a real array with one entry point ++ * every nine bytes. That leaves room for an optional 'push $0' if the ++ * vector has no error code (two bytes), a 'push $vector_number' (two ++ * bytes), and a jump to the common entry code (up to five bytes). ++ */ ++#define EARLY_IDT_HANDLER_SIZE 9 ++ + #ifndef __ASSEMBLY__ +-extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5]; ++ ++extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE]; + + /* + * Load a segment. 
Fall back on loading the zero +diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c +index 55b67614ed94..3b861b7661ee 100644 +--- a/arch/x86/kernel/head64.c ++++ b/arch/x86/kernel/head64.c +@@ -162,7 +162,7 @@ void __init x86_64_start_kernel(char * real_mode_data) + clear_bss(); + + for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) +- set_intr_gate(i, &early_idt_handlers[i]); ++ set_intr_gate(i, &early_idt_handler_array[i]); + load_idt((const struct desc_ptr *)&idt_descr); + + copy_bootdata(__va(real_mode_data)); +diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S +index df63cae573e0..8060c8b95b3a 100644 +--- a/arch/x86/kernel/head_32.S ++++ b/arch/x86/kernel/head_32.S +@@ -499,21 +499,22 @@ check_x87: + __INIT + setup_once: + /* +- * Set up a idt with 256 entries pointing to ignore_int, +- * interrupt gates. It doesn't actually load idt - that needs +- * to be done on each CPU. Interrupts are enabled elsewhere, +- * when we can be relatively sure everything is ok. ++ * Set up a idt with 256 interrupt gates that push zero if there ++ * is no error code and then jump to early_idt_handler_common. ++ * It doesn't actually load the idt - that needs to be done on ++ * each CPU. Interrupts are enabled elsewhere, when we can be ++ * relatively sure everything is ok. + */ + + movl $idt_table,%edi +- movl $early_idt_handlers,%eax ++ movl $early_idt_handler_array,%eax + movl $NUM_EXCEPTION_VECTORS,%ecx + 1: + movl %eax,(%edi) + movl %eax,4(%edi) + /* interrupt gate, dpl=0, present */ + movl $(0x8E000000 + __KERNEL_CS),2(%edi) +- addl $9,%eax ++ addl $EARLY_IDT_HANDLER_SIZE,%eax + addl $8,%edi + loop 1b + +@@ -545,26 +546,28 @@ setup_once: + andl $0,setup_once_ref /* Once is enough, thanks */ + ret + +-ENTRY(early_idt_handlers) ++ENTRY(early_idt_handler_array) + # 36(%esp) %eflags + # 32(%esp) %cs + # 28(%esp) %eip + # 24(%rsp) error code + i = 0 + .rept NUM_EXCEPTION_VECTORS +- .if (EXCEPTION_ERRCODE_MASK >> i) & 1 +- ASM_NOP2 +- .else ++ .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1 + pushl $0 # Dummy error code, to make stack frame uniform + .endif + pushl $i # 20(%esp) Vector number +- jmp early_idt_handler ++ jmp early_idt_handler_common + i = i + 1 ++ .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc + .endr +-ENDPROC(early_idt_handlers) ++ENDPROC(early_idt_handler_array) + +- /* This is global to keep gas from relaxing the jumps */ +-ENTRY(early_idt_handler) ++early_idt_handler_common: ++ /* ++ * The stack is the hardware frame, an error code or zero, and the ++ * vector number. 
++ */ + cld + + cmpl $2,(%esp) # X86_TRAP_NMI +@@ -624,7 +627,7 @@ ex_entry: + is_nmi: + addl $8,%esp /* drop vector number and error code */ + iret +-ENDPROC(early_idt_handler) ++ENDPROC(early_idt_handler_common) + + /* This is the default interrupt "handler" :-) */ + ALIGN +diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S +index 3280489905a8..54bf9c2d0d13 100644 +--- a/arch/x86/kernel/head_64.S ++++ b/arch/x86/kernel/head_64.S +@@ -329,26 +329,28 @@ bad_address: + jmp bad_address + + __INIT +- .globl early_idt_handlers +-early_idt_handlers: ++ENTRY(early_idt_handler_array) + # 104(%rsp) %rflags + # 96(%rsp) %cs + # 88(%rsp) %rip + # 80(%rsp) error code + i = 0 + .rept NUM_EXCEPTION_VECTORS +- .if (EXCEPTION_ERRCODE_MASK >> i) & 1 +- ASM_NOP2 +- .else ++ .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1 + pushq $0 # Dummy error code, to make stack frame uniform + .endif + pushq $i # 72(%rsp) Vector number +- jmp early_idt_handler ++ jmp early_idt_handler_common + i = i + 1 ++ .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc + .endr ++ENDPROC(early_idt_handler_array) + +-/* This is global to keep gas from relaxing the jumps */ +-ENTRY(early_idt_handler) ++early_idt_handler_common: ++ /* ++ * The stack is the hardware frame, an error code or zero, and the ++ * vector number. ++ */ + cld + + cmpl $2,(%rsp) # X86_TRAP_NMI +@@ -420,7 +422,7 @@ ENTRY(early_idt_handler) + is_nmi: + addq $16,%rsp # drop vector number and error code + INTERRUPT_RETURN +-ENDPROC(early_idt_handler) ++ENDPROC(early_idt_handler_common) + + __INITDATA + +diff --git a/block/blk-core.c b/block/blk-core.c +index 5a750b18172e..9ae84ae05e6a 100644 +--- a/block/blk-core.c ++++ b/block/blk-core.c +@@ -3097,6 +3097,9 @@ int blk_pre_runtime_suspend(struct request_queue *q) + { + int ret = 0; + ++ if (!q->dev) ++ return ret; ++ + spin_lock_irq(q->queue_lock); + if (q->nr_pending) { + ret = -EBUSY; +@@ -3124,6 +3127,9 @@ EXPORT_SYMBOL(blk_pre_runtime_suspend); + */ + void blk_post_runtime_suspend(struct request_queue *q, int err) + { ++ if (!q->dev) ++ return; ++ + spin_lock_irq(q->queue_lock); + if (!err) { + q->rpm_status = RPM_SUSPENDED; +@@ -3148,6 +3154,9 @@ EXPORT_SYMBOL(blk_post_runtime_suspend); + */ + void blk_pre_runtime_resume(struct request_queue *q) + { ++ if (!q->dev) ++ return; ++ + spin_lock_irq(q->queue_lock); + q->rpm_status = RPM_RESUMING; + spin_unlock_irq(q->queue_lock); +@@ -3170,6 +3179,9 @@ EXPORT_SYMBOL(blk_pre_runtime_resume); + */ + void blk_post_runtime_resume(struct request_queue *q, int err) + { ++ if (!q->dev) ++ return; ++ + spin_lock_irq(q->queue_lock); + if (!err) { + q->rpm_status = RPM_ACTIVE; +diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c +index 334e31ff7a4e..6bd0c1ade9f2 100644 +--- a/drivers/iio/adc/ad7793.c ++++ b/drivers/iio/adc/ad7793.c +@@ -101,7 +101,7 @@ + #define AD7795_CH_AIN1M_AIN1M 8 /* AIN1(-) - AIN1(-) */ + + /* ID Register Bit Designations (AD7793_REG_ID) */ +-#define AD7785_ID 0xB ++#define AD7785_ID 0x3 + #define AD7792_ID 0xA + #define AD7793_ID 0xB + #define AD7794_ID 0xF +diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c +index aa26d50ab638..4eda4ea037b7 100644 +--- a/drivers/iio/dac/ad5064.c ++++ b/drivers/iio/dac/ad5064.c +@@ -602,10 +602,16 @@ static int ad5064_i2c_write(struct ad5064_state *st, unsigned int cmd, + unsigned int addr, unsigned int val) + { + struct i2c_client *i2c = to_i2c_client(st->dev); ++ int ret; + + st->data.i2c[0] = (cmd << 4) | addr; + put_unaligned_be16(val, &st->data.i2c[1]); +- 
return i2c_master_send(i2c, st->data.i2c, 3); ++ ++ ret = i2c_master_send(i2c, st->data.i2c, 3); ++ if (ret < 0) ++ return ret; ++ ++ return 0; + } + + static int ad5064_i2c_probe(struct i2c_client *i2c, +diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c +index a612ec766d96..029207bbf03d 100644 +--- a/drivers/iio/dac/mcp4725.c ++++ b/drivers/iio/dac/mcp4725.c +@@ -166,6 +166,7 @@ static int mcp4725_probe(struct i2c_client *client, + data->client = client; + + indio_dev->dev.parent = &client->dev; ++ indio_dev->name = id->name; + indio_dev->info = &mcp4725_info; + indio_dev->channels = &mcp4725_channel; + indio_dev->num_channels = 1; +diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c +index 99d8e0b0dd34..d0538bcdc1b8 100644 +--- a/drivers/iio/imu/adis_buffer.c ++++ b/drivers/iio/imu/adis_buffer.c +@@ -43,7 +43,7 @@ int adis_update_scan_mode(struct iio_dev *indio_dev, + return -ENOMEM; + + rx = adis->buffer; +- tx = rx + indio_dev->scan_bytes; ++ tx = rx + scan_count; + + spi_message_init(&adis->msg); + +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c +index 02099afb6c79..77f06d001a66 100644 +--- a/drivers/input/mouse/elantech.c ++++ b/drivers/input/mouse/elantech.c +@@ -1081,7 +1081,7 @@ static int elantech_set_input_params(struct psmouse *psmouse) + input_set_abs_params(dev, ABS_TOOL_WIDTH, ETP_WMIN_V2, + ETP_WMAX_V2, 0, 0); + } +- input_mt_init_slots(dev, 2, 0); ++ input_mt_init_slots(dev, 2, INPUT_MT_SEMI_MT); + input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0); + input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0); + break; +@@ -1357,6 +1357,13 @@ static const struct dmi_system_id no_hw_res_dmi_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "U2442"), + }, + }, ++ { ++ /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"), ++ }, ++ }, + #endif + { } + }; +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index 4de2571938b8..5102b4f68f18 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -258,6 +258,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { + }, + }, + { ++ /* Fujitsu Lifebook U745 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"), ++ }, ++ }, ++ { + /* Fujitsu T70H */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), +diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c +index a7967ceb79e6..3d4622cae2cf 100644 +--- a/drivers/iommu/dmar.c ++++ b/drivers/iommu/dmar.c +@@ -968,7 +968,7 @@ void dmar_disable_qi(struct intel_iommu *iommu) + + raw_spin_lock_irqsave(&iommu->register_lock, flags); + +- sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); ++ sts = readl(iommu->reg + DMAR_GSTS_REG); + if (!(sts & DMA_GSTS_QIES)) + goto end; + +diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c +index 45011f63ad16..990cc298824a 100644 +--- a/drivers/iommu/intel_irq_remapping.c ++++ b/drivers/iommu/intel_irq_remapping.c +@@ -495,7 +495,7 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu) + + raw_spin_lock_irqsave(&iommu->register_lock, flags); + +- sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); ++ sts = readl(iommu->reg + DMAR_GSTS_REG); + if (!(sts & DMA_GSTS_IRES)) + goto end; + +diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c +index 
5895f1978691..e98de425f8e0 100644 +--- a/drivers/net/wan/x25_asy.c ++++ b/drivers/net/wan/x25_asy.c +@@ -545,16 +545,12 @@ static void x25_asy_receive_buf(struct tty_struct *tty, + + static int x25_asy_open_tty(struct tty_struct *tty) + { +- struct x25_asy *sl = tty->disc_data; ++ struct x25_asy *sl; + int err; + + if (tty->ops->write == NULL) + return -EOPNOTSUPP; + +- /* First make sure we're not already connected. */ +- if (sl && sl->magic == X25_ASY_MAGIC) +- return -EEXIST; +- + /* OK. Find a free X.25 channel to use. */ + sl = x25_asy_alloc(); + if (sl == NULL) +diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c +index 02bc5a6343c3..aa454241489c 100644 +--- a/drivers/platform/x86/intel_scu_ipcutil.c ++++ b/drivers/platform/x86/intel_scu_ipcutil.c +@@ -49,7 +49,7 @@ struct scu_ipc_data { + + static int scu_reg_access(u32 cmd, struct scu_ipc_data *data) + { +- int count = data->count; ++ unsigned int count = data->count; + + if (count == 0 || count == 3 || count > 4) + return -EINVAL; +diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c +index 69c915aa77c2..d661fcda1932 100644 +--- a/drivers/scsi/device_handler/scsi_dh_rdac.c ++++ b/drivers/scsi/device_handler/scsi_dh_rdac.c +@@ -569,7 +569,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev, + /* + * Command Lock contention + */ +- err = SCSI_DH_RETRY; ++ err = SCSI_DH_IMM_RETRY; + break; + default: + break; +@@ -619,6 +619,8 @@ retry: + err = mode_select_handle_sense(sdev, h->sense); + if (err == SCSI_DH_RETRY && retry_cnt--) + goto retry; ++ if (err == SCSI_DH_IMM_RETRY) ++ goto retry; + } + if (err == SCSI_DH_OK) { + h->state = RDAC_STATE_ACTIVE; +diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c +index 3cafe0d784b8..3020f1ff4abb 100644 +--- a/drivers/scsi/hosts.c ++++ b/drivers/scsi/hosts.c +@@ -305,6 +305,17 @@ static void scsi_host_dev_release(struct device *dev) + kfree(queuedata); + } + ++ if (shost->shost_state == SHOST_CREATED) { ++ /* ++ * Free the shost_dev device name here if scsi_host_alloc() ++ * and scsi_host_put() have been called but neither ++ * scsi_host_add() nor scsi_host_remove() has been called. ++ * This avoids that the memory allocated for the shost_dev ++ * name is leaked. 
++ */ ++ kfree(dev_name(&shost->shost_dev)); ++ } ++ + scsi_destroy_command_freelist(shost); + if (shost->bqt) + blk_free_tags(shost->bqt); +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c +index 9e2dd478dd15..135d7b56fbe6 100644 +--- a/drivers/scsi/scsi_sysfs.c ++++ b/drivers/scsi/scsi_sysfs.c +@@ -789,7 +789,7 @@ sdev_store_queue_ramp_up_period(struct device *dev, + return -EINVAL; + + sdev->queue_ramp_up_period = msecs_to_jiffies(period); +- return period; ++ return count; + } + + static struct device_attribute sdev_attr_queue_ramp_up_period = +@@ -1030,31 +1030,25 @@ static void __scsi_remove_target(struct scsi_target *starget) + void scsi_remove_target(struct device *dev) + { + struct Scsi_Host *shost = dev_to_shost(dev->parent); +- struct scsi_target *starget, *last = NULL; ++ struct scsi_target *starget, *last_target = NULL; + unsigned long flags; + +- /* remove targets being careful to lookup next entry before +- * deleting the last +- */ ++restart: + spin_lock_irqsave(shost->host_lock, flags); + list_for_each_entry(starget, &shost->__targets, siblings) { +- if (starget->state == STARGET_DEL) ++ if (starget->state == STARGET_DEL || ++ starget == last_target) + continue; + if (starget->dev.parent == dev || &starget->dev == dev) { +- /* assuming new targets arrive at the end */ + kref_get(&starget->reap_ref); ++ last_target = starget; + spin_unlock_irqrestore(shost->host_lock, flags); +- if (last) +- scsi_target_reap(last); +- last = starget; + __scsi_remove_target(starget); +- spin_lock_irqsave(shost->host_lock, flags); ++ scsi_target_reap(starget); ++ goto restart; + } + } + spin_unlock_irqrestore(shost->host_lock, flags); +- +- if (last) +- scsi_target_reap(last); + } + EXPORT_SYMBOL(scsi_remove_target); + +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c +index 26b543bc4f53..4afce0e838a2 100644 +--- a/drivers/scsi/sd.c ++++ b/drivers/scsi/sd.c +@@ -3090,8 +3090,8 @@ static int sd_suspend(struct device *dev) + struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); + int ret = 0; + +- if (!sdkp) +- return 0; /* this can happen */ ++ if (!sdkp) /* E.g.: runtime suspend following sd_remove() */ ++ return 0; + + if (sdkp->WCE) { + sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); +@@ -3115,6 +3115,9 @@ static int sd_resume(struct device *dev) + struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); + int ret = 0; + ++ if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ ++ return 0; ++ + if (!sdkp->device->manage_start_stop) + goto done; + +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index 721d839d6c54..0be16bf5f0cd 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -1258,7 +1258,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma) + } + + sfp->mmap_called = 1; +- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; ++ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_private_data = sfp; + vma->vm_ops = &sg_mmap_vm_ops; + return 0; +diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c +index 119d67f9c47e..1ac9943cbb93 100644 +--- a/drivers/scsi/sr.c ++++ b/drivers/scsi/sr.c +@@ -142,6 +142,9 @@ static int sr_runtime_suspend(struct device *dev) + { + struct scsi_cd *cd = dev_get_drvdata(dev); + ++ if (!cd) /* E.g.: runtime suspend following sr_remove() */ ++ return 0; ++ + if (cd->media_present) + return -EBUSY; + else +@@ -1006,6 +1009,7 @@ static int sr_remove(struct device *dev) + + blk_queue_prep_rq(cd->device->request_queue, scsi_prep_fn); + del_gendisk(cd->disk); ++ dev_set_drvdata(dev, NULL); + + 
mutex_lock(&sr_ref_mutex); + kref_put(&cd->kref, sr_kref_release); +diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c +index 2f2f7fdd0691..9cbe2dd70499 100644 +--- a/drivers/staging/iio/adc/lpc32xx_adc.c ++++ b/drivers/staging/iio/adc/lpc32xx_adc.c +@@ -76,7 +76,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev, + + if (mask == IIO_CHAN_INFO_RAW) { + mutex_lock(&indio_dev->mlock); +- clk_enable(info->clk); ++ clk_prepare_enable(info->clk); + /* Measurement setup */ + __raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm, + LPC32XX_ADC_SELECT(info->adc_base)); +@@ -84,7 +84,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev, + __raw_writel(AD_PDN_CTRL | AD_STROBE, + LPC32XX_ADC_CTRL(info->adc_base)); + wait_for_completion(&info->completion); /* set by ISR */ +- clk_disable(info->clk); ++ clk_disable_unprepare(info->clk); + *val = info->value; + mutex_unlock(&indio_dev->mlock); + +diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c +index b9359753784e..364978e63d8d 100644 +--- a/drivers/staging/speakup/selection.c ++++ b/drivers/staging/speakup/selection.c +@@ -139,7 +139,9 @@ static void __speakup_paste_selection(struct work_struct *work) + struct tty_ldisc *ld; + DECLARE_WAITQUEUE(wait, current); + +- ld = tty_ldisc_ref_wait(tty); ++ ld = tty_ldisc_ref(tty); ++ if (!ld) ++ goto tty_unref; + + /* FIXME: this is completely unsafe */ + add_wait_queue(&vc->paste_wait, &wait); +@@ -158,6 +160,7 @@ static void __speakup_paste_selection(struct work_struct *work) + current->state = TASK_RUNNING; + + tty_ldisc_deref(ld); ++tty_unref: + tty_kref_put(tty); + } + +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index 06cd916f91fe..d74da9598d58 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -3960,6 +3960,17 @@ reject: + return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); + } + ++static bool iscsi_target_check_conn_state(struct iscsi_conn *conn) ++{ ++ bool ret; ++ ++ spin_lock_bh(&conn->state_lock); ++ ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN); ++ spin_unlock_bh(&conn->state_lock); ++ ++ return ret; ++} ++ + int iscsi_target_rx_thread(void *arg) + { + int ret, rc; +@@ -3977,7 +3988,7 @@ int iscsi_target_rx_thread(void *arg) + * incoming iscsi/tcp socket I/O, and/or failing the connection. + */ + rc = wait_for_completion_interruptible(&conn->rx_login_comp); +- if (rc < 0) ++ if (rc < 0 || iscsi_target_check_conn_state(conn)) + return 0; + + if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { +diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c +index c45b3365d63d..200d779d0c03 100644 +--- a/drivers/target/iscsi/iscsi_target_configfs.c ++++ b/drivers/target/iscsi/iscsi_target_configfs.c +@@ -1730,7 +1730,8 @@ static void lio_tpg_release_fabric_acl( + } + + /* +- * Called with spin_lock_bh(struct se_portal_group->session_lock) held.. ++ * Called with spin_lock_irq(struct se_portal_group->session_lock) held ++ * or not held. + * + * Also, this function calls iscsit_inc_session_usage_count() on the + * struct iscsi_session in question. 
+@@ -1738,19 +1739,32 @@ static void lio_tpg_release_fabric_acl( + static int lio_tpg_shutdown_session(struct se_session *se_sess) + { + struct iscsi_session *sess = se_sess->fabric_sess_ptr; ++ struct se_portal_group *se_tpg = se_sess->se_tpg; ++ bool local_lock = false; ++ ++ if (!spin_is_locked(&se_tpg->session_lock)) { ++ spin_lock_irq(&se_tpg->session_lock); ++ local_lock = true; ++ } + + spin_lock(&sess->conn_lock); + if (atomic_read(&sess->session_fall_back_to_erl0) || + atomic_read(&sess->session_logout) || + (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { + spin_unlock(&sess->conn_lock); ++ if (local_lock) ++ spin_unlock_irq(&sess->conn_lock); + return 0; + } + atomic_set(&sess->session_reinstatement, 1); + spin_unlock(&sess->conn_lock); + + iscsit_stop_time2retain_timer(sess); ++ spin_unlock_irq(&se_tpg->session_lock); ++ + iscsit_stop_session(sess, 1, 1); ++ if (!local_lock) ++ spin_lock_irq(&se_tpg->session_lock); + + return 1; + } +diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c +index 77c276acccb6..2a61a01142e9 100644 +--- a/drivers/target/iscsi/iscsi_target_nego.c ++++ b/drivers/target/iscsi/iscsi_target_nego.c +@@ -384,6 +384,7 @@ err: + if (login->login_complete) { + if (conn->rx_thread && conn->rx_thread_active) { + send_sig(SIGINT, conn->rx_thread, 1); ++ complete(&conn->rx_login_comp); + kthread_stop(conn->rx_thread); + } + if (conn->tx_thread && conn->tx_thread_active) { +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c +index 7cb36813aac2..deee2b81afff 100644 +--- a/drivers/tty/pty.c ++++ b/drivers/tty/pty.c +@@ -623,7 +623,14 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) + /* this is called once with whichever end is closed last */ + static void pty_unix98_shutdown(struct tty_struct *tty) + { +- devpts_kill_index(tty->driver_data, tty->index); ++ struct inode *ptmx_inode; ++ ++ if (tty->driver->subtype == PTY_TYPE_MASTER) ++ ptmx_inode = tty->driver_data; ++ else ++ ptmx_inode = tty->link->driver_data; ++ devpts_kill_index(ptmx_inode, tty->index); ++ devpts_del_ref(ptmx_inode); + } + + static const struct tty_operations ptm_unix98_ops = { +@@ -714,6 +721,18 @@ static int ptmx_open(struct inode *inode, struct file *filp) + set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ + tty->driver_data = inode; + ++ /* ++ * In the case where all references to ptmx inode are dropped and we ++ * still have /dev/tty opened pointing to the master/slave pair (ptmx ++ * is closed/released before /dev/tty), we must make sure that the inode ++ * is still valid when we call the final pty_unix98_shutdown, thus we ++ * hold an additional reference to the ptmx inode. For the same /dev/tty ++ * last close case, we also need to make sure the super_block isn't ++ * destroyed (devpts instance unmounted), before /dev/tty is closed and ++ * on its release devpts_kill_index is called. 
++ */ ++ devpts_add_ref(inode); ++ + tty_add_file(tty, filp); + + slave_inode = devpts_pty_new(inode, +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index 9dd6fa3a1260..507677b9bdc7 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -1502,7 +1502,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) + if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { + xhci_dbg(xhci, "HW died, freeing TD.\n"); + urb_priv = urb->hcpriv; +- for (i = urb_priv->td_cnt; i < urb_priv->length; i++) { ++ for (i = urb_priv->td_cnt; ++ i < urb_priv->length && xhci->devs[urb->dev->slot_id]; ++ i++) { + td = urb_priv->td[i]; + if (!list_empty(&td->td_list)) + list_del_init(&td->td_list); +diff --git a/fs/aio.c b/fs/aio.c +index ded94c4fa30d..9798d4edfd8f 100644 +--- a/fs/aio.c ++++ b/fs/aio.c +@@ -977,12 +977,17 @@ static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat) + + static ssize_t aio_setup_single_vector(int rw, struct kiocb *kiocb) + { +- if (unlikely(!access_ok(!rw, kiocb->ki_buf, kiocb->ki_nbytes))) +- return -EFAULT; ++ size_t len = kiocb->ki_nbytes; ++ ++ if (len > MAX_RW_COUNT) ++ len = MAX_RW_COUNT; ++ ++ if (unlikely(!access_ok(!rw, kiocb->ki_buf, len))) ++ return -EFAULT; + + kiocb->ki_iovec = &kiocb->ki_inline_vec; + kiocb->ki_iovec->iov_base = kiocb->ki_buf; +- kiocb->ki_iovec->iov_len = kiocb->ki_nbytes; ++ kiocb->ki_iovec->iov_len = len; + kiocb->ki_nr_segs = 1; + return 0; + } +diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c +index d85f90c92bb4..bca854b44056 100644 +--- a/fs/btrfs/backref.c ++++ b/fs/btrfs/backref.c +@@ -1228,7 +1228,8 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, + read_extent_buffer(eb, dest + bytes_left, + name_off, name_len); + if (eb != eb_in) { +- btrfs_tree_read_unlock_blocking(eb); ++ if (!path->skip_locking) ++ btrfs_tree_read_unlock_blocking(eb); + free_extent_buffer(eb); + } + ret = inode_ref_info(parent, 0, fs_root, path, &found_key); +@@ -1247,9 +1248,10 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, + eb = path->nodes[0]; + /* make sure we can use eb after releasing the path */ + if (eb != eb_in) { +- atomic_inc(&eb->refs); +- btrfs_tree_read_lock(eb); +- btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); ++ if (!path->skip_locking) ++ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); ++ path->nodes[0] = NULL; ++ path->locks[0] = 0; + } + btrfs_release_path(path); + iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); +diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c +index 5c807b23ca67..182e82f22b3a 100644 +--- a/fs/cifs/cifsencrypt.c ++++ b/fs/cifs/cifsencrypt.c +@@ -591,7 +591,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) + + ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL); + if (!ses->auth_key.response) { +- rc = ENOMEM; ++ rc = -ENOMEM; + ses->auth_key.len = 0; + goto setup_ntlmv2_rsp_ret; + } +diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c +index a726b9f29cb7..61af24e379ad 100644 +--- a/fs/devpts/inode.c ++++ b/fs/devpts/inode.c +@@ -564,6 +564,26 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx) + mutex_unlock(&allocated_ptys_lock); + } + ++/* ++ * pty code needs to hold extra references in case of last /dev/tty close ++ */ ++ ++void devpts_add_ref(struct inode *ptmx_inode) ++{ ++ struct super_block *sb = pts_sb_from_inode(ptmx_inode); ++ ++ atomic_inc(&sb->s_active); ++ ihold(ptmx_inode); ++} ++ ++void 
devpts_del_ref(struct inode *ptmx_inode) ++{ ++ struct super_block *sb = pts_sb_from_inode(ptmx_inode); ++ ++ iput(ptmx_inode); ++ deactivate_super(sb); ++} ++ + /** + * devpts_pty_new -- create a new inode in /dev/pts/ + * @ptmx_inode: inode of the master +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c +index fa7d2e668c3a..cf0a70486618 100644 +--- a/fs/ext4/resize.c ++++ b/fs/ext4/resize.c +@@ -181,7 +181,7 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size) + if (flex_gd == NULL) + goto out3; + +- if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data)) ++ if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data)) + goto out2; + flex_gd->count = flexbg_size; + +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index 4fafb8484bbc..35f604b5f408 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -993,6 +993,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, + + mark_page_accessed(page); + ++ iov_iter_advance(ii, tmp); + if (!tmp) { + unlock_page(page); + page_cache_release(page); +@@ -1005,7 +1006,6 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, + req->page_descs[req->num_pages].length = tmp; + req->num_pages++; + +- iov_iter_advance(ii, tmp); + count += tmp; + pos += tmp; + offset += tmp; +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 78679b489484..d8ac734a1e44 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -1005,6 +1005,7 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s + * Protect the call to nfs4_state_set_mode_locked and + * serialise the stateid update + */ ++ spin_lock(&state->owner->so_lock); + write_seqlock(&state->seqlock); + if (deleg_stateid != NULL) { + nfs4_stateid_copy(&state->stateid, deleg_stateid); +@@ -1013,7 +1014,6 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s + if (open_stateid != NULL) + nfs_set_open_stateid_locked(state, open_stateid, fmode); + write_sequnlock(&state->seqlock); +- spin_lock(&state->owner->so_lock); + update_open_stateflags(state, fmode); + spin_unlock(&state->owner->so_lock); + } +diff --git a/fs/proc/array.c b/fs/proc/array.c +index 09f0d9c374a3..5c45eb5e4e0d 100644 +--- a/fs/proc/array.c ++++ b/fs/proc/array.c +@@ -398,7 +398,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + + state = *get_task_state(task); + vsize = eip = esp = 0; +- permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT); ++ permitted = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT); + mm = get_task_mm(task); + if (mm) { + vsize = task_vsize(mm); +diff --git a/fs/proc/base.c b/fs/proc/base.c +index 8fc784aef0b8..7b5d453ebf53 100644 +--- a/fs/proc/base.c ++++ b/fs/proc/base.c +@@ -239,7 +239,7 @@ out: + + static int proc_pid_auxv(struct task_struct *task, char *buffer) + { +- struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ); ++ struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); + int res = PTR_ERR(mm); + if (mm && !IS_ERR(mm)) { + unsigned int nwords = 0; +@@ -269,7 +269,7 @@ static int proc_pid_wchan(struct task_struct *task, char *buffer) + wchan = get_wchan(task); + + if (lookup_symbol_name(wchan, symname) < 0) +- if (!ptrace_may_access(task, PTRACE_MODE_READ)) ++ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) + return 0; + else + return sprintf(buffer, "%lu", wchan); +@@ -283,7 +283,7 @@ static int lock_trace(struct task_struct *task) + int err = 
mutex_lock_killable(&task->signal->cred_guard_mutex); + if (err) + return err; +- if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) { ++ if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) { + mutex_unlock(&task->signal->cred_guard_mutex); + return -EPERM; + } +@@ -557,7 +557,7 @@ static int proc_fd_access_allowed(struct inode *inode) + */ + task = get_proc_task(inode); + if (task) { +- allowed = ptrace_may_access(task, PTRACE_MODE_READ); ++ allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS); + put_task_struct(task); + } + return allowed; +@@ -592,7 +592,7 @@ static bool has_pid_permissions(struct pid_namespace *pid, + return true; + if (in_group_p(pid->pid_gid)) + return true; +- return ptrace_may_access(task, PTRACE_MODE_READ); ++ return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS); + } + + +@@ -707,7 +707,7 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode) + if (!task) + return -ESRCH; + +- mm = mm_access(task, mode); ++ mm = mm_access(task, mode | PTRACE_MODE_FSCREDS); + put_task_struct(task); + + if (IS_ERR(mm)) +@@ -1761,7 +1761,7 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags) + if (!task) + goto out_notask; + +- mm = mm_access(task, PTRACE_MODE_READ); ++ mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); + if (IS_ERR_OR_NULL(mm)) + goto out; + +@@ -1896,7 +1896,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir, + goto out; + + result = ERR_PTR(-EACCES); +- if (!ptrace_may_access(task, PTRACE_MODE_READ)) ++ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) + goto out_put_task; + + result = ERR_PTR(-ENOENT); +@@ -1952,7 +1952,7 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir) + goto out; + + ret = -EACCES; +- if (!ptrace_may_access(task, PTRACE_MODE_READ)) ++ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) + goto out_put_task; + + ret = 0; +@@ -2488,7 +2488,7 @@ static int do_io_accounting(struct task_struct *task, char *buffer, int whole) + if (result) + return result; + +- if (!ptrace_may_access(task, PTRACE_MODE_READ)) { ++ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) { + result = -EACCES; + goto out_unlock; + } +diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c +index 54bdc6701e9f..ac49a8d4aaf8 100644 +--- a/fs/proc/namespaces.c ++++ b/fs/proc/namespaces.c +@@ -125,7 +125,7 @@ static void *proc_ns_follow_link(struct dentry *dentry, struct nameidata *nd) + if (!task) + goto out; + +- if (!ptrace_may_access(task, PTRACE_MODE_READ)) ++ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) + goto out_put_task; + + ns_path.dentry = proc_ns_get_dentry(sb, task, ei->ns.ns_ops); +@@ -158,7 +158,7 @@ static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int bufl + if (!task) + goto out; + +- if (!ptrace_may_access(task, PTRACE_MODE_READ)) ++ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) + goto out_put_task; + + len = -ENOENT; +diff --git a/fs/udf/inode.c b/fs/udf/inode.c +index 789814f27438..5c1120a5fa42 100644 +--- a/fs/udf/inode.c ++++ b/fs/udf/inode.c +@@ -2055,14 +2055,29 @@ void udf_write_aext(struct inode *inode, struct extent_position *epos, + epos->offset += adsize; + } + ++/* ++ * Only 1 indirect extent in a row really makes sense but allow upto 16 in case ++ * someone does some weird stuff. 
++ */ ++#define UDF_MAX_INDIR_EXTS 16 ++ + int8_t udf_next_aext(struct inode *inode, struct extent_position *epos, + struct kernel_lb_addr *eloc, uint32_t *elen, int inc) + { + int8_t etype; ++ unsigned int indirections = 0; + + while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) == + (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) { + int block; ++ ++ if (++indirections > UDF_MAX_INDIR_EXTS) { ++ udf_err(inode->i_sb, ++ "too many indirect extents in inode %lu\n", ++ inode->i_ino); ++ return -1; ++ } ++ + epos->block = *eloc; + epos->offset = sizeof(struct allocExtDesc); + brelse(epos->bh); +diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c +index 44b815e57f94..685fbd8a2937 100644 +--- a/fs/udf/unicode.c ++++ b/fs/udf/unicode.c +@@ -132,11 +132,15 @@ int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i) + if (c < 0x80U) + utf_o->u_name[utf_o->u_len++] = (uint8_t)c; + else if (c < 0x800U) { ++ if (utf_o->u_len > (UDF_NAME_LEN - 4)) ++ break; + utf_o->u_name[utf_o->u_len++] = + (uint8_t)(0xc0 | (c >> 6)); + utf_o->u_name[utf_o->u_len++] = + (uint8_t)(0x80 | (c & 0x3f)); + } else { ++ if (utf_o->u_len > (UDF_NAME_LEN - 5)) ++ break; + utf_o->u_name[utf_o->u_len++] = + (uint8_t)(0xe0 | (c >> 12)); + utf_o->u_name[utf_o->u_len++] = +@@ -177,17 +181,22 @@ int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i) + static int udf_UTF8toCS0(dstring *ocu, struct ustr *utf, int length) + { + unsigned c, i, max_val, utf_char; +- int utf_cnt, u_len; ++ int utf_cnt, u_len, u_ch; + + memset(ocu, 0, sizeof(dstring) * length); + ocu[0] = 8; + max_val = 0xffU; ++ u_ch = 1; + + try_again: + u_len = 0U; + utf_char = 0U; + utf_cnt = 0U; + for (i = 0U; i < utf->u_len; i++) { ++ /* Name didn't fit? */ ++ if (u_len + 1 + u_ch >= length) ++ return 0; ++ + c = (uint8_t)utf->u_name[i]; + + /* Complete a multi-byte UTF-8 character */ +@@ -229,6 +238,7 @@ try_again: + if (max_val == 0xffU) { + max_val = 0xffffU; + ocu[0] = (uint8_t)0x10U; ++ u_ch = 2; + goto try_again; + } + goto error_out; +@@ -281,7 +291,7 @@ static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o, + c = (c << 8) | ocu[i++]; + + len = nls->uni2char(c, &utf_o->u_name[utf_o->u_len], +- UDF_NAME_LEN - utf_o->u_len); ++ UDF_NAME_LEN - 2 - utf_o->u_len); + /* Valid character? */ + if (len >= 0) + utf_o->u_len += len; +@@ -299,15 +309,19 @@ static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni, + int len; + unsigned i, max_val; + uint16_t uni_char; +- int u_len; ++ int u_len, u_ch; + + memset(ocu, 0, sizeof(dstring) * length); + ocu[0] = 8; + max_val = 0xffU; ++ u_ch = 1; + + try_again: + u_len = 0U; + for (i = 0U; i < uni->u_len; i++) { ++ /* Name didn't fit? */ ++ if (u_len + 1 + u_ch >= length) ++ return 0; + len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char); + if (!len) + continue; +@@ -320,6 +334,7 @@ try_again: + if (uni_char > max_val) { + max_val = 0xffffU; + ocu[0] = (uint8_t)0x10U; ++ u_ch = 2; + goto try_again; + } + +diff --git a/include/linux/compiler.h b/include/linux/compiler.h +index a2329c5e6206..a2ce6f8871c4 100644 +--- a/include/linux/compiler.h ++++ b/include/linux/compiler.h +@@ -131,7 +131,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); + */ + #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) + #define __trace_if(cond) \ +- if (__builtin_constant_p((cond)) ? !!(cond) : \ ++ if (__builtin_constant_p(!!(cond)) ? 
!!(cond) : \ + ({ \ + int ______r; \ + static struct ftrace_branch_data \ +diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h +index 251a2090a554..e0ee0b3000b2 100644 +--- a/include/linux/devpts_fs.h ++++ b/include/linux/devpts_fs.h +@@ -19,6 +19,8 @@ + + int devpts_new_index(struct inode *ptmx_inode); + void devpts_kill_index(struct inode *ptmx_inode, int idx); ++void devpts_add_ref(struct inode *ptmx_inode); ++void devpts_del_ref(struct inode *ptmx_inode); + /* mknod in devpts */ + struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index, + void *priv); +@@ -32,6 +34,8 @@ void devpts_pty_kill(struct inode *inode); + /* Dummy stubs in the no-pty case */ + static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; } + static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { } ++static inline void devpts_add_ref(struct inode *ptmx_inode) { } ++static inline void devpts_del_ref(struct inode *ptmx_inode) { } + static inline struct inode *devpts_pty_new(struct inode *ptmx_inode, + dev_t device, int index, void *priv) + { +diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h +index bb980ae6d9d3..6af8988f5ddd 100644 +--- a/include/linux/ptrace.h ++++ b/include/linux/ptrace.h +@@ -56,7 +56,29 @@ extern void exit_ptrace(struct task_struct *tracer); + #define PTRACE_MODE_READ 0x01 + #define PTRACE_MODE_ATTACH 0x02 + #define PTRACE_MODE_NOAUDIT 0x04 +-/* Returns true on success, false on denial. */ ++#define PTRACE_MODE_FSCREDS 0x08 ++#define PTRACE_MODE_REALCREDS 0x10 ++ ++/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */ ++#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS) ++#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS) ++#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS) ++#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS) ++ ++/** ++ * ptrace_may_access - check whether the caller is permitted to access ++ * a target task. ++ * @task: target task ++ * @mode: selects type of access and caller credentials ++ * ++ * Returns true on success, false on denial. ++ * ++ * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must ++ * be set in @mode to specify whether the access was requested through ++ * a filesystem syscall (should use effective capabilities and fsuid ++ * of the caller) or through an explicit syscall such as ++ * process_vm_writev or ptrace (and should use the real credentials). ++ */ + extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); + + static inline int ptrace_reparented(struct task_struct *child) +diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h +index ffc444c38b0a..e02e09f85fad 100644 +--- a/include/linux/radix-tree.h ++++ b/include/linux/radix-tree.h +@@ -322,12 +322,28 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, + struct radix_tree_iter *iter, unsigned flags); + + /** ++ * radix_tree_iter_retry - retry this chunk of the iteration ++ * @iter: iterator state ++ * ++ * If we iterate over a tree protected only by the RCU lock, a race ++ * against deletion or creation may result in seeing a slot for which ++ * radix_tree_deref_retry() returns true. If so, call this function ++ * and continue the iteration. 
++ */ ++static inline __must_check ++void **radix_tree_iter_retry(struct radix_tree_iter *iter) ++{ ++ iter->next_index = iter->index; ++ return NULL; ++} ++ ++/** + * radix_tree_chunk_size - get current chunk size + * + * @iter: pointer to radix tree iterator + * Returns: current chunk size + */ +-static __always_inline unsigned ++static __always_inline long + radix_tree_chunk_size(struct radix_tree_iter *iter) + { + return iter->next_index - iter->index; +@@ -361,9 +377,9 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) + return slot + offset + 1; + } + } else { +- unsigned size = radix_tree_chunk_size(iter) - 1; ++ long size = radix_tree_chunk_size(iter); + +- while (size--) { ++ while (--size > 0) { + slot++; + iter->index++; + if (likely(*slot)) +diff --git a/kernel/events/core.c b/kernel/events/core.c +index d9b0aad17dbf..0f5207839673 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -2938,7 +2938,7 @@ find_lively_task_by_vpid(pid_t vpid) + + /* Reuse ptrace permission checks for now. */ + err = -EACCES; +- if (!ptrace_may_access(task, PTRACE_MODE_READ)) ++ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) + goto errout; + + return task; +@@ -5639,6 +5639,10 @@ static int perf_tp_filter_match(struct perf_event *event, + { + void *record = data->raw->data; + ++ /* only top level events have filters set */ ++ if (event->parent) ++ event = event->parent; ++ + if (likely(!event->filter) || filter_match_preds(event->filter, record)) + return 1; + return 0; +diff --git a/kernel/futex.c b/kernel/futex.c +index 625a4e659e7a..edc4beae4df1 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -2494,6 +2494,11 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + if (q.pi_state && (q.pi_state->owner != current)) { + spin_lock(q.lock_ptr); + ret = fixup_pi_state_owner(uaddr2, &q, current); ++ /* ++ * Drop the reference to the pi state which ++ * the requeue_pi() code acquired for us. 
++ */ ++ free_pi_state(q.pi_state); + spin_unlock(q.lock_ptr); + } + } else { +@@ -2620,7 +2625,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pid, + } + + ret = -EPERM; +- if (!ptrace_may_access(p, PTRACE_MODE_READ)) ++ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) + goto err_unlock; + + head = p->robust_list; +diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c +index f9f44fd4d34d..3888617a1f9e 100644 +--- a/kernel/futex_compat.c ++++ b/kernel/futex_compat.c +@@ -155,7 +155,7 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, + } + + ret = -EPERM; +- if (!ptrace_may_access(p, PTRACE_MODE_READ)) ++ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) + goto err_unlock; + + head = p->compat_robust_list; +diff --git a/kernel/kcmp.c b/kernel/kcmp.c +index 0aa69ea1d8fd..3a47fa998fe0 100644 +--- a/kernel/kcmp.c ++++ b/kernel/kcmp.c +@@ -122,8 +122,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type, + &task2->signal->cred_guard_mutex); + if (ret) + goto err; +- if (!ptrace_may_access(task1, PTRACE_MODE_READ) || +- !ptrace_may_access(task2, PTRACE_MODE_READ)) { ++ if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) || ++ !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) { + ret = -EPERM; + goto err_unlock; + } +diff --git a/kernel/module.c b/kernel/module.c +index fd2afdf48a89..70a4754c001f 100644 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -3398,6 +3398,11 @@ static inline int is_arm_mapping_symbol(const char *str) + && (str[2] == '\0' || str[2] == '.'); + } + ++static const char *symname(struct module *mod, unsigned int symnum) ++{ ++ return mod->strtab + mod->symtab[symnum].st_name; ++} ++ + static const char *get_ksymbol(struct module *mod, + unsigned long addr, + unsigned long *size, +@@ -3420,15 +3425,15 @@ static const char *get_ksymbol(struct module *mod, + + /* We ignore unnamed symbols: they're uninformative + * and inserted at a whim. */ ++ if (*symname(mod, i) == '\0' ++ || is_arm_mapping_symbol(symname(mod, i))) ++ continue; ++ + if (mod->symtab[i].st_value <= addr +- && mod->symtab[i].st_value > mod->symtab[best].st_value +- && *(mod->strtab + mod->symtab[i].st_name) != '\0' +- && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name)) ++ && mod->symtab[i].st_value > mod->symtab[best].st_value) + best = i; + if (mod->symtab[i].st_value > addr +- && mod->symtab[i].st_value < nextval +- && *(mod->strtab + mod->symtab[i].st_name) != '\0' +- && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name)) ++ && mod->symtab[i].st_value < nextval) + nextval = mod->symtab[i].st_value; + } + +@@ -3439,7 +3444,7 @@ static const char *get_ksymbol(struct module *mod, + *size = nextval - mod->symtab[best].st_value; + if (offset) + *offset = addr - mod->symtab[best].st_value; +- return mod->strtab + mod->symtab[best].st_name; ++ return symname(mod, best); + } + + /* For kallsyms to ask for address resolution. NULL means not found. 
Careful +@@ -3540,8 +3545,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + if (symnum < mod->num_symtab) { + *value = mod->symtab[symnum].st_value; + *type = mod->symtab[symnum].st_info; +- strlcpy(name, mod->strtab + mod->symtab[symnum].st_name, +- KSYM_NAME_LEN); ++ strlcpy(name, symname(mod, symnum), KSYM_NAME_LEN); + strlcpy(module_name, mod->name, MODULE_NAME_LEN); + *exported = is_exported(name, *value, mod); + preempt_enable(); +@@ -3558,7 +3562,7 @@ static unsigned long mod_find_symname(struct module *mod, const char *name) + unsigned int i; + + for (i = 0; i < mod->num_symtab; i++) +- if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 && ++ if (strcmp(name, symname(mod, i)) == 0 && + mod->symtab[i].st_info != 'U') + return mod->symtab[i].st_value; + return 0; +@@ -3602,7 +3606,7 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + if (mod->state == MODULE_STATE_UNFORMED) + continue; + for (i = 0; i < mod->num_symtab; i++) { +- ret = fn(data, mod->strtab + mod->symtab[i].st_name, ++ ret = fn(data, symname(mod, i), + mod, mod->symtab[i].st_value); + if (ret != 0) + return ret; +diff --git a/kernel/ptrace.c b/kernel/ptrace.c +index 30ab20623bca..72b0b3e0e065 100644 +--- a/kernel/ptrace.c ++++ b/kernel/ptrace.c +@@ -225,6 +225,14 @@ static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode) + static int __ptrace_may_access(struct task_struct *task, unsigned int mode) + { + const struct cred *cred = current_cred(), *tcred; ++ int dumpable = 0; ++ kuid_t caller_uid; ++ kgid_t caller_gid; ++ ++ if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) { ++ WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n"); ++ return -EPERM; ++ } + + /* May we inspect the given task? + * This check is used both for attaching with ptrace +@@ -234,18 +242,33 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode) + * because setting up the necessary parent/child relationship + * or halting the specified task is impossible. + */ +- int dumpable = 0; ++ + /* Don't let security modules deny introspection */ + if (same_thread_group(task, current)) + return 0; + rcu_read_lock(); ++ if (mode & PTRACE_MODE_FSCREDS) { ++ caller_uid = cred->fsuid; ++ caller_gid = cred->fsgid; ++ } else { ++ /* ++ * Using the euid would make more sense here, but something ++ * in userland might rely on the old behavior, and this ++ * shouldn't be a security problem since ++ * PTRACE_MODE_REALCREDS implies that the caller explicitly ++ * used a syscall that requests access to another process ++ * (and not a filesystem syscall to procfs). 
++ */ ++ caller_uid = cred->uid; ++ caller_gid = cred->gid; ++ } + tcred = __task_cred(task); +- if (uid_eq(cred->uid, tcred->euid) && +- uid_eq(cred->uid, tcred->suid) && +- uid_eq(cred->uid, tcred->uid) && +- gid_eq(cred->gid, tcred->egid) && +- gid_eq(cred->gid, tcred->sgid) && +- gid_eq(cred->gid, tcred->gid)) ++ if (uid_eq(caller_uid, tcred->euid) && ++ uid_eq(caller_uid, tcred->suid) && ++ uid_eq(caller_uid, tcred->uid) && ++ gid_eq(caller_gid, tcred->egid) && ++ gid_eq(caller_gid, tcred->sgid) && ++ gid_eq(caller_gid, tcred->gid)) + goto ok; + if (ptrace_has_cap(tcred->user_ns, mode)) + goto ok; +@@ -312,7 +335,7 @@ static int ptrace_attach(struct task_struct *task, long request, + goto out; + + task_lock(task); +- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH); ++ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS); + task_unlock(task); + if (retval) + goto unlock_creds; +diff --git a/lib/dma-debug.c b/lib/dma-debug.c +index d87a17a819d0..eb43517bf261 100644 +--- a/lib/dma-debug.c ++++ b/lib/dma-debug.c +@@ -962,7 +962,7 @@ static inline bool overlap(void *addr, unsigned long len, void *start, void *end + + static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len) + { +- if (overlap(addr, len, _text, _etext) || ++ if (overlap(addr, len, _stext, _etext) || + overlap(addr, len, __start_rodata, __end_rodata)) + err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len); + } +diff --git a/lib/klist.c b/lib/klist.c +index 358a368a2947..2e59aecbec0d 100644 +--- a/lib/klist.c ++++ b/lib/klist.c +@@ -282,9 +282,9 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i, + struct klist_node *n) + { + i->i_klist = k; +- i->i_cur = n; +- if (n) +- kref_get(&n->n_ref); ++ i->i_cur = NULL; ++ if (n && kref_get_unless_zero(&n->n_ref)) ++ i->i_cur = n; + } + EXPORT_SYMBOL_GPL(klist_iter_init_node); + +diff --git a/lib/radix-tree.c b/lib/radix-tree.c +index e7964296fd50..936a02c1c77b 100644 +--- a/lib/radix-tree.c ++++ b/lib/radix-tree.c +@@ -1015,9 +1015,13 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, + return 0; + + radix_tree_for_each_slot(slot, root, &iter, first_index) { +- results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot)); ++ results[ret] = rcu_dereference_raw(*slot); + if (!results[ret]) + continue; ++ if (radix_tree_is_indirect_ptr(results[ret])) { ++ slot = radix_tree_iter_retry(&iter); ++ continue; ++ } + if (++ret == max_items) + break; + } +@@ -1094,9 +1098,13 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, + return 0; + + radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { +- results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot)); ++ results[ret] = rcu_dereference_raw(*slot); + if (!results[ret]) + continue; ++ if (radix_tree_is_indirect_ptr(results[ret])) { ++ slot = radix_tree_iter_retry(&iter); ++ continue; ++ } + if (++ret == max_items) + break; + } +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index eaa3accb01e7..437ae2cbe102 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -5790,16 +5790,17 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp, + swap_buffers: + /* Swap primary and spare array */ + thresholds->spare = thresholds->primary; +- /* If all events are unregistered, free the spare array */ +- if (!new) { +- kfree(thresholds->spare); +- thresholds->spare = NULL; +- } + + rcu_assign_pointer(thresholds->primary, new); + + /* To be sure that nobody 
uses thresholds */ + synchronize_rcu(); ++ ++ /* If all events are unregistered, free the spare array */ ++ if (!new) { ++ kfree(thresholds->spare); ++ thresholds->spare = NULL; ++ } + unlock: + mutex_unlock(&memcg->thresholds_lock); + } +diff --git a/mm/memory-failure.c b/mm/memory-failure.c +index f97d709594e6..37df20faddd5 100644 +--- a/mm/memory-failure.c ++++ b/mm/memory-failure.c +@@ -1472,7 +1472,7 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags) + * Did it turn free? + */ + ret = __get_any_page(page, pfn, 0); +- if (!PageLRU(page)) { ++ if (ret == 1 && !PageLRU(page)) { + /* Drop page reference which is from __get_any_page() */ + put_page(page); + pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n", +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c +index d85d3a0e06ce..7f1bf93fa87f 100644 +--- a/mm/memory_hotplug.c ++++ b/mm/memory_hotplug.c +@@ -1209,23 +1209,30 @@ int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) + */ + static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) + { +- unsigned long pfn; ++ unsigned long pfn, sec_end_pfn; + struct zone *zone = NULL; + struct page *page; + int i; +- for (pfn = start_pfn; ++ for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn); + pfn < end_pfn; +- pfn += MAX_ORDER_NR_PAGES) { +- i = 0; +- /* This is just a CONFIG_HOLES_IN_ZONE check.*/ +- while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i)) +- i++; +- if (i == MAX_ORDER_NR_PAGES) ++ pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) { ++ /* Make sure the memory section is present first */ ++ if (!present_section_nr(pfn_to_section_nr(pfn))) + continue; +- page = pfn_to_page(pfn + i); +- if (zone && page_zone(page) != zone) +- return 0; +- zone = page_zone(page); ++ for (; pfn < sec_end_pfn && pfn < end_pfn; ++ pfn += MAX_ORDER_NR_PAGES) { ++ i = 0; ++ /* This is just a CONFIG_HOLES_IN_ZONE check.*/ ++ while ((i < MAX_ORDER_NR_PAGES) && ++ !pfn_valid_within(pfn + i)) ++ i++; ++ if (i == MAX_ORDER_NR_PAGES) ++ continue; ++ page = pfn_to_page(pfn + i); ++ if (zone && page_zone(page) != zone) ++ return 0; ++ zone = page_zone(page); ++ } + } + return 1; + } +diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c +index fd26d0433509..e739825be8b3 100644 +--- a/mm/process_vm_access.c ++++ b/mm/process_vm_access.c +@@ -298,7 +298,7 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec, + goto free_proc_pages; + } + +- mm = mm_access(task, PTRACE_MODE_ATTACH); ++ mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS); + if (!mm || IS_ERR(mm)) { + rc = IS_ERR(mm) ? 
PTR_ERR(mm) : -ESRCH; + /* +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c +index 9ec416552cc5..8d69df16f6a8 100644 +--- a/net/ipv6/ip6mr.c ++++ b/net/ipv6/ip6mr.c +@@ -336,7 +336,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id) + + static void ip6mr_free_table(struct mr6_table *mrt) + { +- del_timer(&mrt->ipmr_expire_timer); ++ del_timer_sync(&mrt->ipmr_expire_timer); + mroute_clean_tables(mrt, true); + kfree(mrt); + } +diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter +index 6129020c41a9..81228a443122 100755 +--- a/scripts/bloat-o-meter ++++ b/scripts/bloat-o-meter +@@ -55,8 +55,8 @@ for name in common: + delta.sort() + delta.reverse() + +-print "add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \ +- (add, remove, grow, shrink, up, -down, up-down) +-print "%-40s %7s %7s %+7s" % ("function", "old", "new", "delta") ++print("add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \ ++ (add, remove, grow, shrink, up, -down, up-down)) ++print("%-40s %7s %7s %+7s" % ("function", "old", "new", "delta")) + for d, n in delta: +- if d: print "%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d) ++ if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d)) +diff --git a/security/commoncap.c b/security/commoncap.c +index c9219a66b7c6..4fd7bf2b19e1 100644 +--- a/security/commoncap.c ++++ b/security/commoncap.c +@@ -142,12 +142,17 @@ int cap_ptrace_access_check(struct task_struct *child, unsigned int mode) + { + int ret = 0; + const struct cred *cred, *child_cred; ++ const kernel_cap_t *caller_caps; + + rcu_read_lock(); + cred = current_cred(); + child_cred = __task_cred(child); ++ if (mode & PTRACE_MODE_FSCREDS) ++ caller_caps = &cred->cap_effective; ++ else ++ caller_caps = &cred->cap_permitted; + if (cred->user_ns == child_cred->user_ns && +- cap_issubset(child_cred->cap_permitted, cred->cap_permitted)) ++ cap_issubset(child_cred->cap_permitted, *caller_caps)) + goto out; + if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE)) + goto out; +diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c +index 67c91d226552..ee0522a8f730 100644 +--- a/sound/core/seq/seq_ports.c ++++ b/sound/core/seq/seq_ports.c +@@ -540,19 +540,22 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client, + bool is_src, bool ack) + { + struct snd_seq_port_subs_info *grp; ++ struct list_head *list; ++ bool empty; + + grp = is_src ? &port->c_src : &port->c_dest; ++ list = is_src ? 
&subs->src_list : &subs->dest_list; + down_write(&grp->list_mutex); + write_lock_irq(&grp->list_lock); +- if (is_src) +- list_del(&subs->src_list); +- else +- list_del(&subs->dest_list); ++ empty = list_empty(list); ++ if (!empty) ++ list_del_init(list); + grp->exclusive = 0; + write_unlock_irq(&grp->list_lock); + up_write(&grp->list_mutex); + +- unsubscribe_port(client, port, grp, &subs->info, ack); ++ if (!empty) ++ unsubscribe_port(client, port, grp, &subs->info, ack); + } + + /* connect two ports */ +diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c +index 82b0606dcb8a..c3efcf2f816b 100644 +--- a/tools/lib/traceevent/event-parse.c ++++ b/tools/lib/traceevent/event-parse.c +@@ -4190,13 +4190,12 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event + sizeof(long) != 8) { + char *p; + +- ls = 2; + /* make %l into %ll */ +- p = strchr(format, 'l'); +- if (p) ++ if (ls == 1 && (p = strchr(format, 'l'))) + memmove(p+1, p, strlen(p)+1); + else if (strcmp(format, "%p") == 0) + strcpy(format, "0x%llx"); ++ ls = 2; + } + switch (ls) { + case -2: diff --git a/patch/kernel/marvell-default/patch-3.10.98-99.patch b/patch/kernel/marvell-default/patch-3.10.98-99.patch new file mode 100644 index 000000000..e405e5ac5 --- /dev/null +++ b/patch/kernel/marvell-default/patch-3.10.98-99.patch @@ -0,0 +1,2573 @@ +diff --git a/Makefile b/Makefile +index dadd1edc6f84..f1e6491fd7d8 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 10 +-SUBLEVEL = 98 ++SUBLEVEL = 99 + EXTRAVERSION = + NAME = TOSSUG Baby Fish + +diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c +index a8d02223da44..14558a9fa3b3 100644 +--- a/arch/arc/kernel/unwind.c ++++ b/arch/arc/kernel/unwind.c +@@ -984,42 +984,13 @@ int arc_unwind(struct unwind_frame_info *frame) + (const u8 *)(fde + + 1) + + *fde, ptrType); +- if (pc >= endLoc) ++ if (pc >= endLoc) { + fde = NULL; +- } else +- fde = NULL; +- } +- if (fde == NULL) { +- for (fde = table->address, tableSize = table->size; +- cie = NULL, tableSize > sizeof(*fde) +- && tableSize - sizeof(*fde) >= *fde; +- tableSize -= sizeof(*fde) + *fde, +- fde += 1 + *fde / sizeof(*fde)) { +- cie = cie_for_fde(fde, table); +- if (cie == &bad_cie) { + cie = NULL; +- break; + } +- if (cie == NULL +- || cie == ¬_fde +- || (ptrType = fde_pointer_type(cie)) < 0) +- continue; +- ptr = (const u8 *)(fde + 2); +- startLoc = read_pointer(&ptr, +- (const u8 *)(fde + 1) + +- *fde, ptrType); +- if (!startLoc) +- continue; +- if (!(ptrType & DW_EH_PE_indirect)) +- ptrType &= +- DW_EH_PE_FORM | DW_EH_PE_signed; +- endLoc = +- startLoc + read_pointer(&ptr, +- (const u8 *)(fde + +- 1) + +- *fde, ptrType); +- if (pc >= startLoc && pc < endLoc) +- break; ++ } else { ++ fde = NULL; ++ cie = NULL; + } + } + } +diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S +index 920b63210806..34c35f0e3290 100644 +--- a/arch/mips/kvm/kvm_locore.S ++++ b/arch/mips/kvm/kvm_locore.S +@@ -156,9 +156,11 @@ FEXPORT(__kvm_mips_vcpu_run) + + FEXPORT(__kvm_mips_load_asid) + /* Set the ASID for the Guest Kernel */ +- sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ +- /* addresses shift to 0x80000000 */ +- bltz t0, 1f /* If kernel */ ++ PTR_L t0, VCPU_COP0(k1) ++ LONG_L t0, COP0_STATUS(t0) ++ andi t0, KSU_USER | ST0_ERL | ST0_EXL ++ xori t0, KSU_USER ++ bnez t0, 1f /* If kernel */ + addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ + addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */ + 1: +@@ -442,9 +444,11 @@ 
__kvm_mips_return_to_guest: + mtc0 t0, CP0_EPC + + /* Set the ASID for the Guest Kernel */ +- sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ +- /* addresses shift to 0x80000000 */ +- bltz t0, 1f /* If kernel */ ++ PTR_L t0, VCPU_COP0(k1) ++ LONG_L t0, COP0_STATUS(t0) ++ andi t0, KSU_USER | ST0_ERL | ST0_EXL ++ xori t0, KSU_USER ++ bnez t0, 1f /* If kernel */ + addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ + addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */ + 1: +diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c +index 843ec38fec7b..8aa5f30d8579 100644 +--- a/arch/mips/kvm/kvm_mips.c ++++ b/arch/mips/kvm/kvm_mips.c +@@ -308,7 +308,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) + + if (!gebase) { + err = -ENOMEM; +- goto out_free_cpu; ++ goto out_uninit_cpu; + } + kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n", + ALIGN(size, PAGE_SIZE), gebase); +@@ -368,6 +368,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) + out_free_gebase: + kfree(gebase); + ++out_uninit_cpu: ++ kvm_vcpu_uninit(vcpu); ++ + out_free_cpu: + kfree(vcpu); + +diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c +index c76f297b7149..33085819cd89 100644 +--- a/arch/mips/kvm/kvm_mips_emul.c ++++ b/arch/mips/kvm/kvm_mips_emul.c +@@ -935,7 +935,7 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, + + base = (inst >> 21) & 0x1f; + op_inst = (inst >> 16) & 0x1f; +- offset = inst & 0xffff; ++ offset = (int16_t)inst; + cache = (inst >> 16) & 0x3; + op = (inst >> 18) & 0x7; + +diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c +index 4d1ee88864e8..18c8b819b0aa 100644 +--- a/arch/s390/mm/extable.c ++++ b/arch/s390/mm/extable.c +@@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start, + int i; + + /* Normalize entries to being relative to the start of the section */ +- for (p = start, i = 0; p < finish; p++, i += 8) ++ for (p = start, i = 0; p < finish; p++, i += 8) { + p->insn += i; ++ p->fixup += i + 4; ++ } + sort(start, finish - start, sizeof(*start), cmp_ex, NULL); + /* Denormalize all entries */ +- for (p = start, i = 0; p < finish; p++, i += 8) ++ for (p = start, i = 0; p < finish; p++, i += 8) { + p->insn -= i; ++ p->fixup -= i + 4; ++ } + } + + #ifdef CONFIG_MODULES +diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c +index be8db9bb7878..666510b39870 100644 +--- a/arch/sparc/kernel/sys_sparc_64.c ++++ b/arch/sparc/kernel/sys_sparc_64.c +@@ -416,7 +416,7 @@ out: + + SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality) + { +- int ret; ++ long ret; + + if (personality(current->personality) == PER_LINUX32 && + personality(personality) == PER_LINUX) +diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c +index 337518c5042a..b412c62486f0 100644 +--- a/arch/um/os-Linux/start_up.c ++++ b/arch/um/os-Linux/start_up.c +@@ -95,6 +95,8 @@ static int start_ptraced_child(void) + { + int pid, n, status; + ++ fflush(stdout); ++ + pid = fork(); + if (pid == 0) + ptrace_child(); +diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c +index 6033be9ff81a..3c8bffdc71c8 100644 +--- a/arch/x86/platform/efi/efi.c ++++ b/arch/x86/platform/efi/efi.c +@@ -250,12 +250,19 @@ static efi_status_t __init phys_efi_set_virtual_address_map( + efi_memory_desc_t *virtual_map) + { + efi_status_t status; ++ unsigned long flags; + + efi_call_phys_prelog(); ++ ++ /* Disable interrupts around EFI calls: */ ++ 
local_irq_save(flags); + status = efi_call_phys4(efi_phys.set_virtual_address_map, + memory_map_size, descriptor_size, + descriptor_version, virtual_map); ++ local_irq_restore(flags); ++ + efi_call_phys_epilog(); ++ + return status; + } + +diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c +index 40e446941dd7..bebbee05e331 100644 +--- a/arch/x86/platform/efi/efi_32.c ++++ b/arch/x86/platform/efi/efi_32.c +@@ -33,19 +33,16 @@ + + /* + * To make EFI call EFI runtime service in physical addressing mode we need +- * prelog/epilog before/after the invocation to disable interrupt, to +- * claim EFI runtime service handler exclusively and to duplicate a memory in +- * low memory space say 0 - 3G. ++ * prolog/epilog before/after the invocation to claim the EFI runtime service ++ * handler exclusively and to duplicate a memory mapping in low memory space, ++ * say 0 - 3G. + */ + +-static unsigned long efi_rt_eflags; + + void efi_call_phys_prelog(void) + { + struct desc_ptr gdt_descr; + +- local_irq_save(efi_rt_eflags); +- + load_cr3(initial_page_table); + __flush_tlb_all(); + +@@ -64,6 +61,4 @@ void efi_call_phys_epilog(void) + + load_cr3(swapper_pg_dir); + __flush_tlb_all(); +- +- local_irq_restore(efi_rt_eflags); + } +diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c +index 39a0e7f1f0a3..2f6c1a9734c8 100644 +--- a/arch/x86/platform/efi/efi_64.c ++++ b/arch/x86/platform/efi/efi_64.c +@@ -40,7 +40,6 @@ + #include + + static pgd_t *save_pgd __initdata; +-static unsigned long efi_flags __initdata; + + static void __init early_code_mapping_set_exec(int executable) + { +@@ -66,7 +65,6 @@ void __init efi_call_phys_prelog(void) + int n_pgds; + + early_code_mapping_set_exec(1); +- local_irq_save(efi_flags); + + n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE); + save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL); +@@ -90,7 +88,6 @@ void __init efi_call_phys_epilog(void) + set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]); + kfree(save_pgd); + __flush_tlb_all(); +- local_irq_restore(efi_flags); + early_code_mapping_set_exec(0); + } + +diff --git a/block/partitions/mac.c b/block/partitions/mac.c +index 76d8ba6379a9..bd5b91465230 100644 +--- a/block/partitions/mac.c ++++ b/block/partitions/mac.c +@@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state) + Sector sect; + unsigned char *data; + int slot, blocks_in_map; +- unsigned secsize; ++ unsigned secsize, datasize, partoffset; + #ifdef CONFIG_PPC_PMAC + int found_root = 0; + int found_root_goodness = 0; +@@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state) + } + secsize = be16_to_cpu(md->block_size); + put_dev_sector(sect); +- data = read_part_sector(state, secsize/512, §); ++ datasize = round_down(secsize, 512); ++ data = read_part_sector(state, datasize / 512, §); + if (!data) + return -1; +- part = (struct mac_partition *) (data + secsize%512); ++ partoffset = secsize % 512; ++ if (partoffset + sizeof(*part) > datasize) ++ return -1; ++ part = (struct mac_partition *) (data + partoffset); + if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) { + put_dev_sector(sect); + return 0; /* not a MacOS disk */ +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c +index 136803c47cdb..96e5ed188636 100644 +--- a/drivers/ata/libata-sff.c ++++ b/drivers/ata/libata-sff.c +@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap, + static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) + { + struct ata_port *ap = 
qc->ap; +- unsigned long flags; + + if (ap->ops->error_handler) { + if (in_wq) { +- spin_lock_irqsave(ap->lock, flags); +- + /* EH might have kicked in while host lock is + * released. + */ +@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) + } else + ata_port_freeze(ap); + } +- +- spin_unlock_irqrestore(ap->lock, flags); + } else { + if (likely(!(qc->err_mask & AC_ERR_HSM))) + ata_qc_complete(qc); +@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) + } + } else { + if (in_wq) { +- spin_lock_irqsave(ap->lock, flags); + ata_sff_irq_on(ap); + ata_qc_complete(qc); +- spin_unlock_irqrestore(ap->lock, flags); + } else + ata_qc_complete(qc); + } +@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, + { + struct ata_link *link = qc->dev->link; + struct ata_eh_info *ehi = &link->eh_info; +- unsigned long flags = 0; + int poll_next; + ++ lockdep_assert_held(ap->lock); ++ + WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); + + /* Make sure ata_sff_qc_issue() does not throw things +@@ -1112,14 +1106,6 @@ fsm_start: + } + } + +- /* Send the CDB (atapi) or the first data block (ata pio out). +- * During the state transition, interrupt handler shouldn't +- * be invoked before the data transfer is complete and +- * hsm_task_state is changed. Hence, the following locking. +- */ +- if (in_wq) +- spin_lock_irqsave(ap->lock, flags); +- + if (qc->tf.protocol == ATA_PROT_PIO) { + /* PIO data out protocol. + * send first data block. +@@ -1135,9 +1121,6 @@ fsm_start: + /* send CDB */ + atapi_send_cdb(ap, qc); + +- if (in_wq) +- spin_unlock_irqrestore(ap->lock, flags); +- + /* if polling, ata_sff_pio_task() handles the rest. + * otherwise, interrupt handler takes over from here. 
+ */ +@@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work) + u8 status; + int poll_next; + ++ spin_lock_irq(ap->lock); ++ + BUG_ON(ap->sff_pio_task_link == NULL); + /* qc can be NULL if timeout occurred */ + qc = ata_qc_from_tag(ap, link->active_tag); + if (!qc) { + ap->sff_pio_task_link = NULL; +- return; ++ goto out_unlock; + } + + fsm_start: +@@ -1381,11 +1366,14 @@ fsm_start: + */ + status = ata_sff_busy_wait(ap, ATA_BUSY, 5); + if (status & ATA_BUSY) { ++ spin_unlock_irq(ap->lock); + ata_msleep(ap, 2); ++ spin_lock_irq(ap->lock); ++ + status = ata_sff_busy_wait(ap, ATA_BUSY, 10); + if (status & ATA_BUSY) { + ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); +- return; ++ goto out_unlock; + } + } + +@@ -1402,6 +1390,8 @@ fsm_start: + */ + if (poll_next) + goto fsm_start; ++out_unlock: ++ spin_unlock_irq(ap->lock); + } + + /** +diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c +index dd1faa564eb2..cdfb98e70cfd 100644 +--- a/drivers/ata/sata_sil.c ++++ b/drivers/ata/sata_sil.c +@@ -631,6 +631,9 @@ static void sil_dev_config(struct ata_device *dev) + unsigned int n, quirks = 0; + unsigned char model_num[ATA_ID_PROD_LEN + 1]; + ++ /* This controller doesn't support trim */ ++ dev->horkage |= ATA_HORKAGE_NOTRIM; ++ + ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); + + for (n = 0; sil_blacklist[n].product; n++) +diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c +index 64f553f04fa4..5874ebf9dced 100644 +--- a/drivers/clocksource/vt8500_timer.c ++++ b/drivers/clocksource/vt8500_timer.c +@@ -50,6 +50,8 @@ + + #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) + ++#define MIN_OSCR_DELTA 16 ++ + static void __iomem *regbase; + + static cycle_t vt8500_timer_read(struct clocksource *cs) +@@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles, + cpu_relax(); + writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL); + +- if ((signed)(alarm - clocksource.read(&clocksource)) <= 16) ++ if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA) + return -ETIME; + + writel(1, regbase + TIMER_IER_VAL); +@@ -162,7 +164,7 @@ static void __init vt8500_timer_init(struct device_node *np) + pr_err("%s: setup_irq failed for %s\n", __func__, + clockevent.name); + clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ, +- 4, 0xf0000000); ++ MIN_OSCR_DELTA * 2, 0xf0000000); + } + + CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init); +diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h +index b6b7d70f2832..5cfc1765af74 100644 +--- a/drivers/gpu/drm/ast/ast_drv.h ++++ b/drivers/gpu/drm/ast/ast_drv.h +@@ -296,6 +296,7 @@ int ast_framebuffer_init(struct drm_device *dev, + int ast_fbdev_init(struct drm_device *dev); + void ast_fbdev_fini(struct drm_device *dev); + void ast_fbdev_set_suspend(struct drm_device *dev, int state); ++void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr); + + struct ast_bo { + struct ttm_buffer_object bo; +diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c +index fbc0823cfa18..a298d8f72225 100644 +--- a/drivers/gpu/drm/ast/ast_fb.c ++++ b/drivers/gpu/drm/ast/ast_fb.c +@@ -366,3 +366,10 @@ void ast_fbdev_set_suspend(struct drm_device *dev, int state) + + fb_set_suspend(ast->fbdev->helper.fbdev, state); + } ++ ++void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr) ++{ ++ ast->fbdev->helper.fbdev->fix.smem_start = ++ 
ast->fbdev->helper.fbdev->apertures->ranges[0].base + gpu_addr; ++ ast->fbdev->helper.fbdev->fix.smem_len = ast->vram_size - gpu_addr; ++} +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c +index 96f874a508e2..313ccaf25f49 100644 +--- a/drivers/gpu/drm/ast/ast_main.c ++++ b/drivers/gpu/drm/ast/ast_main.c +@@ -359,6 +359,7 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags) + dev->mode_config.min_height = 0; + dev->mode_config.preferred_depth = 24; + dev->mode_config.prefer_shadow = 1; ++ dev->mode_config.fb_base = pci_resource_start(ast->dev->pdev, 0); + + if (ast->chip == AST2100 || + ast->chip == AST2200 || +diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c +index e8f6418b6dec..f3a54ad77e3f 100644 +--- a/drivers/gpu/drm/ast/ast_mode.c ++++ b/drivers/gpu/drm/ast/ast_mode.c +@@ -509,6 +509,8 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc, + ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); + if (ret) + DRM_ERROR("failed to kmap fbcon\n"); ++ else ++ ast_fbdev_set_base(ast, gpu_addr); + } + ast_bo_unreserve(bo); + +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c +index ba2ab9a9b988..f3cce23f4a62 100644 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c +@@ -452,7 +452,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, + } + + /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */ +- if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) && ++ if (((dev->pdev->device == 0x9802) || ++ (dev->pdev->device == 0x9805) || ++ (dev->pdev->device == 0x9806)) && + (dev->pdev->subsystem_vendor == 0x1734) && + (dev->pdev->subsystem_device == 0x11bd)) { + if (*connector_type == DRM_MODE_CONNECTOR_VGA) { +@@ -463,14 +465,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, + } + } + +- /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */ +- if ((dev->pdev->device == 0x9805) && +- (dev->pdev->subsystem_vendor == 0x1734) && +- (dev->pdev->subsystem_device == 0x11bd)) { +- if (*connector_type == DRM_MODE_CONNECTOR_VGA) +- return false; +- } +- + return true; + } + +diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c +index db83d075606e..6acd3646ac08 100644 +--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c ++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c +@@ -73,6 +73,11 @@ static void radeon_hotplug_work_func(struct work_struct *work) + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *connector; + ++ /* we can race here at startup, some boards seem to trigger ++ * hotplug irqs when they shouldn't. 
*/ ++ if (!rdev->mode_info.mode_config_initialized) ++ return; ++ + mutex_lock(&mode_config->mutex); + if (mode_config->num_connector) { + list_for_each_entry(connector, &mode_config->connector_list, head) +diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c +index f0bac68254b7..bb166849aa6e 100644 +--- a/drivers/gpu/drm/radeon/radeon_sa.c ++++ b/drivers/gpu/drm/radeon/radeon_sa.c +@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev, + /* see if we can skip over some allocations */ + } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); + ++ for (i = 0; i < RADEON_NUM_RINGS; ++i) ++ radeon_fence_ref(fences[i]); ++ + spin_unlock(&sa_manager->wq.lock); + r = radeon_fence_wait_any(rdev, fences, false); ++ for (i = 0; i < RADEON_NUM_RINGS; ++i) ++ radeon_fence_unref(&fences[i]); + spin_lock(&sa_manager->wq.lock); + /* if we have nothing to wait for block */ + if (r == -ENOENT && block) { +diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c +index 4a14e113369d..f7015592544f 100644 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c +@@ -619,7 +619,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm) + 0, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) { +- while (--i) { ++ while (i--) { + pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + gtt->ttm.dma_address[i] = 0; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +index 6c44c69a5ba4..94a0baac93dd 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +@@ -25,6 +25,7 @@ + * + **************************************************************************/ + #include ++#include + + #include + #include "vmwgfx_drv.h" +@@ -1192,6 +1193,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + static int __init vmwgfx_init(void) + { + int ret; ++ ++#ifdef CONFIG_VGA_CONSOLE ++ if (vgacon_text_force()) ++ return -EINVAL; ++#endif ++ + ret = drm_pci_init(&driver, &vmw_pci_driver); + if (ret) + DRM_ERROR("Failed initializing DRM.\n"); +diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c +index e893f6e1937d..3c84e96a485a 100644 +--- a/drivers/gpu/vga/vgaarb.c ++++ b/drivers/gpu/vga/vgaarb.c +@@ -392,8 +392,10 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) + set_current_state(interruptible ? + TASK_INTERRUPTIBLE : + TASK_UNINTERRUPTIBLE); +- if (signal_pending(current)) { +- rc = -EINTR; ++ if (interruptible && signal_pending(current)) { ++ __set_current_state(TASK_RUNNING); ++ remove_wait_queue(&vga_wait_queue, &wait); ++ rc = -ERESTARTSYS; + break; + } + schedule(); +diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c +index 3e094cd6a0e3..a9194ef626cd 100644 +--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c ++++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c +@@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en + error = l2t_send(tdev, skb, l2e); + if (error < 0) + kfree_skb(skb); +- return error; ++ return error < 0 ? error : 0; + } + + int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb) +@@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb) + error = cxgb3_ofld_send(tdev, skb); + if (error < 0) + kfree_skb(skb); +- return error; ++ return error < 0 ? 
error : 0; + } + + static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb) +diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c +index dabb697b1c2a..48ba1c3e945a 100644 +--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c ++++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c +@@ -286,15 +286,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) + struct qib_ibdev *dev = to_idev(ibqp->device); + struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num); + struct qib_mcast *mcast = NULL; +- struct qib_mcast_qp *p, *tmp; ++ struct qib_mcast_qp *p, *tmp, *delp = NULL; + struct rb_node *n; + int last = 0; + int ret; + +- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) { +- ret = -EINVAL; +- goto bail; +- } ++ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) ++ return -EINVAL; + + spin_lock_irq(&ibp->lock); + +@@ -303,8 +301,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) + while (1) { + if (n == NULL) { + spin_unlock_irq(&ibp->lock); +- ret = -EINVAL; +- goto bail; ++ return -EINVAL; + } + + mcast = rb_entry(n, struct qib_mcast, rb_node); +@@ -328,6 +325,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) + */ + list_del_rcu(&p->list); + mcast->n_attached--; ++ delp = p; + + /* If this was the last attached QP, remove the GID too. */ + if (list_empty(&mcast->qp_list)) { +@@ -338,15 +336,16 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) + } + + spin_unlock_irq(&ibp->lock); ++ /* QP not attached */ ++ if (!delp) ++ return -EINVAL; ++ /* ++ * Wait for any list walkers to finish before freeing the ++ * list element. ++ */ ++ wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1); ++ qib_mcast_qp_free(delp); + +- if (p) { +- /* +- * Wait for any list walkers to finish before freeing the +- * list element. +- */ +- wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1); +- qib_mcast_qp_free(p); +- } + if (last) { + atomic_dec(&mcast->refcount); + wait_event(mcast->wait, !atomic_read(&mcast->refcount)); +@@ -355,11 +354,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) + dev->n_mcast_grps_allocated--; + spin_unlock_irq(&dev->n_mcast_grps_lock); + } +- +- ret = 0; +- +-bail: +- return ret; ++ return 0; + } + + int qib_mcast_tree_empty(struct qib_ibport *ibp) +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c +index b4713cea1913..2d2915fdbf02 100644 +--- a/drivers/md/bcache/super.c ++++ b/drivers/md/bcache/super.c +@@ -1959,8 +1959,10 @@ static int __init bcache_init(void) + closure_debug_init(); + + bcache_major = register_blkdev(0, "bcache"); +- if (bcache_major < 0) ++ if (bcache_major < 0) { ++ unregister_reboot_notifier(&reboot); + return bcache_major; ++ } + + if (!(bcache_wq = create_workqueue("bcache")) || + !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || +diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h +index 0b2536247cf5..84e27708ad97 100644 +--- a/drivers/md/dm-exception-store.h ++++ b/drivers/md/dm-exception-store.h +@@ -70,7 +70,7 @@ struct dm_exception_store_type { + * Update the metadata with this exception. 
+ */ + void (*commit_exception) (struct dm_exception_store *store, +- struct dm_exception *e, ++ struct dm_exception *e, int valid, + void (*callback) (void *, int success), + void *callback_context); + +diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c +index 2d2b1b7588d7..8f6d3ea55401 100644 +--- a/drivers/md/dm-snap-persistent.c ++++ b/drivers/md/dm-snap-persistent.c +@@ -646,7 +646,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store, + } + + static void persistent_commit_exception(struct dm_exception_store *store, +- struct dm_exception *e, ++ struct dm_exception *e, int valid, + void (*callback) (void *, int success), + void *callback_context) + { +@@ -655,6 +655,9 @@ static void persistent_commit_exception(struct dm_exception_store *store, + struct core_exception ce; + struct commit_callback *cb; + ++ if (!valid) ++ ps->valid = 0; ++ + ce.old_chunk = e->old_chunk; + ce.new_chunk = e->new_chunk; + write_exception(ps, ps->current_committed++, &ce); +diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c +index 1ce9a2586e41..31439d53cf7e 100644 +--- a/drivers/md/dm-snap-transient.c ++++ b/drivers/md/dm-snap-transient.c +@@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store, + } + + static void transient_commit_exception(struct dm_exception_store *store, +- struct dm_exception *e, ++ struct dm_exception *e, int valid, + void (*callback) (void *, int success), + void *callback_context) + { + /* Just succeed */ +- callback(callback_context, 1); ++ callback(callback_context, valid); + } + + static void transient_usage(struct dm_exception_store *store, +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c +index d892a05c84f4..dbd0f00f7395 100644 +--- a/drivers/md/dm-snap.c ++++ b/drivers/md/dm-snap.c +@@ -1388,8 +1388,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err) + dm_table_event(s->ti->table); + } + +-static void pending_complete(struct dm_snap_pending_exception *pe, int success) ++static void pending_complete(void *context, int success) + { ++ struct dm_snap_pending_exception *pe = context; + struct dm_exception *e; + struct dm_snapshot *s = pe->snap; + struct bio *origin_bios = NULL; +@@ -1459,24 +1460,13 @@ out: + free_pending_exception(pe); + } + +-static void commit_callback(void *context, int success) +-{ +- struct dm_snap_pending_exception *pe = context; +- +- pending_complete(pe, success); +-} +- + static void complete_exception(struct dm_snap_pending_exception *pe) + { + struct dm_snapshot *s = pe->snap; + +- if (unlikely(pe->copy_error)) +- pending_complete(pe, 0); +- +- else +- /* Update the metadata if we are persistent */ +- s->store->type->commit_exception(s->store, &pe->e, +- commit_callback, pe); ++ /* Update the metadata if we are persistent */ ++ s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error, ++ pending_complete, pe); + } + + /* +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c +index 43f6250baadd..4bf9211b2740 100644 +--- a/drivers/md/dm-thin-metadata.c ++++ b/drivers/md/dm-thin-metadata.c +@@ -1191,6 +1191,12 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd) + dm_block_t held_root; + + /* ++ * We commit to ensure the btree roots which we increment in a ++ * moment are up to date. ++ */ ++ __commit_transaction(pmd); ++ ++ /* + * Copy the superblock. 
+ */ + dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION); +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c +index ec56072c6326..295f74d4f0ab 100644 +--- a/drivers/md/dm-thin.c ++++ b/drivers/md/dm-thin.c +@@ -2281,7 +2281,7 @@ static void pool_postsuspend(struct dm_target *ti) + struct pool_c *pt = ti->private; + struct pool *pool = pt->pool; + +- cancel_delayed_work(&pool->waker); ++ cancel_delayed_work_sync(&pool->waker); + flush_workqueue(pool->wq); + (void) commit_or_fallback(pool); + } +diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c +index 6d7f4d950b8f..b07fcda9ca71 100644 +--- a/drivers/md/persistent-data/dm-btree.c ++++ b/drivers/md/persistent-data/dm-btree.c +@@ -235,6 +235,16 @@ static bool is_internal_level(struct dm_btree_info *info, struct frame *f) + return f->level < (info->levels - 1); + } + ++static void unlock_all_frames(struct del_stack *s) ++{ ++ struct frame *f; ++ ++ while (unprocessed_frames(s)) { ++ f = s->spine + s->top--; ++ dm_tm_unlock(s->tm, f->b); ++ } ++} ++ + int dm_btree_del(struct dm_btree_info *info, dm_block_t root) + { + int r; +@@ -290,9 +300,13 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root) + f->current_child = f->nr_children; + } + } +- + out: ++ if (r) { ++ /* cleanup all frames of del_stack */ ++ unlock_all_frames(s); ++ } + kfree(s); ++ + return r; + } + EXPORT_SYMBOL_GPL(dm_btree_del); +diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c +index 1f925e856974..46a984291b7d 100644 +--- a/drivers/media/dvb-core/dvb_frontend.c ++++ b/drivers/media/dvb-core/dvb_frontend.c +@@ -2195,9 +2195,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file, + dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n", + __func__, c->delivery_system, fe->ops.info.type); + +- /* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't +- * do it, it is done for it. 
*/ +- info->caps |= FE_CAN_INVERSION_AUTO; ++ /* Set CAN_INVERSION_AUTO bit on in other than oneshot mode */ ++ if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT)) ++ info->caps |= FE_CAN_INVERSION_AUTO; + err = 0; + break; + } +diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c +index a2631be7ffac..08e0f0dd8728 100644 +--- a/drivers/media/dvb-frontends/tda1004x.c ++++ b/drivers/media/dvb-frontends/tda1004x.c +@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe) + { + struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache; + struct tda1004x_state* state = fe->demodulator_priv; ++ int status; + + dprintk("%s\n", __func__); + ++ status = tda1004x_read_byte(state, TDA1004X_STATUS_CD); ++ if (status == -1) ++ return -EIO; ++ ++ /* Only update the properties cache if device is locked */ ++ if (!(status & 8)) ++ return 0; ++ + // inversion status + fe_params->inversion = INVERSION_OFF; + if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20) +diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c +index 2e28c81a03ab..a5bee0d0d686 100644 +--- a/drivers/media/usb/gspca/ov534.c ++++ b/drivers/media/usb/gspca/ov534.c +@@ -1490,8 +1490,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev, + struct v4l2_fract *tpf = &cp->timeperframe; + struct sd *sd = (struct sd *) gspca_dev; + +- /* Set requested framerate */ +- sd->frame_rate = tpf->denominator / tpf->numerator; ++ if (tpf->numerator == 0 || tpf->denominator == 0) ++ /* Set default framerate */ ++ sd->frame_rate = 30; ++ else ++ /* Set requested framerate */ ++ sd->frame_rate = tpf->denominator / tpf->numerator; ++ + if (gspca_dev->streaming) + set_frame_rate(gspca_dev); + +diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c +index 4cb511ccc5f6..22ea6aefd22f 100644 +--- a/drivers/media/usb/gspca/topro.c ++++ b/drivers/media/usb/gspca/topro.c +@@ -4791,7 +4791,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev, + struct v4l2_fract *tpf = &cp->timeperframe; + int fr, i; + +- sd->framerate = tpf->denominator / tpf->numerator; ++ if (tpf->numerator == 0 || tpf->denominator == 0) ++ sd->framerate = 30; ++ else ++ sd->framerate = tpf->denominator / tpf->numerator; ++ + if (gspca_dev->streaming) + setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure)); + +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c +index 885ba4a19a6c..ebb40a292d67 100644 +--- a/drivers/mmc/card/block.c ++++ b/drivers/mmc/card/block.c +@@ -59,8 +59,7 @@ MODULE_ALIAS("mmc:block"); + #define INAND_CMD38_ARG_SECTRIM2 0x88 + #define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ + +-#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \ +- (req->cmd_flags & REQ_META)) && \ ++#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \ + (rq_data_dir(req) == WRITE)) + #define PACKED_CMD_VER 0x01 + #define PACKED_CMD_WR 0x02 +@@ -1300,13 +1299,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, + + /* + * Reliable writes are used to implement Forced Unit Access and +- * REQ_META accesses, and are supported only on MMCs. +- * +- * XXX: this really needs a good explanation of why REQ_META +- * is treated special. ++ * are supported only on MMCs. 
+ */ +- bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || +- (req->cmd_flags & REQ_META)) && ++ bool do_rel_wr = (req->cmd_flags & REQ_FUA) && + (rq_data_dir(req) == WRITE) && + (md->flags & MMC_BLK_REL_WR); + +diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c +index f4f3038c1df0..faeda85e78fa 100644 +--- a/drivers/mmc/host/mmci.c ++++ b/drivers/mmc/host/mmci.c +@@ -1740,7 +1740,7 @@ static struct amba_id mmci_ids[] = { + { + .id = 0x00280180, + .mask = 0x00ffffff, +- .data = &variant_u300, ++ .data = &variant_nomadik, + }, + { + .id = 0x00480180, +diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c +index c2d0559115d3..732a8ed571c2 100644 +--- a/drivers/net/can/sja1000/sja1000.c ++++ b/drivers/net/can/sja1000/sja1000.c +@@ -187,6 +187,9 @@ static void sja1000_start(struct net_device *dev) + /* clear interrupt flags */ + priv->read_reg(priv, SJA1000_IR); + ++ /* clear interrupt flags */ ++ priv->read_reg(priv, SJA1000_IR); ++ + /* leave reset mode */ + set_normal_mode(dev); + } +diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c +index 5f9a7ad9b964..d921416295ce 100644 +--- a/drivers/net/can/usb/ems_usb.c ++++ b/drivers/net/can/usb/ems_usb.c +@@ -118,6 +118,9 @@ MODULE_LICENSE("GPL v2"); + */ + #define EMS_USB_ARM7_CLOCK 8000000 + ++#define CPC_TX_QUEUE_TRIGGER_LOW 25 ++#define CPC_TX_QUEUE_TRIGGER_HIGH 35 ++ + /* + * CAN-Message representation in a CPC_MSG. Message object type is + * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or +@@ -279,6 +282,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb) + switch (urb->status) { + case 0: + dev->free_slots = dev->intr_in_buffer[1]; ++ if(dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH){ ++ if (netif_queue_stopped(netdev)){ ++ netif_wake_queue(netdev); ++ } ++ } + break; + + case -ECONNRESET: /* unlink */ +@@ -530,8 +538,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb) + /* Release context */ + context->echo_index = MAX_TX_URBS; + +- if (netif_queue_stopped(netdev)) +- netif_wake_queue(netdev); + } + + /* +@@ -591,7 +597,7 @@ static int ems_usb_start(struct ems_usb *dev) + int err, i; + + dev->intr_in_buffer[0] = 0; +- dev->free_slots = 15; /* initial size */ ++ dev->free_slots = 50; /* initial size */ + + for (i = 0; i < MAX_RX_URBS; i++) { + struct urb *urb = NULL; +@@ -841,7 +847,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne + + /* Slow down tx path */ + if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS || +- dev->free_slots < 5) { ++ dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) { + netif_stop_queue(netdev); + } + } +diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c +index 76ef634caf6f..b84e713445d0 100644 +--- a/drivers/pci/pcie/aer/aerdrv.c ++++ b/drivers/pci/pcie/aer/aerdrv.c +@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev) + rpc->rpd = dev; + INIT_WORK(&rpc->dpc_handler, aer_isr); + mutex_init(&rpc->rpc_mutex); +- init_waitqueue_head(&rpc->wait_release); + + /* Use PCIe bus function to store rpc into PCIe device */ + set_service_data(dev, rpc); +@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev) + if (rpc->isr) + free_irq(dev->irq, dev); + +- wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx); +- ++ flush_work(&rpc->dpc_handler); + aer_disable_rootport(rpc); + kfree(rpc); + set_service_data(dev, NULL); +diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h +index d12c77cd6991..3b8766e1e51b 100644 
+--- a/drivers/pci/pcie/aer/aerdrv.h ++++ b/drivers/pci/pcie/aer/aerdrv.h +@@ -76,7 +76,6 @@ struct aer_rpc { + * recovery on the same + * root port hierarchy + */ +- wait_queue_head_t wait_release; + }; + + struct aer_broadcast_data { +diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c +index 0f4554e48cc5..a017aac0d7ed 100644 +--- a/drivers/pci/pcie/aer/aerdrv_core.c ++++ b/drivers/pci/pcie/aer/aerdrv_core.c +@@ -817,8 +817,6 @@ void aer_isr(struct work_struct *work) + while (get_e_source(rpc, &e_src)) + aer_isr_one_error(p_device, &e_src); + mutex_unlock(&rpc->rpc_mutex); +- +- wake_up(&rpc->wait_release); + } + + /** +diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c +index f7197a790341..eb402f4f0e2f 100644 +--- a/drivers/pci/xen-pcifront.c ++++ b/drivers/pci/xen-pcifront.c +@@ -51,7 +51,7 @@ struct pcifront_device { + }; + + struct pcifront_sd { +- int domain; ++ struct pci_sysdata sd; + struct pcifront_device *pdev; + }; + +@@ -65,7 +65,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd, + unsigned int domain, unsigned int bus, + struct pcifront_device *pdev) + { +- sd->domain = domain; ++ /* Because we do not expose that information via XenBus. */ ++ sd->sd.node = first_online_node; ++ sd->sd.domain = domain; + sd->pdev = pdev; + } + +@@ -463,8 +465,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev, + dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n", + domain, bus); + +- bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL); +- sd = kmalloc(sizeof(*sd), GFP_KERNEL); ++ bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL); ++ sd = kzalloc(sizeof(*sd), GFP_KERNEL); + if (!bus_entry || !sd) { + err = -ENOMEM; + goto err_out; +diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c +index 3bed2f55cf7d..3ccadf631d45 100644 +--- a/drivers/power/wm831x_power.c ++++ b/drivers/power/wm831x_power.c +@@ -567,7 +567,7 @@ static int wm831x_power_probe(struct platform_device *pdev) + + irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO")); + ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq, +- IRQF_TRIGGER_RISING, "System power low", ++ IRQF_TRIGGER_RISING | IRQF_ONESHOT, "System power low", + power); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n", +@@ -577,7 +577,7 @@ static int wm831x_power_probe(struct platform_device *pdev) + + irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC")); + ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq, +- IRQF_TRIGGER_RISING, "Power source", ++ IRQF_TRIGGER_RISING | IRQF_ONESHOT, "Power source", + power); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n", +@@ -590,7 +590,7 @@ static int wm831x_power_probe(struct platform_device *pdev) + platform_get_irq_byname(pdev, + wm831x_bat_irqs[i])); + ret = request_threaded_irq(irq, NULL, wm831x_bat_irq, +- IRQF_TRIGGER_RISING, ++ IRQF_TRIGGER_RISING | IRQF_ONESHOT, + wm831x_bat_irqs[i], + power); + if (ret != 0) { +diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c +index a2597e683e79..6a64e86e8ccd 100644 +--- a/drivers/s390/block/dasd_alias.c ++++ b/drivers/s390/block/dasd_alias.c +@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) + spin_unlock_irqrestore(&lcu->lock, flags); + cancel_work_sync(&lcu->suc_data.worker); + spin_lock_irqsave(&lcu->lock, flags); +- if (device == lcu->suc_data.device) ++ if (device == lcu->suc_data.device) { ++ 
dasd_put_device(device); + lcu->suc_data.device = NULL; ++ } + } + was_pending = 0; + if (device == lcu->ruac_data.device) { +@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) + was_pending = 1; + cancel_delayed_work_sync(&lcu->ruac_data.dwork); + spin_lock_irqsave(&lcu->lock, flags); +- if (device == lcu->ruac_data.device) ++ if (device == lcu->ruac_data.device) { ++ dasd_put_device(device); + lcu->ruac_data.device = NULL; ++ } + } + private->lcu = NULL; + spin_unlock_irqrestore(&lcu->lock, flags); +@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work) + if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) { + DBF_DEV_EVENT(DBF_WARNING, device, "could not update" + " alias data in lcu (rc = %d), retry later", rc); +- schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ); ++ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ)) ++ dasd_put_device(device); + } else { ++ dasd_put_device(device); + lcu->ruac_data.device = NULL; + lcu->flags &= ~UPDATE_PENDING; + } +@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu, + */ + if (!usedev) + return -EINVAL; ++ dasd_get_device(usedev); + lcu->ruac_data.device = usedev; +- schedule_delayed_work(&lcu->ruac_data.dwork, 0); ++ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0)) ++ dasd_put_device(usedev); + return 0; + } + +@@ -722,7 +730,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu, + ASCEBC((char *) &cqr->magic, 4); + ccw = cqr->cpaddr; + ccw->cmd_code = DASD_ECKD_CCW_RSCK; +- ccw->flags = 0 ; ++ ccw->flags = CCW_FLAG_SLI; + ccw->count = 16; + ccw->cda = (__u32)(addr_t) cqr->data; + ((char *)cqr->data)[0] = reason; +@@ -926,6 +934,7 @@ static void summary_unit_check_handling_work(struct work_struct *work) + /* 3. 
read new alias configuration */ + _schedule_lcu_update(lcu, device); + lcu->suc_data.device = NULL; ++ dasd_put_device(device); + spin_unlock_irqrestore(&lcu->lock, flags); + } + +@@ -985,6 +994,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device, + } + lcu->suc_data.reason = reason; + lcu->suc_data.device = device; ++ dasd_get_device(device); + spin_unlock(&lcu->lock); +- schedule_work(&lcu->suc_data.worker); ++ if (!schedule_work(&lcu->suc_data.worker)) ++ dasd_put_device(device); + }; +diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h +index b52121358385..280e769a1686 100644 +--- a/drivers/scsi/megaraid/megaraid_sas.h ++++ b/drivers/scsi/megaraid/megaraid_sas.h +@@ -300,6 +300,8 @@ enum MR_EVT_ARGS { + MR_EVT_ARGS_GENERIC, + }; + ++ ++#define SGE_BUFFER_SIZE 4096 + /* + * define constants for device list query options + */ +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c +index 78b4fe845245..e6dfa8108301 100644 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c +@@ -3602,7 +3602,7 @@ static int megasas_init_fw(struct megasas_instance *instance) + } + + instance->max_sectors_per_req = instance->max_num_sge * +- PAGE_SIZE / 512; ++ SGE_BUFFER_SIZE / 512; + if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) + instance->max_sectors_per_req = tmp_sectors; + +@@ -5051,6 +5051,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) + int i; + int error = 0; + compat_uptr_t ptr; ++ unsigned long local_raw_ptr; ++ u32 local_sense_off; ++ u32 local_sense_len; + + if (clear_user(ioc, sizeof(*ioc))) + return -EFAULT; +@@ -5068,9 +5071,15 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) + * sense_len is not null, so prepare the 64bit value under + * the same condition. + */ +- if (ioc->sense_len) { ++ if (get_user(local_raw_ptr, ioc->frame.raw) || ++ get_user(local_sense_off, &ioc->sense_off) || ++ get_user(local_sense_len, &ioc->sense_len)) ++ return -EFAULT; ++ ++ ++ if (local_sense_len) { + void __user **sense_ioc_ptr = +- (void __user **)(ioc->frame.raw + ioc->sense_off); ++ (void __user **)((u8*)local_raw_ptr + local_sense_off); + compat_uptr_t *sense_cioc_ptr = + (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off); + if (get_user(ptr, sense_cioc_ptr) || +diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c +index eba183c428cf..3643bbf5456d 100644 +--- a/drivers/scsi/ses.c ++++ b/drivers/scsi/ses.c +@@ -70,6 +70,7 @@ static int ses_probe(struct device *dev) + static int ses_recv_diag(struct scsi_device *sdev, int page_code, + void *buf, int bufflen) + { ++ int ret; + unsigned char cmd[] = { + RECEIVE_DIAGNOSTIC, + 1, /* Set PCV bit */ +@@ -78,9 +79,26 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code, + bufflen & 0xff, + 0 + }; ++ unsigned char recv_page_code; + +- return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, ++ ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, + NULL, SES_TIMEOUT, SES_RETRIES, NULL); ++ if (unlikely(!ret)) ++ return ret; ++ ++ recv_page_code = ((unsigned char *)buf)[0]; ++ ++ if (likely(recv_page_code == page_code)) ++ return ret; ++ ++ /* successful diagnostic but wrong page code. 
This happens to some ++ * USB devices, just print a message and pretend there was an error */ ++ ++ sdev_printk(KERN_ERR, sdev, ++ "Wrong diagnostic page; asked for %d got %u\n", ++ page_code, recv_page_code); ++ ++ return -EINVAL; + } + + static int ses_send_diag(struct scsi_device *sdev, int page_code, +@@ -436,7 +454,15 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, + if (desc_ptr) + desc_ptr += len; + +- if (addl_desc_ptr) ++ if (addl_desc_ptr && ++ /* only find additional descriptions for specific devices */ ++ (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || ++ type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE || ++ type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER || ++ /* these elements are optional */ ++ type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT || ++ type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT || ++ type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS)) + addl_desc_ptr += addl_desc_ptr[1] + 2; + + } +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index d2ea64de92df..d6dab8adf60e 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -1726,6 +1726,11 @@ static const struct usb_device_id acm_ids[] = { + }, + #endif + ++ /*Samsung phone in firmware update mode */ ++ { USB_DEVICE(0x04e8, 0x685d), ++ .driver_info = IGNORE_DEVICE, ++ }, ++ + /* Exclude Infineon Flash Loader utility */ + { USB_DEVICE(0x058b, 0x0041), + .driver_info = IGNORE_DEVICE, +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 89ba7cfba5bc..303f3b3fb65f 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ + { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ + { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ ++ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ ++ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ + { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 81f6a572f016..9bab34cf01d4 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -315,6 +315,7 @@ static void option_instat_callback(struct urb *urb); + #define TOSHIBA_PRODUCT_G450 0x0d45 + + #define ALINK_VENDOR_ID 0x1e0e ++#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */ + #define ALINK_PRODUCT_PH300 0x9100 + #define ALINK_PRODUCT_3GU 0x9200 + +@@ -615,6 +616,10 @@ static const struct option_blacklist_info zte_1255_blacklist = { + .reserved = BIT(3) | BIT(4), + }; + ++static const struct option_blacklist_info simcom_sim7100e_blacklist = { ++ .reserved = BIT(5) | BIT(6), ++}; ++ + static const struct option_blacklist_info telit_le910_blacklist = { + .sendsetup = BIT(0), + .reserved = BIT(1) | BIT(2), +@@ -1130,6 +1135,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ ++ { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */ ++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { 
USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ +@@ -1645,6 +1652,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, + { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) }, + { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E), ++ .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist }, + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), + .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist + }, +diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c +index ee59b74768d9..beaa7cc4e857 100644 +--- a/drivers/virtio/virtio.c ++++ b/drivers/virtio/virtio.c +@@ -238,6 +238,7 @@ static int virtio_init(void) + static void __exit virtio_exit(void) + { + bus_unregister(&virtio_bus); ++ ida_destroy(&virtio_index_ida); + } + core_initcall(virtio_init); + module_exit(virtio_exit); +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 7360f03ddbe1..9612a01198df 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -2437,6 +2437,7 @@ int open_ctree(struct super_block *sb, + "unsupported option features (%Lx).\n", + (unsigned long long)features); + err = -EINVAL; ++ brelse(bh); + goto fail_alloc; + } + +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index ae29b403a7e2..b5d13c4eea00 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -7477,15 +7477,28 @@ int btrfs_readpage(struct file *file, struct page *page) + static int btrfs_writepage(struct page *page, struct writeback_control *wbc) + { + struct extent_io_tree *tree; +- ++ struct inode *inode = page->mapping->host; ++ int ret; + + if (current->flags & PF_MEMALLOC) { + redirty_page_for_writepage(wbc, page); + unlock_page(page); + return 0; + } ++ ++ /* ++ * If we are under memory pressure we will call this directly from the ++ * VM, we need to make sure we have the inode referenced for the ordered ++ * extent. If not just return like we didn't do anything. ++ */ ++ if (!igrab(inode)) { ++ redirty_page_for_writepage(wbc, page); ++ return AOP_WRITEPAGE_ACTIVATE; ++ } + tree = &BTRFS_I(page->mapping->host)->io_tree; +- return extent_write_full_page(tree, page, btrfs_get_extent, wbc); ++ ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc); ++ btrfs_add_delayed_iput(inode); ++ return ret; + } + + static int btrfs_writepages(struct address_space *mapping, +@@ -8474,9 +8487,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, + /* + * 2 items for inode item and ref + * 2 items for dir items ++ * 1 item for updating parent inode item ++ * 1 item for the inline extent item + * 1 item for xattr if selinux is on + */ +- trans = btrfs_start_transaction(root, 5); ++ trans = btrfs_start_transaction(root, 7); + if (IS_ERR(trans)) + return PTR_ERR(trans); + +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c +index 414c1b9eb896..3104e0eec816 100644 +--- a/fs/btrfs/send.c ++++ b/fs/btrfs/send.c +@@ -1338,7 +1338,21 @@ static int read_symlink(struct send_ctx *sctx, + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; +- BUG_ON(ret); ++ if (ret) { ++ /* ++ * An empty symlink inode. 
Can happen in rare error paths when ++ * creating a symlink (transaction committed before the inode ++ * eviction handler removed the symlink inode items and a crash ++ * happened in between or the subvol was snapshoted in between). ++ * Print an informative message to dmesg/syslog so that the user ++ * can delete the symlink. ++ */ ++ btrfs_err(root->fs_info, ++ "Found empty symlink inode %llu at root %llu", ++ ino, root->root_key.objectid); ++ ret = -EIO; ++ goto out; ++ } + + ei = btrfs_item_ptr(path->nodes[0], path->slots[0], + struct btrfs_file_extent_item); +diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c +index 32f35f187989..b58a9cbb9695 100644 +--- a/fs/hostfs/hostfs_kern.c ++++ b/fs/hostfs/hostfs_kern.c +@@ -720,15 +720,13 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, + + init_special_inode(inode, mode, dev); + err = do_mknod(name, mode, MAJOR(dev), MINOR(dev)); +- if (!err) ++ if (err) + goto out_free; + + err = read_name(inode, name); + __putname(name); + if (err) + goto out_put; +- if (err) +- goto out_put; + + d_instantiate(dentry, inode); + return 0; +diff --git a/fs/lockd/host.c b/fs/lockd/host.c +index 969d589c848d..b5f3c3ab0d5f 100644 +--- a/fs/lockd/host.c ++++ b/fs/lockd/host.c +@@ -116,7 +116,7 @@ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni, + atomic_inc(&nsm->sm_count); + else { + host = NULL; +- nsm = nsm_get_handle(ni->sap, ni->salen, ++ nsm = nsm_get_handle(ni->net, ni->sap, ni->salen, + ni->hostname, ni->hostname_len); + if (unlikely(nsm == NULL)) { + dprintk("lockd: %s failed; no nsm handle\n", +@@ -534,17 +534,18 @@ static struct nlm_host *next_host_state(struct hlist_head *cache, + + /** + * nlm_host_rebooted - Release all resources held by rebooted host ++ * @net: network namespace + * @info: pointer to decoded results of NLM_SM_NOTIFY call + * + * We were notified that the specified host has rebooted. Release + * all resources held by that peer. 
+ */ +-void nlm_host_rebooted(const struct nlm_reboot *info) ++void nlm_host_rebooted(const struct net *net, const struct nlm_reboot *info) + { + struct nsm_handle *nsm; + struct nlm_host *host; + +- nsm = nsm_reboot_lookup(info); ++ nsm = nsm_reboot_lookup(net, info); + if (unlikely(nsm == NULL)) + return; + +diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c +index 6ae664b489af..13fac49aff7f 100644 +--- a/fs/lockd/mon.c ++++ b/fs/lockd/mon.c +@@ -51,7 +51,6 @@ struct nsm_res { + }; + + static const struct rpc_program nsm_program; +-static LIST_HEAD(nsm_handles); + static DEFINE_SPINLOCK(nsm_lock); + + /* +@@ -259,33 +258,35 @@ void nsm_unmonitor(const struct nlm_host *host) + } + } + +-static struct nsm_handle *nsm_lookup_hostname(const char *hostname, +- const size_t len) ++static struct nsm_handle *nsm_lookup_hostname(const struct list_head *nsm_handles, ++ const char *hostname, const size_t len) + { + struct nsm_handle *nsm; + +- list_for_each_entry(nsm, &nsm_handles, sm_link) ++ list_for_each_entry(nsm, nsm_handles, sm_link) + if (strlen(nsm->sm_name) == len && + memcmp(nsm->sm_name, hostname, len) == 0) + return nsm; + return NULL; + } + +-static struct nsm_handle *nsm_lookup_addr(const struct sockaddr *sap) ++static struct nsm_handle *nsm_lookup_addr(const struct list_head *nsm_handles, ++ const struct sockaddr *sap) + { + struct nsm_handle *nsm; + +- list_for_each_entry(nsm, &nsm_handles, sm_link) ++ list_for_each_entry(nsm, nsm_handles, sm_link) + if (rpc_cmp_addr(nsm_addr(nsm), sap)) + return nsm; + return NULL; + } + +-static struct nsm_handle *nsm_lookup_priv(const struct nsm_private *priv) ++static struct nsm_handle *nsm_lookup_priv(const struct list_head *nsm_handles, ++ const struct nsm_private *priv) + { + struct nsm_handle *nsm; + +- list_for_each_entry(nsm, &nsm_handles, sm_link) ++ list_for_each_entry(nsm, nsm_handles, sm_link) + if (memcmp(nsm->sm_priv.data, priv->data, + sizeof(priv->data)) == 0) + return nsm; +@@ -350,6 +351,7 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap, + + /** + * nsm_get_handle - Find or create a cached nsm_handle ++ * @net: network namespace + * @sap: pointer to socket address of handle to find + * @salen: length of socket address + * @hostname: pointer to C string containing hostname to find +@@ -362,11 +364,13 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap, + * @hostname cannot be found in the handle cache. Returns NULL if + * an error occurs. 
+ */ +-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap, ++struct nsm_handle *nsm_get_handle(const struct net *net, ++ const struct sockaddr *sap, + const size_t salen, const char *hostname, + const size_t hostname_len) + { + struct nsm_handle *cached, *new = NULL; ++ struct lockd_net *ln = net_generic(net, lockd_net_id); + + if (hostname && memchr(hostname, '/', hostname_len) != NULL) { + if (printk_ratelimit()) { +@@ -381,9 +385,10 @@ retry: + spin_lock(&nsm_lock); + + if (nsm_use_hostnames && hostname != NULL) +- cached = nsm_lookup_hostname(hostname, hostname_len); ++ cached = nsm_lookup_hostname(&ln->nsm_handles, ++ hostname, hostname_len); + else +- cached = nsm_lookup_addr(sap); ++ cached = nsm_lookup_addr(&ln->nsm_handles, sap); + + if (cached != NULL) { + atomic_inc(&cached->sm_count); +@@ -397,7 +402,7 @@ retry: + } + + if (new != NULL) { +- list_add(&new->sm_link, &nsm_handles); ++ list_add(&new->sm_link, &ln->nsm_handles); + spin_unlock(&nsm_lock); + dprintk("lockd: created nsm_handle for %s (%s)\n", + new->sm_name, new->sm_addrbuf); +@@ -414,19 +419,22 @@ retry: + + /** + * nsm_reboot_lookup - match NLMPROC_SM_NOTIFY arguments to an nsm_handle ++ * @net: network namespace + * @info: pointer to NLMPROC_SM_NOTIFY arguments + * + * Returns a matching nsm_handle if found in the nsm cache. The returned + * nsm_handle's reference count is bumped. Otherwise returns NULL if some + * error occurred. + */ +-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info) ++struct nsm_handle *nsm_reboot_lookup(const struct net *net, ++ const struct nlm_reboot *info) + { + struct nsm_handle *cached; ++ struct lockd_net *ln = net_generic(net, lockd_net_id); + + spin_lock(&nsm_lock); + +- cached = nsm_lookup_priv(&info->priv); ++ cached = nsm_lookup_priv(&ln->nsm_handles, &info->priv); + if (unlikely(cached == NULL)) { + spin_unlock(&nsm_lock); + dprintk("lockd: never saw rebooted peer '%.*s' before\n", +diff --git a/fs/lockd/netns.h b/fs/lockd/netns.h +index 5010b55628b4..414da99744e9 100644 +--- a/fs/lockd/netns.h ++++ b/fs/lockd/netns.h +@@ -16,6 +16,7 @@ struct lockd_net { + spinlock_t nsm_clnt_lock; + unsigned int nsm_users; + struct rpc_clnt *nsm_clnt; ++ struct list_head nsm_handles; + }; + + extern int lockd_net_id; +diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c +index 7a318480ab7a..ce05c60ff06d 100644 +--- a/fs/lockd/svc.c ++++ b/fs/lockd/svc.c +@@ -583,6 +583,7 @@ static int lockd_init_net(struct net *net) + INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender); + INIT_LIST_HEAD(&ln->grace_list); + spin_lock_init(&ln->nsm_clnt_lock); ++ INIT_LIST_HEAD(&ln->nsm_handles); + return 0; + } + +diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c +index b147d1ae71fd..09c576f26c7b 100644 +--- a/fs/lockd/svc4proc.c ++++ b/fs/lockd/svc4proc.c +@@ -421,7 +421,7 @@ nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp, + return rpc_system_err; + } + +- nlm_host_rebooted(argp); ++ nlm_host_rebooted(SVC_NET(rqstp), argp); + return rpc_success; + } + +diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c +index 21171f0c6477..fb26b9f522e7 100644 +--- a/fs/lockd/svcproc.c ++++ b/fs/lockd/svcproc.c +@@ -464,7 +464,7 @@ nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp, + return rpc_system_err; + } + +- nlm_host_rebooted(argp); ++ nlm_host_rebooted(SVC_NET(rqstp), argp); + return rpc_success; + } + +diff --git a/fs/namei.c b/fs/namei.c +index 157c3dbacf6c..c87e15ee9255 100644 +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -2917,6 +2917,10 @@ opened: + 
goto exit_fput; + } + out: ++ if (unlikely(error > 0)) { ++ WARN_ON(1); ++ error = -EINVAL; ++ } + if (got_write) + mnt_drop_write(nd->path.mnt); + path_put(&save_parent); +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c +index cfa9163b3bb7..2bdaf57c82d0 100644 +--- a/fs/nfs/nfs4state.c ++++ b/fs/nfs/nfs4state.c +@@ -1452,7 +1452,7 @@ restart: + } + spin_unlock(&state->state_lock); + nfs4_put_open_state(state); +- clear_bit(NFS4CLNT_RECLAIM_NOGRACE, ++ clear_bit(NFS_STATE_RECLAIM_NOGRACE, + &state->flags); + spin_lock(&sp->so_lock); + goto restart; +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index 9f285fb9bab3..b86db1236c7c 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -170,7 +170,7 @@ static void *m_start(struct seq_file *m, loff_t *pos) + if (!priv->task) + return ERR_PTR(-ESRCH); + +- mm = mm_access(priv->task, PTRACE_MODE_READ); ++ mm = mm_access(priv->task, PTRACE_MODE_READ_FSCREDS); + if (!mm || IS_ERR(mm)) + return mm; + down_read(&mm->mmap_sem); +@@ -1044,7 +1044,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, + if (!pm.buffer) + goto out_task; + +- mm = mm_access(task, PTRACE_MODE_READ); ++ mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); + ret = PTR_ERR(mm); + if (!mm || IS_ERR(mm)) + goto out_free; +diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c +index 56123a6f462e..123c19890b14 100644 +--- a/fs/proc/task_nommu.c ++++ b/fs/proc/task_nommu.c +@@ -223,7 +223,7 @@ static void *m_start(struct seq_file *m, loff_t *pos) + if (!priv->task) + return ERR_PTR(-ESRCH); + +- mm = mm_access(priv->task, PTRACE_MODE_READ); ++ mm = mm_access(priv->task, PTRACE_MODE_READ_FSCREDS); + if (!mm || IS_ERR(mm)) { + put_task_struct(priv->task); + priv->task = NULL; +diff --git a/fs/splice.c b/fs/splice.c +index f183f1342c01..3b94a6bba29f 100644 +--- a/fs/splice.c ++++ b/fs/splice.c +@@ -949,6 +949,7 @@ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd, + + splice_from_pipe_begin(sd); + do { ++ cond_resched(); + ret = splice_from_pipe_next(pipe, sd); + if (ret > 0) + ret = splice_from_pipe_feed(pipe, sd, actor); +@@ -1189,7 +1190,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, + long ret, bytes; + umode_t i_mode; + size_t len; +- int i, flags; ++ int i, flags, more; + + /* + * We require the input being a regular file, as we don't want to +@@ -1232,6 +1233,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, + * Don't block on output, we have to drain the direct pipe. + */ + sd->flags &= ~SPLICE_F_NONBLOCK; ++ more = sd->flags & SPLICE_F_MORE; + + while (len) { + size_t read_len; +@@ -1245,6 +1247,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, + sd->total_len = read_len; + + /* ++ * If more data is pending, set SPLICE_F_MORE ++ * If this is the last data and SPLICE_F_MORE was not set ++ * initially, clears it. ++ */ ++ if (read_len < len) ++ sd->flags |= SPLICE_F_MORE; ++ else if (!more) ++ sd->flags &= ~SPLICE_F_MORE; ++ /* + * NOTE: nonblocking mode only applies to the input. We + * must not do the output in nonblocking mode as then we + * could get stuck data in the internal pipe: +diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h +index 9a33c5f7e126..f6c229e2bffa 100644 +--- a/include/linux/enclosure.h ++++ b/include/linux/enclosure.h +@@ -29,7 +29,11 @@ + /* A few generic types ... 
taken from ses-2 */ + enum enclosure_component_type { + ENCLOSURE_COMPONENT_DEVICE = 0x01, ++ ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07, ++ ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14, ++ ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15, + ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17, ++ ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18, + }; + + /* ses-2 common element status */ +diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h +index dcaad79f54ed..0adf073f13b3 100644 +--- a/include/linux/lockd/lockd.h ++++ b/include/linux/lockd/lockd.h +@@ -236,7 +236,8 @@ void nlm_rebind_host(struct nlm_host *); + struct nlm_host * nlm_get_host(struct nlm_host *); + void nlm_shutdown_hosts(void); + void nlm_shutdown_hosts_net(struct net *net); +-void nlm_host_rebooted(const struct nlm_reboot *); ++void nlm_host_rebooted(const struct net *net, ++ const struct nlm_reboot *); + + /* + * Host monitoring +@@ -244,11 +245,13 @@ void nlm_host_rebooted(const struct nlm_reboot *); + int nsm_monitor(const struct nlm_host *host); + void nsm_unmonitor(const struct nlm_host *host); + +-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap, ++struct nsm_handle *nsm_get_handle(const struct net *net, ++ const struct sockaddr *sap, + const size_t salen, + const char *hostname, + const size_t hostname_len); +-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info); ++struct nsm_handle *nsm_reboot_lookup(const struct net *net, ++ const struct nlm_reboot *info); + void nsm_release(struct nsm_handle *nsm); + + /* +diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h +index fc01d5cb4cf1..7d2021d3ee08 100644 +--- a/include/linux/nfs_fs.h ++++ b/include/linux/nfs_fs.h +@@ -578,9 +578,7 @@ static inline void nfs3_forget_cached_acls(struct inode *inode) + + static inline loff_t nfs_size_to_loff_t(__u64 size) + { +- if (size > (__u64) OFFSET_MAX - 1) +- return OFFSET_MAX - 1; +- return (loff_t) size; ++ return min_t(u64, size, OFFSET_MAX); + } + + static inline ino_t +diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h +index ba605015c4d8..0b2d0cbe0bab 100644 +--- a/include/linux/tracepoint.h ++++ b/include/linux/tracepoint.h +@@ -14,8 +14,11 @@ + * See the file COPYING for more details. 
+ */ + ++#include + #include + #include ++#include ++#include + #include + #include + +@@ -126,6 +129,9 @@ static inline void tracepoint_synchronize_unregister(void) + void *it_func; \ + void *__data; \ + \ ++ if (!cpu_online(raw_smp_processor_id())) \ ++ return; \ ++ \ + if (!(cond)) \ + return; \ + prercu; \ +diff --git a/include/net/af_unix.h b/include/net/af_unix.h +index 686760024572..6278e4d32612 100644 +--- a/include/net/af_unix.h ++++ b/include/net/af_unix.h +@@ -6,8 +6,8 @@ + #include + #include + +-extern void unix_inflight(struct file *fp); +-extern void unix_notinflight(struct file *fp); ++extern void unix_inflight(struct user_struct *user, struct file *fp); ++extern void unix_notinflight(struct user_struct *user, struct file *fp); + extern void unix_gc(void); + extern void wait_for_unix_gc(void); + extern struct sock *unix_get_socket(struct file *filp); +diff --git a/include/net/scm.h b/include/net/scm.h +index 8de2d37d2077..d00cd43a990c 100644 +--- a/include/net/scm.h ++++ b/include/net/scm.h +@@ -21,6 +21,7 @@ struct scm_creds { + struct scm_fp_list { + short count; + short max; ++ struct user_struct *user; + struct file *fp[SCM_MAX_FD]; + }; + +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c +index a79d267b64ec..7b0d31b67f6a 100644 +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -1229,6 +1229,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) + if (!desc) + return NULL; + ++ chip_bus_lock(desc); + raw_spin_lock_irqsave(&desc->lock, flags); + + /* +@@ -1242,7 +1243,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) + if (!action) { + WARN(1, "Trying to free already-free IRQ %d\n", irq); + raw_spin_unlock_irqrestore(&desc->lock, flags); +- ++ chip_bus_sync_unlock(desc); + return NULL; + } + +@@ -1265,6 +1266,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) + #endif + + raw_spin_unlock_irqrestore(&desc->lock, flags); ++ chip_bus_sync_unlock(desc); + + unregister_handler_proc(irq, action); + +@@ -1338,9 +1340,7 @@ void free_irq(unsigned int irq, void *dev_id) + desc->affinity_notify = NULL; + #endif + +- chip_bus_lock(desc); + kfree(__free_irq(irq, dev_id)); +- chip_bus_sync_unlock(desc); + } + EXPORT_SYMBOL(free_irq); + +diff --git a/kernel/resource.c b/kernel/resource.c +index d7386986e10e..b8422b135b68 100644 +--- a/kernel/resource.c ++++ b/kernel/resource.c +@@ -961,9 +961,10 @@ struct resource * __request_region(struct resource *parent, + if (!conflict) + break; + if (conflict != parent) { +- parent = conflict; +- if (!(conflict->flags & IORESOURCE_BUSY)) ++ if (!(conflict->flags & IORESOURCE_BUSY)) { ++ parent = conflict; + continue; ++ } + } + if (conflict->flags & flags & IORESOURCE_MUXED) { + add_wait_queue(&muxed_resource_wait, &wait); +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index c771f2547bef..f234c84d36c8 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -1587,7 +1587,6 @@ out: + */ + int wake_up_process(struct task_struct *p) + { +- WARN_ON(task_is_stopped_or_traced(p)); + return try_to_wake_up(p, TASK_NORMAL, 0); + } + EXPORT_SYMBOL(wake_up_process); +diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c +index ce033c7aa2e8..9cff0ab82b63 100644 +--- a/kernel/time/posix-clock.c ++++ b/kernel/time/posix-clock.c +@@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf, + static unsigned int posix_clock_poll(struct file *fp, poll_table *wait) + { + struct posix_clock *clk = get_posix_clock(fp); +- int result = 
0; ++ unsigned int result = 0; + + if (!clk) +- return -ENODEV; ++ return POLLERR; + + if (clk->ops.poll) + result = clk->ops.poll(clk, fp, wait); +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index ab21b8c66535..cb73c4e0741e 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -1948,12 +1948,6 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) + goto again; + } + +-static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) +-{ +- cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; +- cpu_buffer->reader_page->read = 0; +-} +- + static void rb_inc_iter(struct ring_buffer_iter *iter) + { + struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; +@@ -3591,7 +3585,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) + + /* Finally update the reader page to the new head */ + cpu_buffer->reader_page = reader; +- rb_reset_reader_page(cpu_buffer); ++ cpu_buffer->reader_page->read = 0; + + if (overwrite != cpu_buffer->last_overrun) { + cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; +@@ -3601,6 +3595,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) + goto again; + + out: ++ /* Update the read_stamp on the first event */ ++ if (reader && reader->read == 0) ++ cpu_buffer->read_stamp = reader->page->time_stamp; ++ + arch_spin_unlock(&cpu_buffer->lock); + local_irq_restore(flags); + +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c +index 5a898f15bfc6..7d054b7671ec 100644 +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -602,7 +602,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos) + * The ftrace subsystem is for showing formats only. + * They can not be enabled or disabled via the event files. 
+ */ +- if (call->class && call->class->reg) ++ if (call->class && call->class->reg && ++ !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) + return file; + } + +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index fa927fd5778d..fe7c4b91d2e7 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -1450,13 +1450,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, + timer_stats_timer_set_start_info(&dwork->timer); + + dwork->wq = wq; +- /* timer isn't guaranteed to run in this cpu, record earlier */ +- if (cpu == WORK_CPU_UNBOUND) +- cpu = raw_smp_processor_id(); + dwork->cpu = cpu; + timer->expires = jiffies + delay; + +- add_timer_on(timer, cpu); ++ if (unlikely(cpu != WORK_CPU_UNBOUND)) ++ add_timer_on(timer, cpu); ++ else ++ add_timer(timer); + } + + /** +diff --git a/lib/devres.c b/lib/devres.c +index 823533138fa0..20afaf181b27 100644 +--- a/lib/devres.c ++++ b/lib/devres.c +@@ -423,7 +423,7 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask) + if (!iomap) + return; + +- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { ++ for (i = 0; i < PCIM_IOMAP_MAX; i++) { + if (!(mask & (1 << i))) + continue; + +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c +index e3bea2e0821a..025ced8fbb57 100644 +--- a/net/ceph/messenger.c ++++ b/net/ceph/messenger.c +@@ -2277,7 +2277,7 @@ static int read_partial_message(struct ceph_connection *con) + con->in_base_pos = -front_len - middle_len - data_len - + sizeof(m->footer); + con->in_tag = CEPH_MSGR_TAG_READY; +- return 0; ++ return 1; + } else if ((s64)seq - (s64)con->in_seq > 1) { + pr_err("read_partial_message bad seq %lld expected %lld\n", + seq, con->in_seq + 1); +@@ -2310,7 +2310,7 @@ static int read_partial_message(struct ceph_connection *con) + sizeof(m->footer); + con->in_tag = CEPH_MSGR_TAG_READY; + con->in_seq++; +- return 0; ++ return 1; + } + + BUG_ON(!con->in_msg); +diff --git a/net/core/scm.c b/net/core/scm.c +index dbc6bfcdf446..7a6cf8351cde 100644 +--- a/net/core/scm.c ++++ b/net/core/scm.c +@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp) + *fplp = fpl; + fpl->count = 0; + fpl->max = SCM_MAX_FD; ++ fpl->user = NULL; + } + fpp = &fpl->fp[fpl->count]; + +@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp) + *fpp++ = file; + fpl->count++; + } ++ ++ if (!fpl->user) ++ fpl->user = get_uid(current_user()); ++ + return num; + } + +@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm) + scm->fp = NULL; + for (i=fpl->count-1; i>=0; i--) + fput(fpl->fp[i]); ++ free_uid(fpl->user); + kfree(fpl); + } + } +@@ -337,6 +343,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl) + for (i = 0; i < fpl->count; i++) + get_file(fpl->fp[i]); + new_fpl->max = new_fpl->count; ++ new_fpl->user = get_uid(fpl->user); + } + return new_fpl; + } +diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c +index 89aacfd2756d..9ba6d8c7c793 100644 +--- a/net/mac80211/mesh_pathtbl.c ++++ b/net/mac80211/mesh_pathtbl.c +@@ -747,10 +747,8 @@ void mesh_plink_broken(struct sta_info *sta) + static void mesh_path_node_reclaim(struct rcu_head *rp) + { + struct mpath_node *node = container_of(rp, struct mpath_node, rcu); +- struct ieee80211_sub_if_data *sdata = node->mpath->sdata; + + del_timer_sync(&node->mpath->timer); +- atomic_dec(&sdata->u.mesh.mpaths); + kfree(node->mpath); + kfree(node); + } +@@ -758,8 +756,9 @@ static void mesh_path_node_reclaim(struct rcu_head *rp) + /* needs to be called with the corresponding 
hashwlock taken */ + static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) + { +- struct mesh_path *mpath; +- mpath = node->mpath; ++ struct mesh_path *mpath = node->mpath; ++ struct ieee80211_sub_if_data *sdata = node->mpath->sdata; ++ + spin_lock(&mpath->state_lock); + mpath->flags |= MESH_PATH_RESOLVING; + if (mpath->is_gate) +@@ -767,6 +766,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) + hlist_del_rcu(&node->list); + call_rcu(&node->rcu, mesh_path_node_reclaim); + spin_unlock(&mpath->state_lock); ++ atomic_dec(&sdata->u.mesh.mpaths); + atomic_dec(&tbl->entries); + } + +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c +index 0283baedcdfb..9dc979abb461 100644 +--- a/net/netfilter/nf_conntrack_core.c ++++ b/net/netfilter/nf_conntrack_core.c +@@ -311,6 +311,21 @@ static void death_by_timeout(unsigned long ul_conntrack) + nf_ct_put(ct); + } + ++static inline bool ++nf_ct_key_equal(struct nf_conntrack_tuple_hash *h, ++ const struct nf_conntrack_tuple *tuple, ++ u16 zone) ++{ ++ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); ++ ++ /* A conntrack can be recreated with the equal tuple, ++ * so we need to check that the conntrack is confirmed ++ */ ++ return nf_ct_tuple_equal(tuple, &h->tuple) && ++ nf_ct_zone(ct) == zone && ++ nf_ct_is_confirmed(ct); ++} ++ + /* + * Warning : + * - Caller must take a reference on returned object +@@ -332,8 +347,7 @@ ____nf_conntrack_find(struct net *net, u16 zone, + local_bh_disable(); + begin: + hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) { +- if (nf_ct_tuple_equal(tuple, &h->tuple) && +- nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) { ++ if (nf_ct_key_equal(h, tuple, zone)) { + NF_CT_STAT_INC(net, found); + local_bh_enable(); + return h; +@@ -380,8 +394,7 @@ begin: + !atomic_inc_not_zero(&ct->ct_general.use))) + h = NULL; + else { +- if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) || +- nf_ct_zone(ct) != zone)) { ++ if (unlikely(!nf_ct_key_equal(h, tuple, zone))) { + nf_ct_put(ct); + goto begin; + } +diff --git a/net/rds/connection.c b/net/rds/connection.c +index e88bf3976e54..642ad42c416b 100644 +--- a/net/rds/connection.c ++++ b/net/rds/connection.c +@@ -177,12 +177,6 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr, + } + } + +- if (trans == NULL) { +- kmem_cache_free(rds_conn_slab, conn); +- conn = ERR_PTR(-ENODEV); +- goto out; +- } +- + conn->c_trans = trans; + + ret = trans->conn_alloc(conn, gfp); +diff --git a/net/rds/send.c b/net/rds/send.c +index 88eace57dd6b..31c9fa464b11 100644 +--- a/net/rds/send.c ++++ b/net/rds/send.c +@@ -955,11 +955,13 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, + release_sock(sk); + } + +- /* racing with another thread binding seems ok here */ ++ lock_sock(sk); + if (daddr == 0 || rs->rs_bound_addr == 0) { ++ release_sock(sk); + ret = -ENOTCONN; /* XXX not a great errno */ + goto out; + } ++ release_sock(sk); + + /* size of rm including all sgs */ + ret = rds_rm_size(msg, payload_len); +diff --git a/net/rfkill/core.c b/net/rfkill/core.c +index 1cec5e4f3a5e..6563cc04c578 100644 +--- a/net/rfkill/core.c ++++ b/net/rfkill/core.c +@@ -51,7 +51,6 @@ + struct rfkill { + spinlock_t lock; + +- const char *name; + enum rfkill_type type; + + unsigned long state; +@@ -75,6 +74,7 @@ struct rfkill { + struct delayed_work poll_work; + struct work_struct uevent_work; + struct work_struct sync_work; ++ char name[]; + }; + #define to_rfkill(d) 
container_of(d, struct rfkill, dev) + +@@ -871,14 +871,14 @@ struct rfkill * __must_check rfkill_alloc(const char *name, + if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES)) + return NULL; + +- rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); ++ rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL); + if (!rfkill) + return NULL; + + spin_lock_init(&rfkill->lock); + INIT_LIST_HEAD(&rfkill->node); + rfkill->type = type; +- rfkill->name = name; ++ strcpy(rfkill->name, name); + rfkill->ops = ops; + rfkill->data = ops_data; + +@@ -1088,17 +1088,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait) + return res; + } + +-static bool rfkill_readable(struct rfkill_data *data) +-{ +- bool r; +- +- mutex_lock(&data->mtx); +- r = !list_empty(&data->events); +- mutex_unlock(&data->mtx); +- +- return r; +-} +- + static ssize_t rfkill_fop_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) + { +@@ -1115,8 +1104,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf, + goto out; + } + mutex_unlock(&data->mtx); ++ /* since we re-check and it just compares pointers, ++ * using !list_empty() without locking isn't a problem ++ */ + ret = wait_event_interruptible(data->read_wait, +- rfkill_readable(data)); ++ !list_empty(&data->events)); + mutex_lock(&data->mtx); + + if (ret) +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c +index 231b71944c52..a4266b9b2429 100644 +--- a/net/sunrpc/cache.c ++++ b/net/sunrpc/cache.c +@@ -1221,7 +1221,7 @@ int qword_get(char **bpp, char *dest, int bufsize) + if (bp[0] == '\\' && bp[1] == 'x') { + /* HEX STRING */ + bp += 2; +- while (len < bufsize) { ++ while (len < bufsize - 1) { + int h, l; + + h = hex_to_bin(bp[0]); +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index a673c1f4f638..8f118c7c19e1 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -1466,7 +1466,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) + UNIXCB(skb).fp = NULL; + + for (i = scm->fp->count-1; i >= 0; i--) +- unix_notinflight(scm->fp->fp[i]); ++ unix_notinflight(scm->fp->user, scm->fp->fp[i]); + } + + static void unix_destruct_scm(struct sk_buff *skb) +@@ -1531,7 +1531,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) + return -ENOMEM; + + for (i = scm->fp->count - 1; i >= 0; i--) +- unix_inflight(scm->fp->fp[i]); ++ unix_inflight(scm->fp->user, scm->fp->fp[i]); + return max_level; + } + +diff --git a/net/unix/garbage.c b/net/unix/garbage.c +index 06730fe6ad9d..a72182d6750f 100644 +--- a/net/unix/garbage.c ++++ b/net/unix/garbage.c +@@ -122,7 +122,7 @@ struct sock *unix_get_socket(struct file *filp) + * descriptor if it is for an AF_UNIX socket. 
+ */ + +-void unix_inflight(struct file *fp) ++void unix_inflight(struct user_struct *user, struct file *fp) + { + struct sock *s = unix_get_socket(fp); + +@@ -139,11 +139,11 @@ void unix_inflight(struct file *fp) + } + unix_tot_inflight++; + } +- fp->f_cred->user->unix_inflight++; ++ user->unix_inflight++; + spin_unlock(&unix_gc_lock); + } + +-void unix_notinflight(struct file *fp) ++void unix_notinflight(struct user_struct *user, struct file *fp) + { + struct sock *s = unix_get_socket(fp); + +@@ -157,7 +157,7 @@ void unix_notinflight(struct file *fp) + list_del_init(&u->link); + unix_tot_inflight--; + } +- fp->f_cred->user->unix_inflight--; ++ user->unix_inflight--; + spin_unlock(&unix_gc_lock); + } + +diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c +index 9c22317778eb..ee625e3a56ba 100644 +--- a/scripts/recordmcount.c ++++ b/scripts/recordmcount.c +@@ -189,6 +189,20 @@ static void *mmap_file(char const *fname) + addr = umalloc(sb.st_size); + uread(fd_map, addr, sb.st_size); + } ++ if (sb.st_nlink != 1) { ++ /* file is hard-linked, break the hard link */ ++ close(fd_map); ++ if (unlink(fname) < 0) { ++ perror(fname); ++ fail_file(); ++ } ++ fd_map = open(fname, O_RDWR | O_CREAT, sb.st_mode); ++ if (fd_map < 0) { ++ perror(fname); ++ fail_file(); ++ } ++ uwrite(fd_map, addr, sb.st_size); ++ } + return addr; + } + +diff --git a/tools/Makefile b/tools/Makefile +index 41067f304215..b82a15b92b1c 100644 +--- a/tools/Makefile ++++ b/tools/Makefile +@@ -22,6 +22,10 @@ help: + @echo ' from the kernel command line to build and install one of' + @echo ' the tools above' + @echo '' ++ @echo ' $$ make tools/all' ++ @echo '' ++ @echo ' builds all tools.' ++ @echo '' + @echo ' $$ make tools/install' + @echo '' + @echo ' installs all tools.' +@@ -50,6 +54,10 @@ selftests: FORCE + turbostat x86_energy_perf_policy: FORCE + $(call descend,power/x86/$@) + ++all: cgroup cpupower firewire lguest \ ++ perf selftests turbostat usb \ ++ virtio vm net x86_energy_perf_policy ++ + cpupower_install: + $(call descend,power/$(@:_install=),install) + +diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c +index ea475cd03511..ca2d05a07b57 100644 +--- a/virt/kvm/async_pf.c ++++ b/virt/kvm/async_pf.c +@@ -158,7 +158,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, + * do alloc nowait since if we are going to sleep anyway we + * may as well sleep faulting in page + */ +- work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT); ++ work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN); + if (!work) + return 0; + diff --git a/patch/kernel/marvell-default/patch-3.10.99-100.patch b/patch/kernel/marvell-default/patch-3.10.99-100.patch new file mode 100644 index 000000000..da39546c5 --- /dev/null +++ b/patch/kernel/marvell-default/patch-3.10.99-100.patch @@ -0,0 +1,1156 @@ +diff --git a/Makefile b/Makefile +index f1e6491fd7d8..40d4d3bf52c3 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 10 +-SUBLEVEL = 99 ++SUBLEVEL = 100 + EXTRAVERSION = + NAME = TOSSUG Baby Fish + +diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S +index c9305ef1d411..e73982b93537 100644 +--- a/arch/x86/ia32/ia32entry.S ++++ b/arch/x86/ia32/ia32entry.S +@@ -422,6 +422,7 @@ ENTRY(ia32_syscall) + /*CFI_REL_OFFSET cs,CS-RIP*/ + CFI_REL_OFFSET rip,RIP-RIP + PARAVIRT_ADJUST_EXCEPTION_FRAME ++ ASM_CLAC /* Do this early to minimize exposure */ + SWAPGS + /* + * No need to follow this irqs on/off section: the syscall +diff --git a/arch/x86/kernel/acpi/sleep.c 
b/arch/x86/kernel/acpi/sleep.c +index ec94e11807dc..ca0805633f26 100644 +--- a/arch/x86/kernel/acpi/sleep.c ++++ b/arch/x86/kernel/acpi/sleep.c +@@ -16,6 +16,7 @@ + #include + #include + ++#include + #include "../../realmode/rm/wakeup.h" + #include "sleep.h" + +@@ -96,7 +97,13 @@ int acpi_suspend_lowlevel(void) + saved_magic = 0x123456789abcdef0L; + #endif /* CONFIG_64BIT */ + ++ /* ++ * Pause/unpause graph tracing around do_suspend_lowlevel as it has ++ * inconsistent call/return info after it jumps to the wakeup vector. ++ */ ++ pause_graph_tracing(); + do_suspend_lowlevel(); ++ unpause_graph_tracing(); + return 0; + } + +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index 04e7db668362..f3f0801a0e81 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -673,19 +673,18 @@ static int ata_ioc32(struct ata_port *ap) + int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, + int cmd, void __user *arg) + { +- int val = -EINVAL, rc = -EINVAL; ++ unsigned long val; ++ int rc = -EINVAL; + unsigned long flags; + + switch (cmd) { +- case ATA_IOC_GET_IO32: ++ case HDIO_GET_32BIT: + spin_lock_irqsave(ap->lock, flags); + val = ata_ioc32(ap); + spin_unlock_irqrestore(ap->lock, flags); +- if (copy_to_user(arg, &val, 1)) +- return -EFAULT; +- return 0; ++ return put_user(val, (unsigned long __user *)arg); + +- case ATA_IOC_SET_IO32: ++ case HDIO_SET_32BIT: + val = (unsigned long) arg; + rc = 0; + spin_lock_irqsave(ap->lock, flags); +diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c +index 88cd940ece63..453c816b4537 100644 +--- a/drivers/edac/edac_mc_sysfs.c ++++ b/drivers/edac/edac_mc_sysfs.c +@@ -973,21 +973,26 @@ nomem: + */ + int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) + { ++ char *name; + int i, err; + + /* + * The memory controller needs its own bus, in order to avoid + * namespace conflicts at /sys/bus/edac. 
+ */ +- mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); +- if (!mci->bus->name) ++ name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); ++ if (!name) + return -ENOMEM; + ++ mci->bus->name = name; ++ + edac_dbg(0, "creating bus %s\n", mci->bus->name); + + err = bus_register(mci->bus); +- if (err < 0) ++ if (err < 0) { ++ kfree(name); + return err; ++ } + + /* get the /sys/devices/system/edac subsys reference */ + mci->dev.type = &mci_attr_type; +@@ -1071,7 +1076,8 @@ fail: + fail2: + device_unregister(&mci->dev); + bus_unregister(mci->bus); +- kfree(mci->bus->name); ++ kfree(name); ++ + return err; + } + +@@ -1102,10 +1108,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) + + void edac_unregister_sysfs(struct mem_ctl_info *mci) + { ++ const char *name = mci->bus->name; ++ + edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev)); + device_unregister(&mci->dev); + bus_unregister(mci->bus); +- kfree(mci->bus->name); ++ kfree(name); + } + + static void mc_attr_release(struct device *dev) +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c +index 313ccaf25f49..62834322b337 100644 +--- a/drivers/gpu/drm/ast/ast_main.c ++++ b/drivers/gpu/drm/ast/ast_main.c +@@ -124,7 +124,7 @@ static int ast_get_dram_info(struct drm_device *dev) + } while (ast_read32(ast, 0x10000) != 0x01); + data = ast_read32(ast, 0x10004); + +- if (data & 0x400) ++ if (data & 0x40) + ast->dram_bus_width = 16; + else + ast->dram_bus_width = 32; +diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c +index bb166849aa6e..f0bac68254b7 100644 +--- a/drivers/gpu/drm/radeon/radeon_sa.c ++++ b/drivers/gpu/drm/radeon/radeon_sa.c +@@ -349,13 +349,8 @@ int radeon_sa_bo_new(struct radeon_device *rdev, + /* see if we can skip over some allocations */ + } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); + +- for (i = 0; i < RADEON_NUM_RINGS; ++i) +- radeon_fence_ref(fences[i]); +- + spin_unlock(&sa_manager->wq.lock); + r = radeon_fence_wait_any(rdev, fences, false); +- for (i = 0; i < RADEON_NUM_RINGS; ++i) +- radeon_fence_unref(&fences[i]); + spin_lock(&sa_manager->wq.lock); + /* if we have nothing to wait for block */ + if (r == -ENOENT && block) { +diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c +index 2a1b6e037e1a..0134ba32a057 100644 +--- a/drivers/mtd/ubi/upd.c ++++ b/drivers/mtd/ubi/upd.c +@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, + vol->changing_leb = 1; + vol->ch_lnum = req->lnum; + +- vol->upd_buf = vmalloc(req->bytes); ++ vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size)); + if (!vol->upd_buf) + return -ENOMEM; + +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 303f3b3fb65f..84b770461655 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -164,6 +164,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ + { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ + { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ ++ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ + { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 9bab34cf01d4..24366a2afea6 100644 +--- 
a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -271,6 +271,7 @@ static void option_instat_callback(struct urb *urb); + #define TELIT_PRODUCT_UE910_V2 0x1012 + #define TELIT_PRODUCT_LE922_USBCFG0 0x1042 + #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 ++#define TELIT_PRODUCT_LE922_USBCFG5 0x1045 + #define TELIT_PRODUCT_LE920 0x1200 + #define TELIT_PRODUCT_LE910 0x1201 + +@@ -1140,6 +1141,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ ++ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */ ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), +@@ -1191,6 +1194,8 @@ static const struct usb_device_id option_ids[] = { + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3), + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), ++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), + .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c +index d17c5d72cd29..4c86850bd627 100644 +--- a/fs/cifs/cifssmb.c ++++ b/fs/cifs/cifssmb.c +@@ -1389,11 +1389,10 @@ openRetry: + * current bigbuf. + */ + static int +-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) ++discard_remaining_data(struct TCP_Server_Info *server) + { + unsigned int rfclen = get_rfc1002_length(server->smallbuf); + int remaining = rfclen + 4 - server->total_read; +- struct cifs_readdata *rdata = mid->callback_data; + + while (remaining > 0) { + int length; +@@ -1407,10 +1406,20 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) + remaining -= length; + } + +- dequeue_mid(mid, rdata->result); + return 0; + } + ++static int ++cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) ++{ ++ int length; ++ struct cifs_readdata *rdata = mid->callback_data; ++ ++ length = discard_remaining_data(server); ++ dequeue_mid(mid, rdata->result); ++ return length; ++} ++ + int + cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) + { +@@ -1439,6 +1448,12 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) + return length; + server->total_read += length; + ++ if (server->ops->is_status_pending && ++ server->ops->is_status_pending(buf, server, 0)) { ++ discard_remaining_data(server); ++ return -1; ++ } ++ + /* Was the SMB read successful? */ + rdata->result = server->ops->map_error(buf, false); + if (rdata->result != 0) { +diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking +index 3ea36554107f..8918ac905a3b 100644 +--- a/fs/jffs2/README.Locking ++++ b/fs/jffs2/README.Locking +@@ -2,10 +2,6 @@ + JFFS2 LOCKING DOCUMENTATION + --------------------------- + +-At least theoretically, JFFS2 does not require the Big Kernel Lock +-(BKL), which was always helpfully obtained for it by Linux 2.4 VFS +-code. It has its own locking, as described below. 
+- + This document attempts to describe the existing locking rules for + JFFS2. It is not expected to remain perfectly up to date, but ought to + be fairly close. +@@ -69,6 +65,7 @@ Ordering constraints: + any f->sem held. + 2. Never attempt to lock two file mutexes in one thread. + No ordering rules have been made for doing so. ++ 3. Never lock a page cache page with f->sem held. + + + erase_completion_lock spinlock +diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c +index a3750f902adc..c1f04947d7dc 100644 +--- a/fs/jffs2/build.c ++++ b/fs/jffs2/build.c +@@ -49,7 +49,8 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) + + + static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, +- struct jffs2_inode_cache *ic) ++ struct jffs2_inode_cache *ic, ++ int *dir_hardlinks) + { + struct jffs2_full_dirent *fd; + +@@ -68,19 +69,21 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, + dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", + fd->name, fd->ino, ic->ino); + jffs2_mark_node_obsolete(c, fd->raw); ++ /* Clear the ic/raw union so it doesn't cause problems later. */ ++ fd->ic = NULL; + continue; + } + ++ /* From this point, fd->raw is no longer used so we can set fd->ic */ ++ fd->ic = child_ic; ++ child_ic->pino_nlink++; ++ /* If we appear (at this stage) to have hard-linked directories, ++ * set a flag to trigger a scan later */ + if (fd->type == DT_DIR) { +- if (child_ic->pino_nlink) { +- JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", +- fd->name, fd->ino, ic->ino); +- /* TODO: What do we do about it? */ +- } else { +- child_ic->pino_nlink = ic->ino; +- } +- } else +- child_ic->pino_nlink++; ++ child_ic->flags |= INO_FLAGS_IS_DIR; ++ if (child_ic->pino_nlink > 1) ++ *dir_hardlinks = 1; ++ } + + dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino); + /* Can't free scan_dents so far. We might need them in pass 2 */ +@@ -94,8 +97,7 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, + */ + static int jffs2_build_filesystem(struct jffs2_sb_info *c) + { +- int ret; +- int i; ++ int ret, i, dir_hardlinks = 0; + struct jffs2_inode_cache *ic; + struct jffs2_full_dirent *fd; + struct jffs2_full_dirent *dead_fds = NULL; +@@ -119,7 +121,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + /* Now scan the directory tree, increasing nlink according to every dirent found. */ + for_each_inode(i, c, ic) { + if (ic->scan_dents) { +- jffs2_build_inode_pass1(c, ic); ++ jffs2_build_inode_pass1(c, ic, &dir_hardlinks); + cond_resched(); + } + } +@@ -155,6 +157,20 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + } + + dbg_fsbuild("pass 2a complete\n"); ++ ++ if (dir_hardlinks) { ++ /* If we detected directory hardlinks earlier, *hopefully* ++ * they are gone now because some of the links were from ++ * dead directories which still had some old dirents lying ++ * around and not yet garbage-collected, but which have ++ * been discarded above. So clear the pino_nlink field ++ * in each directory, so that the final scan below can ++ * print appropriate warnings. 
*/ ++ for_each_inode(i, c, ic) { ++ if (ic->flags & INO_FLAGS_IS_DIR) ++ ic->pino_nlink = 0; ++ } ++ } + dbg_fsbuild("freeing temporary data structures\n"); + + /* Finally, we can scan again and free the dirent structs */ +@@ -162,6 +178,33 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + while(ic->scan_dents) { + fd = ic->scan_dents; + ic->scan_dents = fd->next; ++ /* We do use the pino_nlink field to count nlink of ++ * directories during fs build, so set it to the ++ * parent ino# now. Now that there's hopefully only ++ * one. */ ++ if (fd->type == DT_DIR) { ++ if (!fd->ic) { ++ /* We'll have complained about it and marked the coresponding ++ raw node obsolete already. Just skip it. */ ++ continue; ++ } ++ ++ /* We *have* to have set this in jffs2_build_inode_pass1() */ ++ BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR)); ++ ++ /* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks ++ * is set. Otherwise, we know this should never trigger anyway, so ++ * we don't do the check. And ic->pino_nlink still contains the nlink ++ * value (which is 1). */ ++ if (dir_hardlinks && fd->ic->pino_nlink) { ++ JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n", ++ fd->name, fd->ino, ic->ino, fd->ic->pino_nlink); ++ /* Should we unlink it from its previous parent? */ ++ } ++ ++ /* For directories, ic->pino_nlink holds that parent inode # */ ++ fd->ic->pino_nlink = ic->ino; ++ } + jffs2_free_full_dirent(fd); + } + ic->scan_dents = NULL; +@@ -240,11 +283,7 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, + + /* Reduce nlink of the child. If it's now zero, stick it on the + dead_fds list to be cleaned up later. Else just free the fd */ +- +- if (fd->type == DT_DIR) +- child_ic->pino_nlink = 0; +- else +- child_ic->pino_nlink--; ++ child_ic->pino_nlink--; + + if (!child_ic->pino_nlink) { + dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n", +diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c +index 1506673c087e..60ef3fb707ff 100644 +--- a/fs/jffs2/file.c ++++ b/fs/jffs2/file.c +@@ -138,39 +138,33 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + struct page *pg; + struct inode *inode = mapping->host; + struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); +- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); +- struct jffs2_raw_inode ri; +- uint32_t alloc_len = 0; + pgoff_t index = pos >> PAGE_CACHE_SHIFT; + uint32_t pageofs = index << PAGE_CACHE_SHIFT; + int ret = 0; + +- jffs2_dbg(1, "%s()\n", __func__); +- +- if (pageofs > inode->i_size) { +- ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, +- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); +- if (ret) +- return ret; +- } +- +- mutex_lock(&f->sem); + pg = grab_cache_page_write_begin(mapping, index, flags); +- if (!pg) { +- if (alloc_len) +- jffs2_complete_reservation(c); +- mutex_unlock(&f->sem); ++ if (!pg) + return -ENOMEM; +- } + *pagep = pg; + +- if (alloc_len) { ++ jffs2_dbg(1, "%s()\n", __func__); ++ ++ if (pageofs > inode->i_size) { + /* Make new hole frag from old EOF to new page */ ++ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); ++ struct jffs2_raw_inode ri; + struct jffs2_full_dnode *fn; ++ uint32_t alloc_len; + + jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", + (unsigned int)inode->i_size, pageofs); + ++ ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, ++ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); ++ if (ret) ++ goto out_page; ++ ++ 
mutex_lock(&f->sem); + memset(&ri, 0, sizeof(ri)); + + ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); +@@ -197,6 +191,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + if (IS_ERR(fn)) { + ret = PTR_ERR(fn); + jffs2_complete_reservation(c); ++ mutex_unlock(&f->sem); + goto out_page; + } + ret = jffs2_add_full_dnode_to_inode(c, f, fn); +@@ -211,10 +206,12 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + jffs2_mark_node_obsolete(c, fn->raw); + jffs2_free_full_dnode(fn); + jffs2_complete_reservation(c); ++ mutex_unlock(&f->sem); + goto out_page; + } + jffs2_complete_reservation(c); + inode->i_size = pageofs; ++ mutex_unlock(&f->sem); + } + + /* +@@ -223,18 +220,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + * case of a short-copy. + */ + if (!PageUptodate(pg)) { ++ mutex_lock(&f->sem); + ret = jffs2_do_readpage_nolock(inode, pg); ++ mutex_unlock(&f->sem); + if (ret) + goto out_page; + } +- mutex_unlock(&f->sem); + jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags); + return ret; + + out_page: + unlock_page(pg); + page_cache_release(pg); +- mutex_unlock(&f->sem); + return ret; + } + +diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c +index 5a2dec2b064c..95d5880a63ee 100644 +--- a/fs/jffs2/gc.c ++++ b/fs/jffs2/gc.c +@@ -1296,14 +1296,17 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era + BUG_ON(start > orig_start); + } + +- /* First, use readpage() to read the appropriate page into the page cache */ +- /* Q: What happens if we actually try to GC the _same_ page for which commit_write() +- * triggered garbage collection in the first place? +- * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the +- * page OK. We'll actually write it out again in commit_write, which is a little +- * suboptimal, but at least we're correct. +- */ ++ /* The rules state that we must obtain the page lock *before* f->sem, so ++ * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's ++ * actually going to *change* so we're safe; we only allow reading. ++ * ++ * It is important to note that jffs2_write_begin() will ensure that its ++ * page is marked Uptodate before allocating space. That means that if we ++ * end up here trying to GC the *same* page that jffs2_write_begin() is ++ * trying to write out, read_cache_page() will not deadlock. 
*/ ++ mutex_unlock(&f->sem); + pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg); ++ mutex_lock(&f->sem); + + if (IS_ERR(pg_ptr)) { + pr_warn("read_cache_page() returned error: %ld\n", +diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h +index fa35ff79ab35..0637271f3770 100644 +--- a/fs/jffs2/nodelist.h ++++ b/fs/jffs2/nodelist.h +@@ -194,6 +194,7 @@ struct jffs2_inode_cache { + #define INO_STATE_CLEARING 6 /* In clear_inode() */ + + #define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */ ++#define INO_FLAGS_IS_DIR 0x02 /* is a directory */ + + #define RAWNODE_CLASS_INODE_CACHE 0 + #define RAWNODE_CLASS_XATTR_DATUM 1 +@@ -249,7 +250,10 @@ struct jffs2_readinode_info + + struct jffs2_full_dirent + { +- struct jffs2_raw_node_ref *raw; ++ union { ++ struct jffs2_raw_node_ref *raw; ++ struct jffs2_inode_cache *ic; /* Just during part of build */ ++ }; + struct jffs2_full_dirent *next; + uint32_t version; + uint32_t ino; /* == zero for unlink */ +diff --git a/fs/locks.c b/fs/locks.c +index 0274c953b07d..f7b1de7e6735 100644 +--- a/fs/locks.c ++++ b/fs/locks.c +@@ -1852,7 +1852,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, + goto out; + } + +-again: + error = flock_to_posix_lock(filp, file_lock, &flock); + if (error) + goto out; +@@ -1883,19 +1882,22 @@ again: + * Attempt to detect a close/fcntl race and recover by + * releasing the lock that was just acquired. + */ +- /* +- * we need that spin_lock here - it prevents reordering between +- * update of inode->i_flock and check for it done in close(). +- * rcu_read_lock() wouldn't do. +- */ +- spin_lock(¤t->files->file_lock); +- f = fcheck(fd); +- spin_unlock(¤t->files->file_lock); +- if (!error && f != filp && flock.l_type != F_UNLCK) { +- flock.l_type = F_UNLCK; +- goto again; ++ if (!error && file_lock->fl_type != F_UNLCK) { ++ /* ++ * We need that spin_lock here - it prevents reordering between ++ * update of inode->i_flock and check for it done in ++ * close(). rcu_read_lock() wouldn't do. ++ */ ++ spin_lock(¤t->files->file_lock); ++ f = fcheck(fd); ++ spin_unlock(¤t->files->file_lock); ++ if (f != filp) { ++ file_lock->fl_type = F_UNLCK; ++ error = do_lock_file_wait(filp, cmd, file_lock); ++ WARN_ON_ONCE(error); ++ error = -EBADF; ++ } + } +- + out: + locks_free_lock(file_lock); + return error; +@@ -1970,7 +1972,6 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, + goto out; + } + +-again: + error = flock64_to_posix_lock(filp, file_lock, &flock); + if (error) + goto out; +@@ -2001,14 +2002,22 @@ again: + * Attempt to detect a close/fcntl race and recover by + * releasing the lock that was just acquired. + */ +- spin_lock(¤t->files->file_lock); +- f = fcheck(fd); +- spin_unlock(¤t->files->file_lock); +- if (!error && f != filp && flock.l_type != F_UNLCK) { +- flock.l_type = F_UNLCK; +- goto again; ++ if (!error && file_lock->fl_type != F_UNLCK) { ++ /* ++ * We need that spin_lock here - it prevents reordering between ++ * update of inode->i_flock and check for it done in ++ * close(). rcu_read_lock() wouldn't do. 
++ */ ++ spin_lock(¤t->files->file_lock); ++ f = fcheck(fd); ++ spin_unlock(¤t->files->file_lock); ++ if (f != filp) { ++ file_lock->fl_type = F_UNLCK; ++ error = do_lock_file_wait(filp, cmd, file_lock); ++ WARN_ON_ONCE(error); ++ error = -EBADF; ++ } + } +- + out: + locks_free_lock(file_lock); + return error; +diff --git a/include/linux/ata.h b/include/linux/ata.h +index ee0bd9524055..f60ffe29b3a1 100644 +--- a/include/linux/ata.h ++++ b/include/linux/ata.h +@@ -477,8 +477,8 @@ enum ata_tf_protocols { + }; + + enum ata_ioctls { +- ATA_IOC_GET_IO32 = 0x309, +- ATA_IOC_SET_IO32 = 0x324, ++ ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */ ++ ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */ + }; + + /* core structures */ +diff --git a/include/linux/libata.h b/include/linux/libata.h +index 8ad0771b88ab..8017e5c459cf 100644 +--- a/include/linux/libata.h ++++ b/include/linux/libata.h +@@ -666,7 +666,7 @@ struct ata_device { + union { + u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ + u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ +- }; ++ } ____cacheline_aligned; + + /* DEVSLP Timing Variables from Identify Device Data Log */ + u8 devslp_timing[ATA_LOG_DEVSLP_SIZE]; +diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c +index 2bb95a7a8809..c14565bde887 100644 +--- a/sound/core/control_compat.c ++++ b/sound/core/control_compat.c +@@ -170,6 +170,19 @@ struct snd_ctl_elem_value32 { + unsigned char reserved[128]; + }; + ++#ifdef CONFIG_X86_X32 ++/* x32 has a different alignment for 64bit values from ia32 */ ++struct snd_ctl_elem_value_x32 { ++ struct snd_ctl_elem_id id; ++ unsigned int indirect; /* bit-field causes misalignment */ ++ union { ++ s32 integer[128]; ++ unsigned char data[512]; ++ s64 integer64[64]; ++ } value; ++ unsigned char reserved[128]; ++}; ++#endif /* CONFIG_X86_X32 */ + + /* get the value type and count of the control */ + static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id, +@@ -219,9 +232,11 @@ static int get_elem_size(int type, int count) + + static int copy_ctl_value_from_user(struct snd_card *card, + struct snd_ctl_elem_value *data, +- struct snd_ctl_elem_value32 __user *data32, ++ void __user *userdata, ++ void __user *valuep, + int *typep, int *countp) + { ++ struct snd_ctl_elem_value32 __user *data32 = userdata; + int i, type, size; + int uninitialized_var(count); + unsigned int indirect; +@@ -239,8 +254,9 @@ static int copy_ctl_value_from_user(struct snd_card *card, + if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || + type == SNDRV_CTL_ELEM_TYPE_INTEGER) { + for (i = 0; i < count; i++) { ++ s32 __user *intp = valuep; + int val; +- if (get_user(val, &data32->value.integer[i])) ++ if (get_user(val, &intp[i])) + return -EFAULT; + data->value.integer.value[i] = val; + } +@@ -250,8 +266,7 @@ static int copy_ctl_value_from_user(struct snd_card *card, + printk(KERN_ERR "snd_ioctl32_ctl_elem_value: unknown type %d\n", type); + return -EINVAL; + } +- if (copy_from_user(data->value.bytes.data, +- data32->value.data, size)) ++ if (copy_from_user(data->value.bytes.data, valuep, size)) + return -EFAULT; + } + +@@ -261,7 +276,8 @@ static int copy_ctl_value_from_user(struct snd_card *card, + } + + /* restore the value to 32bit */ +-static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32, ++static int copy_ctl_value_to_user(void __user *userdata, ++ void __user *valuep, + struct snd_ctl_elem_value *data, + int type, int count) + { +@@ -270,22 +286,22 @@ static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user 
*data32, + if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || + type == SNDRV_CTL_ELEM_TYPE_INTEGER) { + for (i = 0; i < count; i++) { ++ s32 __user *intp = valuep; + int val; + val = data->value.integer.value[i]; +- if (put_user(val, &data32->value.integer[i])) ++ if (put_user(val, &intp[i])) + return -EFAULT; + } + } else { + size = get_elem_size(type, count); +- if (copy_to_user(data32->value.data, +- data->value.bytes.data, size)) ++ if (copy_to_user(valuep, data->value.bytes.data, size)) + return -EFAULT; + } + return 0; + } + +-static int snd_ctl_elem_read_user_compat(struct snd_card *card, +- struct snd_ctl_elem_value32 __user *data32) ++static int ctl_elem_read_user(struct snd_card *card, ++ void __user *userdata, void __user *valuep) + { + struct snd_ctl_elem_value *data; + int err, type, count; +@@ -294,7 +310,9 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card, + if (data == NULL) + return -ENOMEM; + +- if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0) ++ err = copy_ctl_value_from_user(card, data, userdata, valuep, ++ &type, &count); ++ if (err < 0) + goto error; + + snd_power_lock(card); +@@ -303,14 +321,15 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card, + err = snd_ctl_elem_read(card, data); + snd_power_unlock(card); + if (err >= 0) +- err = copy_ctl_value_to_user(data32, data, type, count); ++ err = copy_ctl_value_to_user(userdata, valuep, data, ++ type, count); + error: + kfree(data); + return err; + } + +-static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, +- struct snd_ctl_elem_value32 __user *data32) ++static int ctl_elem_write_user(struct snd_ctl_file *file, ++ void __user *userdata, void __user *valuep) + { + struct snd_ctl_elem_value *data; + struct snd_card *card = file->card; +@@ -320,7 +339,9 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, + if (data == NULL) + return -ENOMEM; + +- if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0) ++ err = copy_ctl_value_from_user(card, data, userdata, valuep, ++ &type, &count); ++ if (err < 0) + goto error; + + snd_power_lock(card); +@@ -329,12 +350,39 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, + err = snd_ctl_elem_write(card, file, data); + snd_power_unlock(card); + if (err >= 0) +- err = copy_ctl_value_to_user(data32, data, type, count); ++ err = copy_ctl_value_to_user(userdata, valuep, data, ++ type, count); + error: + kfree(data); + return err; + } + ++static int snd_ctl_elem_read_user_compat(struct snd_card *card, ++ struct snd_ctl_elem_value32 __user *data32) ++{ ++ return ctl_elem_read_user(card, data32, &data32->value); ++} ++ ++static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, ++ struct snd_ctl_elem_value32 __user *data32) ++{ ++ return ctl_elem_write_user(file, data32, &data32->value); ++} ++ ++#ifdef CONFIG_X86_X32 ++static int snd_ctl_elem_read_user_x32(struct snd_card *card, ++ struct snd_ctl_elem_value_x32 __user *data32) ++{ ++ return ctl_elem_read_user(card, data32, &data32->value); ++} ++ ++static int snd_ctl_elem_write_user_x32(struct snd_ctl_file *file, ++ struct snd_ctl_elem_value_x32 __user *data32) ++{ ++ return ctl_elem_write_user(file, data32, &data32->value); ++} ++#endif /* CONFIG_X86_X32 */ ++ + /* add or replace a user control */ + static int snd_ctl_elem_add_compat(struct snd_ctl_file *file, + struct snd_ctl_elem_info32 __user *data32, +@@ -393,6 +441,10 @@ enum { + SNDRV_CTL_IOCTL_ELEM_WRITE32 = _IOWR('U', 0x13, struct 
snd_ctl_elem_value32), + SNDRV_CTL_IOCTL_ELEM_ADD32 = _IOWR('U', 0x17, struct snd_ctl_elem_info32), + SNDRV_CTL_IOCTL_ELEM_REPLACE32 = _IOWR('U', 0x18, struct snd_ctl_elem_info32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_CTL_IOCTL_ELEM_READ_X32 = _IOWR('U', 0x12, struct snd_ctl_elem_value_x32), ++ SNDRV_CTL_IOCTL_ELEM_WRITE_X32 = _IOWR('U', 0x13, struct snd_ctl_elem_value_x32), ++#endif /* CONFIG_X86_X32 */ + }; + + static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -431,6 +483,12 @@ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, uns + return snd_ctl_elem_add_compat(ctl, argp, 0); + case SNDRV_CTL_IOCTL_ELEM_REPLACE32: + return snd_ctl_elem_add_compat(ctl, argp, 1); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_CTL_IOCTL_ELEM_READ_X32: ++ return snd_ctl_elem_read_user_x32(ctl->card, argp); ++ case SNDRV_CTL_IOCTL_ELEM_WRITE_X32: ++ return snd_ctl_elem_write_user_x32(ctl, argp); ++#endif /* CONFIG_X86_X32 */ + } + + down_read(&snd_ioctl_rwsem); +diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c +index 5268c1f58c25..09a89094dcf7 100644 +--- a/sound/core/rawmidi_compat.c ++++ b/sound/core/rawmidi_compat.c +@@ -94,9 +94,58 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile, + return 0; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has 64bit timespec and 64bit alignment */ ++struct snd_rawmidi_status_x32 { ++ s32 stream; ++ u32 rsvd; /* alignment */ ++ struct timespec tstamp; ++ u32 avail; ++ u32 xruns; ++ unsigned char reserved[16]; ++} __attribute__((packed)); ++ ++#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst)) ++ ++static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile, ++ struct snd_rawmidi_status_x32 __user *src) ++{ ++ int err; ++ struct snd_rawmidi_status status; ++ ++ if (rfile->output == NULL) ++ return -EINVAL; ++ if (get_user(status.stream, &src->stream)) ++ return -EFAULT; ++ ++ switch (status.stream) { ++ case SNDRV_RAWMIDI_STREAM_OUTPUT: ++ err = snd_rawmidi_output_status(rfile->output, &status); ++ break; ++ case SNDRV_RAWMIDI_STREAM_INPUT: ++ err = snd_rawmidi_input_status(rfile->input, &status); ++ break; ++ default: ++ return -EINVAL; ++ } ++ if (err < 0) ++ return err; ++ ++ if (put_timespec(&status.tstamp, &src->tstamp) || ++ put_user(status.avail, &src->avail) || ++ put_user(status.xruns, &src->xruns)) ++ return -EFAULT; ++ ++ return 0; ++} ++#endif /* CONFIG_X86_X32 */ ++ + enum { + SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32), + SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_RAWMIDI_IOCTL_STATUS_X32 = _IOWR('W', 0x20, struct snd_rawmidi_status_x32), ++#endif /* CONFIG_X86_X32 */ + }; + + static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -115,6 +164,10 @@ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsign + return snd_rawmidi_ioctl_params_compat(rfile, argp); + case SNDRV_RAWMIDI_IOCTL_STATUS32: + return snd_rawmidi_ioctl_status_compat(rfile, argp); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_RAWMIDI_IOCTL_STATUS_X32: ++ return snd_rawmidi_ioctl_status_x32(rfile, argp); ++#endif /* CONFIG_X86_X32 */ + } + return -ENOIOCTLCMD; + } +diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c +index 8d4d5e853efe..ab774954c985 100644 +--- a/sound/core/seq/oss/seq_oss.c ++++ b/sound/core/seq/oss/seq_oss.c +@@ -150,8 +150,6 @@ odev_release(struct inode *inode, 
struct file *file) + if ((dp = file->private_data) == NULL) + return 0; + +- snd_seq_oss_drain_write(dp); +- + mutex_lock(®ister_mutex); + snd_seq_oss_release(dp); + mutex_unlock(®ister_mutex); +diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h +index c0154a959d55..2464112b08ad 100644 +--- a/sound/core/seq/oss/seq_oss_device.h ++++ b/sound/core/seq/oss/seq_oss_device.h +@@ -131,7 +131,6 @@ int snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int co + unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait); + + void snd_seq_oss_reset(struct seq_oss_devinfo *dp); +-void snd_seq_oss_drain_write(struct seq_oss_devinfo *dp); + + /* */ + void snd_seq_oss_process_queue(struct seq_oss_devinfo *dp, abstime_t time); +diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c +index b3f39b5ed742..f9e09e458227 100644 +--- a/sound/core/seq/oss/seq_oss_init.c ++++ b/sound/core/seq/oss/seq_oss_init.c +@@ -457,23 +457,6 @@ snd_seq_oss_release(struct seq_oss_devinfo *dp) + + + /* +- * Wait until the queue is empty (if we don't have nonblock) +- */ +-void +-snd_seq_oss_drain_write(struct seq_oss_devinfo *dp) +-{ +- if (! dp->timer->running) +- return; +- if (is_write_mode(dp->file_mode) && !is_nonblock_mode(dp->file_mode) && +- dp->writeq) { +- debug_printk(("syncing..\n")); +- while (snd_seq_oss_writeq_sync(dp->writeq)) +- ; +- } +-} +- +- +-/* + * reset sequencer devices + */ + void +diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c +index e05802ae6e1b..2e908225d754 100644 +--- a/sound/core/timer_compat.c ++++ b/sound/core/timer_compat.c +@@ -70,13 +70,14 @@ static int snd_timer_user_status_compat(struct file *file, + struct snd_timer_status32 __user *_status) + { + struct snd_timer_user *tu; +- struct snd_timer_status status; ++ struct snd_timer_status32 status; + + tu = file->private_data; + if (snd_BUG_ON(!tu->timeri)) + return -ENXIO; + memset(&status, 0, sizeof(status)); +- status.tstamp = tu->tstamp; ++ status.tstamp.tv_sec = tu->tstamp.tv_sec; ++ status.tstamp.tv_nsec = tu->tstamp.tv_nsec; + status.resolution = snd_timer_resolution(tu->timeri); + status.lost = tu->timeri->lost; + status.overrun = tu->overrun; +@@ -88,12 +89,21 @@ static int snd_timer_user_status_compat(struct file *file, + return 0; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has the same struct as x86-64 */ ++#define snd_timer_user_status_x32(file, s) \ ++ snd_timer_user_status(file, s) ++#endif /* CONFIG_X86_X32 */ ++ + /* + */ + + enum { + SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32), + SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_TIMER_IOCTL_STATUS_X32 = _IOW('T', 0x14, struct snd_timer_status), ++#endif /* CONFIG_X86_X32 */ + }; + + static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -122,6 +132,10 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns + return snd_timer_user_info_compat(file, argp); + case SNDRV_TIMER_IOCTL_STATUS32: + return snd_timer_user_status_compat(file, argp); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_TIMER_IOCTL_STATUS_X32: ++ return snd_timer_user_status_x32(file, argp); ++#endif /* CONFIG_X86_X32 */ + } + return -ENOIOCTLCMD; + } +diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c +index 94084cdb130c..9a281f45eb9c 100644 +--- a/sound/pci/rme9652/hdsp.c ++++ b/sound/pci/rme9652/hdsp.c +@@ -2923,7 
+2923,7 @@ static int snd_hdsp_get_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl + { + struct hdsp *hdsp = snd_kcontrol_chip(kcontrol); + +- ucontrol->value.enumerated.item[0] = hdsp_dds_offset(hdsp); ++ ucontrol->value.integer.value[0] = hdsp_dds_offset(hdsp); + return 0; + } + +@@ -2935,7 +2935,7 @@ static int snd_hdsp_put_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl + + if (!snd_hdsp_use_is_exclusive(hdsp)) + return -EBUSY; +- val = ucontrol->value.enumerated.item[0]; ++ val = ucontrol->value.integer.value[0]; + spin_lock_irq(&hdsp->lock); + if (val != hdsp_dds_offset(hdsp)) + change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 1 : 0; +diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c +index dd910d249987..8444098d2a8e 100644 +--- a/sound/pci/rme9652/hdspm.c ++++ b/sound/pci/rme9652/hdspm.c +@@ -1423,6 +1423,9 @@ static void hdspm_set_dds_value(struct hdspm *hdspm, int rate) + { + u64 n; + ++ if (snd_BUG_ON(rate <= 0)) ++ return; ++ + if (rate >= 112000) + rate /= 4; + else if (rate >= 56000) +@@ -2045,6 +2048,8 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm) + } else { + /* slave mode, return external sample rate */ + rate = hdspm_external_sample_rate(hdspm); ++ if (!rate) ++ rate = hdspm->system_sample_rate; + } + } + +@@ -2090,8 +2095,11 @@ static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol, + ucontrol) + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); ++ int rate = ucontrol->value.integer.value[0]; + +- hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]); ++ if (rate < 27000 || rate > 207000) ++ return -EINVAL; ++ hdspm_set_dds_value(hdspm, ucontrol->value.integer.value[0]); + return 0; + } + +@@ -4199,7 +4207,7 @@ static int snd_hdspm_get_tco_word_term(struct snd_kcontrol *kcontrol, + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); + +- ucontrol->value.enumerated.item[0] = hdspm->tco->term; ++ ucontrol->value.integer.value[0] = hdspm->tco->term; + + return 0; + } +@@ -4210,8 +4218,8 @@ static int snd_hdspm_put_tco_word_term(struct snd_kcontrol *kcontrol, + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); + +- if (hdspm->tco->term != ucontrol->value.enumerated.item[0]) { +- hdspm->tco->term = ucontrol->value.enumerated.item[0]; ++ if (hdspm->tco->term != ucontrol->value.integer.value[0]) { ++ hdspm->tco->term = ucontrol->value.integer.value[0]; + + hdspm_tco_write(hdspm); + diff --git a/patch/kernel/marvell-default/rpatch-3.10.100-101.patch b/patch/kernel/marvell-default/rpatch-3.10.100-101.patch new file mode 100644 index 000000000..d9647df43 --- /dev/null +++ b/patch/kernel/marvell-default/rpatch-3.10.100-101.patch @@ -0,0 +1,1493 @@ +diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt +index c477af086e65..686a64bba775 100644 +--- a/Documentation/filesystems/efivarfs.txt ++++ b/Documentation/filesystems/efivarfs.txt +@@ -14,3 +14,10 @@ filesystem. + efivarfs is typically mounted like this, + + mount -t efivarfs none /sys/firmware/efi/efivars ++ ++Due to the presence of numerous firmware bugs where removing non-standard ++UEFI variables causes the system firmware to fail to POST, efivarfs ++files that are not well-known standardized variables are created ++as immutable files. This doesn't prevent removal - "chattr -i" will work - ++but it does prevent this kind of failure from being accomplished ++accidentally. 
+diff --git a/Makefile b/Makefile +index 40d4d3bf52c3..4be9e643cef0 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 10 +-SUBLEVEL = 100 ++SUBLEVEL = 101 + EXTRAVERSION = + NAME = TOSSUG Baby Fish + +diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c +index 6ee59a0eb268..48b4cf6b2a24 100644 +--- a/arch/powerpc/kernel/module_64.c ++++ b/arch/powerpc/kernel/module_64.c +@@ -192,7 +192,7 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) + if (syms[i].st_shndx == SHN_UNDEF) { + char *name = strtab + syms[i].st_name; + if (name[0] == '.') +- memmove(name, name+1, strlen(name)); ++ syms[i].st_name++; + } + } + } +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 04cc2fa7744f..335fe70967a8 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -1487,6 +1487,13 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, + return; + } + break; ++ case MSR_IA32_PEBS_ENABLE: ++ /* PEBS needs a quiescent period after being disabled (to write ++ * a record). Disabling PEBS through VMX MSR swapping doesn't ++ * provide that period, so a CPU could write host's record into ++ * guest's memory. ++ */ ++ wrmsrl(MSR_IA32_PEBS_ENABLE, 0); + } + + for (i = 0; i < m->nr; ++i) +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 41ba726c1ce2..7f2b6dec4b2b 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -1941,6 +1941,8 @@ static void accumulate_steal_time(struct kvm_vcpu *vcpu) + + static void record_steal_time(struct kvm_vcpu *vcpu) + { ++ accumulate_steal_time(vcpu); ++ + if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) + return; + +@@ -2074,12 +2076,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + if (!(data & KVM_MSR_ENABLED)) + break; + +- vcpu->arch.st.last_steal = current->sched_info.run_delay; +- +- preempt_disable(); +- accumulate_steal_time(vcpu); +- preempt_enable(); +- + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + + break; +@@ -2758,7 +2754,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) + vcpu->cpu = cpu; + } + +- accumulate_steal_time(vcpu); + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + } + +diff --git a/crypto/af_alg.c b/crypto/af_alg.c +index 0ca108f3c840..1aaa555fab56 100644 +--- a/crypto/af_alg.c ++++ b/crypto/af_alg.c +@@ -125,23 +125,6 @@ int af_alg_release(struct socket *sock) + } + EXPORT_SYMBOL_GPL(af_alg_release); + +-void af_alg_release_parent(struct sock *sk) +-{ +- struct alg_sock *ask = alg_sk(sk); +- bool last; +- +- sk = ask->parent; +- ask = alg_sk(sk); +- +- lock_sock(sk); +- last = !--ask->refcnt; +- release_sock(sk); +- +- if (last) +- sock_put(sk); +-} +-EXPORT_SYMBOL_GPL(af_alg_release_parent); +- + static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + { + struct sock *sk = sock->sk; +@@ -149,7 +132,6 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + struct sockaddr_alg *sa = (void *)uaddr; + const struct af_alg_type *type; + void *private; +- int err; + + if (sock->state == SS_CONNECTED) + return -EINVAL; +@@ -175,22 +157,16 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + return PTR_ERR(private); + } + +- err = -EBUSY; + lock_sock(sk); +- if (ask->refcnt) +- goto unlock; + + swap(ask->type, type); + swap(ask->private, private); + +- err = 0; +- +-unlock: + release_sock(sk); + + alg_do_release(type, private); + +- return err; ++ return 0; + } + + static int alg_setkey(struct sock *sk, char 
__user *ukey, +@@ -223,15 +199,11 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + const struct af_alg_type *type; +- int err = -EBUSY; ++ int err = -ENOPROTOOPT; + + lock_sock(sk); +- if (ask->refcnt) +- goto unlock; +- + type = ask->type; + +- err = -ENOPROTOOPT; + if (level != SOL_ALG || !type) + goto unlock; + +@@ -280,8 +252,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) + + sk2->sk_family = PF_ALG; + +- if (!ask->refcnt++) +- sock_hold(sk); ++ sock_hold(sk); + alg_sk(sk2)->parent = sk; + alg_sk(sk2)->type = type; + +diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c +index 8bd1bb6dbe47..24ae2a694e9b 100644 +--- a/drivers/firmware/efi/efivars.c ++++ b/drivers/firmware/efi/efivars.c +@@ -219,7 +219,8 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) + } + + if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 || +- efivar_validate(new_var, new_var->Data, new_var->DataSize) == false) { ++ efivar_validate(new_var->VendorGuid, new_var->VariableName, ++ new_var->Data, new_var->DataSize) == false) { + printk(KERN_ERR "efivars: Malformed variable content\n"); + return -EINVAL; + } +@@ -334,7 +335,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, + return -EACCES; + + if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 || +- efivar_validate(new_var, new_var->Data, new_var->DataSize) == false) { ++ efivar_validate(new_var->VendorGuid, new_var->VariableName, ++ new_var->Data, new_var->DataSize) == false) { + printk(KERN_ERR "efivars: Malformed variable content\n"); + return -EINVAL; + } +@@ -405,35 +407,27 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) + { + int i, short_name_size; + char *short_name; +- unsigned long variable_name_size; +- efi_char16_t *variable_name; +- +- variable_name = new_var->var.VariableName; +- variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t); ++ unsigned long utf8_name_size; ++ efi_char16_t *variable_name = new_var->var.VariableName; + + /* +- * Length of the variable bytes in ASCII, plus the '-' separator, ++ * Length of the variable bytes in UTF8, plus the '-' separator, + * plus the GUID, plus trailing NUL + */ +- short_name_size = variable_name_size / sizeof(efi_char16_t) +- + 1 + EFI_VARIABLE_GUID_LEN + 1; +- +- short_name = kzalloc(short_name_size, GFP_KERNEL); ++ utf8_name_size = ucs2_utf8size(variable_name); ++ short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1; + ++ short_name = kmalloc(short_name_size, GFP_KERNEL); + if (!short_name) + return 1; + +- /* Convert Unicode to normal chars (assume top bits are 0), +- ala UTF-8 */ +- for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) { +- short_name[i] = variable_name[i] & 0xFF; +- } ++ ucs2_as_utf8(short_name, variable_name, short_name_size); ++ + /* This is ugly, but necessary to separate one vendor's + private variables from another's. 
*/ +- +- *(short_name + strlen(short_name)) = '-'; ++ short_name[utf8_name_size] = '-'; + efi_guid_unparse(&new_var->var.VendorGuid, +- short_name + strlen(short_name)); ++ short_name + utf8_name_size + 1); + + new_var->kobj.kset = efivars_kset; + +diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c +index 7dbc319e1cf5..9f82b5545edd 100644 +--- a/drivers/firmware/efi/vars.c ++++ b/drivers/firmware/efi/vars.c +@@ -42,7 +42,7 @@ DECLARE_WORK(efivar_work, NULL); + EXPORT_SYMBOL_GPL(efivar_work); + + static bool +-validate_device_path(struct efi_variable *var, int match, u8 *buffer, ++validate_device_path(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + struct efi_generic_dev_path *node; +@@ -75,7 +75,7 @@ validate_device_path(struct efi_variable *var, int match, u8 *buffer, + } + + static bool +-validate_boot_order(struct efi_variable *var, int match, u8 *buffer, ++validate_boot_order(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + /* An array of 16-bit integers */ +@@ -86,18 +86,18 @@ validate_boot_order(struct efi_variable *var, int match, u8 *buffer, + } + + static bool +-validate_load_option(struct efi_variable *var, int match, u8 *buffer, ++validate_load_option(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + u16 filepathlength; + int i, desclength = 0, namelen; + +- namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName)); ++ namelen = ucs2_strnlen(var_name, EFI_VAR_NAME_LEN); + + /* Either "Boot" or "Driver" followed by four digits of hex */ + for (i = match; i < match+4; i++) { +- if (var->VariableName[i] > 127 || +- hex_to_bin(var->VariableName[i] & 0xff) < 0) ++ if (var_name[i] > 127 || ++ hex_to_bin(var_name[i] & 0xff) < 0) + return true; + } + +@@ -132,12 +132,12 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer, + /* + * And, finally, check the filepath + */ +- return validate_device_path(var, match, buffer + desclength + 6, ++ return validate_device_path(var_name, match, buffer + desclength + 6, + filepathlength); + } + + static bool +-validate_uint16(struct efi_variable *var, int match, u8 *buffer, ++validate_uint16(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + /* A single 16-bit integer */ +@@ -148,7 +148,7 @@ validate_uint16(struct efi_variable *var, int match, u8 *buffer, + } + + static bool +-validate_ascii_string(struct efi_variable *var, int match, u8 *buffer, ++validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + int i; +@@ -165,67 +165,133 @@ validate_ascii_string(struct efi_variable *var, int match, u8 *buffer, + } + + struct variable_validate { ++ efi_guid_t vendor; + char *name; +- bool (*validate)(struct efi_variable *var, int match, u8 *data, ++ bool (*validate)(efi_char16_t *var_name, int match, u8 *data, + unsigned long len); + }; + ++/* ++ * This is the list of variables we need to validate, as well as the ++ * whitelist for what we think is safe not to default to immutable. ++ * ++ * If it has a validate() method that's not NULL, it'll go into the ++ * validation routine. If not, it is assumed valid, but still used for ++ * whitelisting. ++ * ++ * Note that it's sorted by {vendor,name}, but globbed names must come after ++ * any other name with the same prefix. 
++ */ + static const struct variable_validate variable_validate[] = { +- { "BootNext", validate_uint16 }, +- { "BootOrder", validate_boot_order }, +- { "DriverOrder", validate_boot_order }, +- { "Boot*", validate_load_option }, +- { "Driver*", validate_load_option }, +- { "ConIn", validate_device_path }, +- { "ConInDev", validate_device_path }, +- { "ConOut", validate_device_path }, +- { "ConOutDev", validate_device_path }, +- { "ErrOut", validate_device_path }, +- { "ErrOutDev", validate_device_path }, +- { "Timeout", validate_uint16 }, +- { "Lang", validate_ascii_string }, +- { "PlatformLang", validate_ascii_string }, +- { "", NULL }, ++ { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 }, ++ { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option }, ++ { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string }, ++ { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL }, ++ { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 }, ++ { LINUX_EFI_CRASH_GUID, "*", NULL }, ++ { NULL_GUID, "", NULL }, + }; + ++static bool ++variable_matches(const char *var_name, size_t len, const char *match_name, ++ int *match) ++{ ++ for (*match = 0; ; (*match)++) { ++ char c = match_name[*match]; ++ char u = var_name[*match]; ++ ++ /* Wildcard in the matching name means we've matched */ ++ if (c == '*') ++ return true; ++ ++ /* Case sensitive match */ ++ if (!c && *match == len) ++ return true; ++ ++ if (c != u) ++ return false; ++ ++ if (!c) ++ return true; ++ } ++ return true; ++} ++ + bool +-efivar_validate(struct efi_variable *var, u8 *data, unsigned long len) ++efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, ++ unsigned long data_size) + { + int i; +- u16 *unicode_name = var->VariableName; ++ unsigned long utf8_size; ++ u8 *utf8_name; + +- for (i = 0; variable_validate[i].validate != NULL; i++) { +- const char *name = variable_validate[i].name; +- int match; ++ utf8_size = ucs2_utf8size(var_name); ++ utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL); ++ if (!utf8_name) ++ return false; + +- for (match = 0; ; match++) { +- char c = name[match]; +- u16 u = unicode_name[match]; ++ ucs2_as_utf8(utf8_name, var_name, utf8_size); ++ utf8_name[utf8_size] = '\0'; + +- /* All special variables are plain ascii */ +- if (u > 127) +- return true; ++ for (i = 0; variable_validate[i].name[0] != '\0'; i++) { ++ const char *name = variable_validate[i].name; ++ int match = 0; + +- /* Wildcard in the matching name means we've matched */ +- if (c == '*') +- return variable_validate[i].validate(var, +- match, data, len); ++ if (efi_guidcmp(vendor, variable_validate[i].vendor)) ++ continue; + +- /* Case sensitive match */ +- if (c != u) ++ if (variable_matches(utf8_name, utf8_size+1, name, &match)) { ++ if (variable_validate[i].validate == NULL) + break; +- +- /* Reached the end of the string while matching */ +- if (!c) +- return 
variable_validate[i].validate(var, +- match, data, len); ++ kfree(utf8_name); ++ return variable_validate[i].validate(var_name, match, ++ data, data_size); + } + } +- ++ kfree(utf8_name); + return true; + } + EXPORT_SYMBOL_GPL(efivar_validate); + ++bool ++efivar_variable_is_removable(efi_guid_t vendor, const char *var_name, ++ size_t len) ++{ ++ int i; ++ bool found = false; ++ int match = 0; ++ ++ /* ++ * Check if our variable is in the validated variables list ++ */ ++ for (i = 0; variable_validate[i].name[0] != '\0'; i++) { ++ if (efi_guidcmp(variable_validate[i].vendor, vendor)) ++ continue; ++ ++ if (variable_matches(var_name, len, ++ variable_validate[i].name, &match)) { ++ found = true; ++ break; ++ } ++ } ++ ++ /* ++ * If it's in our list, it is removable. ++ */ ++ return found; ++} ++EXPORT_SYMBOL_GPL(efivar_variable_is_removable); ++ + static efi_status_t + check_var_size(u32 attributes, unsigned long size) + { +@@ -797,7 +863,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, + + *set = false; + +- if (efivar_validate(&entry->var, data, *size) == false) ++ if (efivar_validate(*vendor, name, data, *size) == false) + return -EINVAL; + + /* +diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c +index 8dd524f32284..08f105a06fbf 100644 +--- a/fs/efivarfs/file.c ++++ b/fs/efivarfs/file.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + + #include "internal.h" + +@@ -108,9 +109,79 @@ out_free: + return size; + } + ++static int ++efivarfs_ioc_getxflags(struct file *file, void __user *arg) ++{ ++ struct inode *inode = file->f_mapping->host; ++ unsigned int i_flags; ++ unsigned int flags = 0; ++ ++ i_flags = inode->i_flags; ++ if (i_flags & S_IMMUTABLE) ++ flags |= FS_IMMUTABLE_FL; ++ ++ if (copy_to_user(arg, &flags, sizeof(flags))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ++efivarfs_ioc_setxflags(struct file *file, void __user *arg) ++{ ++ struct inode *inode = file->f_mapping->host; ++ unsigned int flags; ++ unsigned int i_flags = 0; ++ int error; ++ ++ if (!inode_owner_or_capable(inode)) ++ return -EACCES; ++ ++ if (copy_from_user(&flags, arg, sizeof(flags))) ++ return -EFAULT; ++ ++ if (flags & ~FS_IMMUTABLE_FL) ++ return -EOPNOTSUPP; ++ ++ if (!capable(CAP_LINUX_IMMUTABLE)) ++ return -EPERM; ++ ++ if (flags & FS_IMMUTABLE_FL) ++ i_flags |= S_IMMUTABLE; ++ ++ ++ error = mnt_want_write_file(file); ++ if (error) ++ return error; ++ ++ mutex_lock(&inode->i_mutex); ++ inode->i_flags &= ~S_IMMUTABLE; ++ inode->i_flags |= i_flags; ++ mutex_unlock(&inode->i_mutex); ++ ++ mnt_drop_write_file(file); ++ ++ return 0; ++} ++ ++long ++efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p) ++{ ++ void __user *arg = (void __user *)p; ++ ++ switch (cmd) { ++ case FS_IOC_GETFLAGS: ++ return efivarfs_ioc_getxflags(file, arg); ++ case FS_IOC_SETFLAGS: ++ return efivarfs_ioc_setxflags(file, arg); ++ } ++ ++ return -ENOTTY; ++} ++ + const struct file_operations efivarfs_file_operations = { + .open = simple_open, + .read = efivarfs_file_read, + .write = efivarfs_file_write, + .llseek = no_llseek, ++ .unlocked_ioctl = efivarfs_file_ioctl, + }; +diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c +index 7e787fb90293..d0351bc7b533 100644 +--- a/fs/efivarfs/inode.c ++++ b/fs/efivarfs/inode.c +@@ -15,7 +15,8 @@ + #include "internal.h" + + struct inode *efivarfs_get_inode(struct super_block *sb, +- const struct inode *dir, int mode, dev_t dev) ++ const struct inode *dir, int mode, ++ dev_t dev, bool is_removable) + { + struct inode 
*inode = new_inode(sb); + +@@ -23,6 +24,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb, + inode->i_ino = get_next_ino(); + inode->i_mode = mode; + inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; ++ inode->i_flags = is_removable ? 0 : S_IMMUTABLE; + switch (mode & S_IFMT) { + case S_IFREG: + inode->i_fop = &efivarfs_file_operations; +@@ -102,22 +104,17 @@ static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid) + static int efivarfs_create(struct inode *dir, struct dentry *dentry, + umode_t mode, bool excl) + { +- struct inode *inode; ++ struct inode *inode = NULL; + struct efivar_entry *var; + int namelen, i = 0, err = 0; ++ bool is_removable = false; + + if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len)) + return -EINVAL; + +- inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0); +- if (!inode) +- return -ENOMEM; +- + var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL); +- if (!var) { +- err = -ENOMEM; +- goto out; +- } ++ if (!var) ++ return -ENOMEM; + + /* length of the variable name itself: remove GUID and separator */ + namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1; +@@ -125,6 +122,16 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry, + efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1, + &var->var.VendorGuid); + ++ if (efivar_variable_is_removable(var->var.VendorGuid, ++ dentry->d_name.name, namelen)) ++ is_removable = true; ++ ++ inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable); ++ if (!inode) { ++ err = -ENOMEM; ++ goto out; ++ } ++ + for (i = 0; i < namelen; i++) + var->var.VariableName[i] = dentry->d_name.name[i]; + +@@ -138,7 +145,8 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry, + out: + if (err) { + kfree(var); +- iput(inode); ++ if (inode) ++ iput(inode); + } + return err; + } +diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h +index b5ff16addb7c..b4505188e799 100644 +--- a/fs/efivarfs/internal.h ++++ b/fs/efivarfs/internal.h +@@ -15,7 +15,8 @@ extern const struct file_operations efivarfs_file_operations; + extern const struct inode_operations efivarfs_dir_inode_operations; + extern bool efivarfs_valid_name(const char *str, int len); + extern struct inode *efivarfs_get_inode(struct super_block *sb, +- const struct inode *dir, int mode, dev_t dev); ++ const struct inode *dir, int mode, dev_t dev, ++ bool is_removable); + + extern struct list_head efivarfs_list; + +diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c +index 141aee31884f..5a3655f690d9 100644 +--- a/fs/efivarfs/super.c ++++ b/fs/efivarfs/super.c +@@ -128,8 +128,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, + struct dentry *dentry, *root = sb->s_root; + unsigned long size = 0; + char *name; +- int len, i; ++ int len; + int err = -ENOMEM; ++ bool is_removable = false; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) +@@ -138,15 +139,17 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, + memcpy(entry->var.VariableName, name16, name_size); + memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); + +- len = ucs2_strlen(entry->var.VariableName); ++ len = ucs2_utf8size(entry->var.VariableName); + + /* name, plus '-', plus GUID, plus NUL*/ + name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL); + if (!name) + goto fail; + +- for (i = 0; i < len; i++) +- name[i] = entry->var.VariableName[i] & 0xFF; ++ ucs2_as_utf8(name, entry->var.VariableName, len); ++ ++ if 
(efivar_variable_is_removable(entry->var.VendorGuid, name, len)) ++ is_removable = true; + + name[len] = '-'; + +@@ -154,7 +157,8 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, + + name[len + EFI_VARIABLE_GUID_LEN+1] = '\0'; + +- inode = efivarfs_get_inode(sb, root->d_inode, S_IFREG | 0644, 0); ++ inode = efivarfs_get_inode(sb, root->d_inode, S_IFREG | 0644, 0, ++ is_removable); + if (!inode) + goto fail_name; + +@@ -210,7 +214,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent) + sb->s_d_op = &efivarfs_d_ops; + sb->s_time_gran = 1; + +- inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0); ++ inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true); + if (!inode) + return -ENOMEM; + inode->i_op = &efivarfs_dir_inode_operations; +diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h +index 2f38daaab3d7..d61c11170213 100644 +--- a/include/crypto/if_alg.h ++++ b/include/crypto/if_alg.h +@@ -30,8 +30,6 @@ struct alg_sock { + + struct sock *parent; + +- unsigned int refcnt; +- + const struct af_alg_type *type; + void *private; + }; +@@ -66,7 +64,6 @@ int af_alg_register_type(const struct af_alg_type *type); + int af_alg_unregister_type(const struct af_alg_type *type); + + int af_alg_release(struct socket *sock); +-void af_alg_release_parent(struct sock *sk); + int af_alg_accept(struct sock *sk, struct socket *newsock); + + int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len, +@@ -83,6 +80,11 @@ static inline struct alg_sock *alg_sk(struct sock *sk) + return (struct alg_sock *)sk; + } + ++static inline void af_alg_release_parent(struct sock *sk) ++{ ++ sock_put(alg_sk(sk)->parent); ++} ++ + static inline void af_alg_init_completion(struct af_alg_completion *completion) + { + init_completion(&completion->completion); +diff --git a/include/linux/efi.h b/include/linux/efi.h +index 2bc0ad78d058..63fa51c864ec 100644 +--- a/include/linux/efi.h ++++ b/include/linux/efi.h +@@ -769,8 +769,10 @@ struct efivars { + * and we use a page for reading/writing. + */ + ++#define EFI_VAR_NAME_LEN 1024 ++ + struct efi_variable { +- efi_char16_t VariableName[1024/sizeof(efi_char16_t)]; ++ efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)]; + efi_guid_t VendorGuid; + unsigned long DataSize; + __u8 Data[1024]; +@@ -832,7 +834,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *), + struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, + struct list_head *head, bool remove); + +-bool efivar_validate(struct efi_variable *var, u8 *data, unsigned long len); ++bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, ++ unsigned long data_size); ++bool efivar_variable_is_removable(efi_guid_t vendor, const char *name, ++ size_t len); + + extern struct work_struct efivar_work; + void efivar_run_worker(void); +diff --git a/include/linux/module.h b/include/linux/module.h +index 46f1ea01e6f6..761dc2848ffa 100644 +--- a/include/linux/module.h ++++ b/include/linux/module.h +@@ -220,6 +220,12 @@ struct module_ref { + unsigned long decs; + } __attribute((aligned(2 * sizeof(unsigned long)))); + ++struct mod_kallsyms { ++ Elf_Sym *symtab; ++ unsigned int num_symtab; ++ char *strtab; ++}; ++ + struct module + { + enum module_state state; +@@ -308,14 +314,9 @@ struct module + #endif + + #ifdef CONFIG_KALLSYMS +- /* +- * We keep the symbol and string tables for kallsyms. 
+- * The core_* fields below are temporary, loader-only (they +- * could really be discarded after module init). +- */ +- Elf_Sym *symtab, *core_symtab; +- unsigned int num_symtab, core_num_syms; +- char *strtab, *core_strtab; ++ /* Protected by RCU and/or module_mutex: use rcu_dereference() */ ++ struct mod_kallsyms *kallsyms; ++ struct mod_kallsyms core_kallsyms; + + /* Section attributes */ + struct module_sect_attrs *sect_attrs; +diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h +index 0b2d0cbe0bab..36e5e9998865 100644 +--- a/include/linux/tracepoint.h ++++ b/include/linux/tracepoint.h +@@ -129,9 +129,6 @@ static inline void tracepoint_synchronize_unregister(void) + void *it_func; \ + void *__data; \ + \ +- if (!cpu_online(raw_smp_processor_id())) \ +- return; \ +- \ + if (!(cond)) \ + return; \ + prercu; \ +@@ -265,15 +262,19 @@ static inline void tracepoint_synchronize_unregister(void) + * "void *__data, proto" as the callback prototype. + */ + #define DECLARE_TRACE_NOARGS(name) \ +- __DECLARE_TRACE(name, void, , 1, void *__data, __data) ++ __DECLARE_TRACE(name, void, , \ ++ cpu_online(raw_smp_processor_id()), \ ++ void *__data, __data) + + #define DECLARE_TRACE(name, proto, args) \ +- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1, \ +- PARAMS(void *__data, proto), \ +- PARAMS(__data, args)) ++ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ ++ cpu_online(raw_smp_processor_id()), \ ++ PARAMS(void *__data, proto), \ ++ PARAMS(__data, args)) + + #define DECLARE_TRACE_CONDITION(name, proto, args, cond) \ +- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \ ++ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ ++ cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \ + PARAMS(void *__data, proto), \ + PARAMS(__data, args)) + +diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h +index cbb20afdbc01..bb679b48f408 100644 +--- a/include/linux/ucs2_string.h ++++ b/include/linux/ucs2_string.h +@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s); + unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength); + int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len); + ++unsigned long ucs2_utf8size(const ucs2_char_t *src); ++unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, ++ unsigned long maxlength); ++ + #endif /* _LINUX_UCS2_STRING_H_ */ +diff --git a/kernel/module.c b/kernel/module.c +index 70a4754c001f..f8a4f48b48a9 100644 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -179,6 +179,9 @@ struct load_info { + struct _ddebug *debug; + unsigned int num_debug; + bool sig_ok; ++#ifdef CONFIG_KALLSYMS ++ unsigned long mod_kallsyms_init_off; ++#endif + struct { + unsigned int sym, str, mod, vers, info, pcpu; + } index; +@@ -2346,8 +2349,20 @@ static void layout_symtab(struct module *mod, struct load_info *info) + strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, + info->index.str) | INIT_OFFSET_MASK; + pr_debug("\t%s\n", info->secstrings + strsect->sh_name); ++ ++ /* We'll tack temporary mod_kallsyms on the end. */ ++ mod->init_size = ALIGN(mod->init_size, ++ __alignof__(struct mod_kallsyms)); ++ info->mod_kallsyms_init_off = mod->init_size; ++ mod->init_size += sizeof(struct mod_kallsyms); ++ mod->init_size = debug_align(mod->init_size); + } + ++/* ++ * We use the full symtab and strtab which layout_symtab arranged to ++ * be appended to the init section. Later we switch to the cut-down ++ * core-only ones. 
++ */ + static void add_kallsyms(struct module *mod, const struct load_info *info) + { + unsigned int i, ndst; +@@ -2356,28 +2371,33 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) + char *s; + Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; + +- mod->symtab = (void *)symsec->sh_addr; +- mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym); ++ /* Set up to point into init section. */ ++ mod->kallsyms = mod->module_init + info->mod_kallsyms_init_off; ++ ++ mod->kallsyms->symtab = (void *)symsec->sh_addr; ++ mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym); + /* Make sure we get permanent strtab: don't use info->strtab. */ +- mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr; ++ mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr; + + /* Set types up while we still have access to sections. */ +- for (i = 0; i < mod->num_symtab; i++) +- mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); +- +- mod->core_symtab = dst = mod->module_core + info->symoffs; +- mod->core_strtab = s = mod->module_core + info->stroffs; +- src = mod->symtab; +- for (ndst = i = 0; i < mod->num_symtab; i++) { ++ for (i = 0; i < mod->kallsyms->num_symtab; i++) ++ mod->kallsyms->symtab[i].st_info ++ = elf_type(&mod->kallsyms->symtab[i], info); ++ ++ /* Now populate the cut down core kallsyms for after init. */ ++ mod->core_kallsyms.symtab = dst = mod->module_core + info->symoffs; ++ mod->core_kallsyms.strtab = s = mod->module_core + info->stroffs; ++ src = mod->kallsyms->symtab; ++ for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) { + if (i == 0 || + is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { + dst[ndst] = src[i]; +- dst[ndst++].st_name = s - mod->core_strtab; +- s += strlcpy(s, &mod->strtab[src[i].st_name], ++ dst[ndst++].st_name = s - mod->core_kallsyms.strtab; ++ s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name], + KSYM_NAME_LEN) + 1; + } + } +- mod->core_num_syms = ndst; ++ mod->core_kallsyms.num_symtab = ndst; + } + #else + static inline void layout_symtab(struct module *mod, struct load_info *info) +@@ -3117,9 +3137,8 @@ static int do_init_module(struct module *mod) + module_put(mod); + trim_init_extable(mod); + #ifdef CONFIG_KALLSYMS +- mod->num_symtab = mod->core_num_syms; +- mod->symtab = mod->core_symtab; +- mod->strtab = mod->core_strtab; ++ /* Switch to core kallsyms now init is done: kallsyms may be walking! */ ++ rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms); + #endif + unset_module_init_ro_nx(mod); + module_free(mod, mod->module_init); +@@ -3398,9 +3417,9 @@ static inline int is_arm_mapping_symbol(const char *str) + && (str[2] == '\0' || str[2] == '.'); + } + +-static const char *symname(struct module *mod, unsigned int symnum) ++static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum) + { +- return mod->strtab + mod->symtab[symnum].st_name; ++ return kallsyms->strtab + kallsyms->symtab[symnum].st_name; + } + + static const char *get_ksymbol(struct module *mod, +@@ -3410,6 +3429,7 @@ static const char *get_ksymbol(struct module *mod, + { + unsigned int i, best = 0; + unsigned long nextval; ++ struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + + /* At worse, next value is at end of module */ + if (within_module_init(addr, mod)) +@@ -3419,32 +3439,32 @@ static const char *get_ksymbol(struct module *mod, + + /* Scan for closest preceding symbol, and next symbol. (ELF + starts real symbols at 1). 
*/ +- for (i = 1; i < mod->num_symtab; i++) { +- if (mod->symtab[i].st_shndx == SHN_UNDEF) ++ for (i = 1; i < kallsyms->num_symtab; i++) { ++ if (kallsyms->symtab[i].st_shndx == SHN_UNDEF) + continue; + + /* We ignore unnamed symbols: they're uninformative + * and inserted at a whim. */ +- if (*symname(mod, i) == '\0' +- || is_arm_mapping_symbol(symname(mod, i))) ++ if (*symname(kallsyms, i) == '\0' ++ || is_arm_mapping_symbol(symname(kallsyms, i))) + continue; + +- if (mod->symtab[i].st_value <= addr +- && mod->symtab[i].st_value > mod->symtab[best].st_value) ++ if (kallsyms->symtab[i].st_value <= addr ++ && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value) + best = i; +- if (mod->symtab[i].st_value > addr +- && mod->symtab[i].st_value < nextval) +- nextval = mod->symtab[i].st_value; ++ if (kallsyms->symtab[i].st_value > addr ++ && kallsyms->symtab[i].st_value < nextval) ++ nextval = kallsyms->symtab[i].st_value; + } + + if (!best) + return NULL; + + if (size) +- *size = nextval - mod->symtab[best].st_value; ++ *size = nextval - kallsyms->symtab[best].st_value; + if (offset) +- *offset = addr - mod->symtab[best].st_value; +- return symname(mod, best); ++ *offset = addr - kallsyms->symtab[best].st_value; ++ return symname(kallsyms, best); + } + + /* For kallsyms to ask for address resolution. NULL means not found. Careful +@@ -3540,18 +3560,21 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + + preempt_disable(); + list_for_each_entry_rcu(mod, &modules, list) { ++ struct mod_kallsyms *kallsyms; ++ + if (mod->state == MODULE_STATE_UNFORMED) + continue; +- if (symnum < mod->num_symtab) { +- *value = mod->symtab[symnum].st_value; +- *type = mod->symtab[symnum].st_info; +- strlcpy(name, symname(mod, symnum), KSYM_NAME_LEN); ++ kallsyms = rcu_dereference_sched(mod->kallsyms); ++ if (symnum < kallsyms->num_symtab) { ++ *value = kallsyms->symtab[symnum].st_value; ++ *type = kallsyms->symtab[symnum].st_info; ++ strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN); + strlcpy(module_name, mod->name, MODULE_NAME_LEN); + *exported = is_exported(name, *value, mod); + preempt_enable(); + return 0; + } +- symnum -= mod->num_symtab; ++ symnum -= kallsyms->num_symtab; + } + preempt_enable(); + return -ERANGE; +@@ -3560,11 +3583,12 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + static unsigned long mod_find_symname(struct module *mod, const char *name) + { + unsigned int i; ++ struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + +- for (i = 0; i < mod->num_symtab; i++) +- if (strcmp(name, symname(mod, i)) == 0 && +- mod->symtab[i].st_info != 'U') +- return mod->symtab[i].st_value; ++ for (i = 0; i < kallsyms->num_symtab; i++) ++ if (strcmp(name, symname(kallsyms, i)) == 0 && ++ kallsyms->symtab[i].st_info != 'U') ++ return kallsyms->symtab[i].st_value; + return 0; + } + +@@ -3603,11 +3627,14 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + int ret; + + list_for_each_entry(mod, &modules, list) { ++ /* We hold module_mutex: no need for rcu_dereference_sched */ ++ struct mod_kallsyms *kallsyms = mod->kallsyms; ++ + if (mod->state == MODULE_STATE_UNFORMED) + continue; +- for (i = 0; i < mod->num_symtab; i++) { +- ret = fn(data, symname(mod, i), +- mod, mod->symtab[i].st_value); ++ for (i = 0; i < kallsyms->num_symtab; i++) { ++ ret = fn(data, symname(kallsyms, i), ++ mod, kallsyms->symtab[i].st_value); + if (ret != 0) + return ret; + } +diff --git a/lib/ucs2_string.c 
b/lib/ucs2_string.c +index 6f500ef2301d..f0b323abb4c6 100644 +--- a/lib/ucs2_string.c ++++ b/lib/ucs2_string.c +@@ -49,3 +49,65 @@ ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len) + } + } + EXPORT_SYMBOL(ucs2_strncmp); ++ ++unsigned long ++ucs2_utf8size(const ucs2_char_t *src) ++{ ++ unsigned long i; ++ unsigned long j = 0; ++ ++ for (i = 0; i < ucs2_strlen(src); i++) { ++ u16 c = src[i]; ++ ++ if (c >= 0x800) ++ j += 3; ++ else if (c >= 0x80) ++ j += 2; ++ else ++ j += 1; ++ } ++ ++ return j; ++} ++EXPORT_SYMBOL(ucs2_utf8size); ++ ++/* ++ * copy at most maxlength bytes of whole utf8 characters to dest from the ++ * ucs2 string src. ++ * ++ * The return value is the number of characters copied, not including the ++ * final NUL character. ++ */ ++unsigned long ++ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength) ++{ ++ unsigned int i; ++ unsigned long j = 0; ++ unsigned long limit = ucs2_strnlen(src, maxlength); ++ ++ for (i = 0; maxlength && i < limit; i++) { ++ u16 c = src[i]; ++ ++ if (c >= 0x800) { ++ if (maxlength < 3) ++ break; ++ maxlength -= 3; ++ dest[j++] = 0xe0 | (c & 0xf000) >> 12; ++ dest[j++] = 0x80 | (c & 0x0fc0) >> 6; ++ dest[j++] = 0x80 | (c & 0x003f); ++ } else if (c >= 0x80) { ++ if (maxlength < 2) ++ break; ++ maxlength -= 2; ++ dest[j++] = 0xc0 | (c & 0x7c0) >> 6; ++ dest[j++] = 0x80 | (c & 0x03f); ++ } else { ++ maxlength -= 1; ++ dest[j++] = c & 0x7f; ++ } ++ } ++ if (maxlength) ++ dest[j] = '\0'; ++ return j; ++} ++EXPORT_SYMBOL(ucs2_as_utf8); +diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c +index 31bf2586fb84..864408026202 100644 +--- a/net/mac80211/agg-rx.c ++++ b/net/mac80211/agg-rx.c +@@ -290,7 +290,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, + } + + /* prepare A-MPDU MLME for Rx aggregation */ +- tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL); ++ tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL); + if (!tid_agg_rx) + goto end; + +diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c +index f3bbea1eb9e7..13f10aab9213 100644 +--- a/net/mac80211/rc80211_minstrel_ht.c ++++ b/net/mac80211/rc80211_minstrel_ht.c +@@ -454,7 +454,7 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb) + if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO) + return; + +- ieee80211_start_tx_ba_session(pubsta, tid, 5000); ++ ieee80211_start_tx_ba_session(pubsta, tid, 0); + } + + static void +diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c +index c8717c1d082e..87dd619fb2e9 100644 +--- a/net/wireless/wext-core.c ++++ b/net/wireless/wext-core.c +@@ -342,6 +342,39 @@ static const int compat_event_type_size[] = { + + /* IW event code */ + ++static void wireless_nlevent_flush(void) ++{ ++ struct sk_buff *skb; ++ struct net *net; ++ ++ ASSERT_RTNL(); ++ ++ for_each_net(net) { ++ while ((skb = skb_dequeue(&net->wext_nlevents))) ++ rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, ++ GFP_KERNEL); ++ } ++} ++ ++static int wext_netdev_notifier_call(struct notifier_block *nb, ++ unsigned long state, void *ptr) ++{ ++ /* ++ * When a netdev changes state in any way, flush all pending messages ++ * to avoid them going out in a strange order, e.g. RTM_NEWLINK after ++ * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close() ++ * or similar - all of which could otherwise happen due to delays from ++ * schedule_work(). 
++ */ ++ wireless_nlevent_flush(); ++ ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block wext_netdev_notifier = { ++ .notifier_call = wext_netdev_notifier_call, ++}; ++ + static int __net_init wext_pernet_init(struct net *net) + { + skb_queue_head_init(&net->wext_nlevents); +@@ -360,7 +393,12 @@ static struct pernet_operations wext_pernet_ops = { + + static int __init wireless_nlevent_init(void) + { +- return register_pernet_subsys(&wext_pernet_ops); ++ int err = register_pernet_subsys(&wext_pernet_ops); ++ ++ if (err) ++ return err; ++ ++ return register_netdevice_notifier(&wext_netdev_notifier); + } + + subsys_initcall(wireless_nlevent_init); +@@ -368,17 +406,8 @@ subsys_initcall(wireless_nlevent_init); + /* Process events generated by the wireless layer or the driver. */ + static void wireless_nlevent_process(struct work_struct *work) + { +- struct sk_buff *skb; +- struct net *net; +- + rtnl_lock(); +- +- for_each_net(net) { +- while ((skb = skb_dequeue(&net->wext_nlevents))) +- rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, +- GFP_KERNEL); +- } +- ++ wireless_nlevent_flush(); + rtnl_unlock(); + } + +diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c +index 754f88e1fdab..4892966fc1b8 100644 +--- a/sound/soc/codecs/wm8958-dsp2.c ++++ b/sound/soc/codecs/wm8958-dsp2.c +@@ -459,7 +459,7 @@ static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +@@ -549,7 +549,7 @@ static int wm8958_put_vss_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +@@ -582,7 +582,7 @@ static int wm8958_put_vss_hpf_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +@@ -749,7 +749,7 @@ static int wm8958_put_enh_eq_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c +index 6e746c7474bf..cda3cf23474b 100644 +--- a/sound/soc/codecs/wm8994.c ++++ b/sound/soc/codecs/wm8994.c +@@ -361,7 +361,7 @@ static int wm8994_put_drc_enum(struct snd_kcontrol *kcontrol, + struct wm8994 *control = wm8994->wm8994; + struct wm8994_pdata *pdata = &control->pdata; + int drc = wm8994_get_drc(kcontrol->id.name); +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + + if (drc < 0) + return drc; +@@ -468,7 +468,7 @@ static int 
wm8994_put_retune_mobile_enum(struct snd_kcontrol *kcontrol, + struct wm8994 *control = wm8994->wm8994; + struct wm8994_pdata *pdata = &control->pdata; + int block = wm8994_get_retune_mobile_block(kcontrol->id.name); +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + + if (block < 0) + return block; +diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh +index 77edcdcc016b..057278448515 100644 +--- a/tools/testing/selftests/efivarfs/efivarfs.sh ++++ b/tools/testing/selftests/efivarfs/efivarfs.sh +@@ -88,7 +88,11 @@ test_delete() + exit 1 + fi + +- rm $file ++ rm $file 2>/dev/null ++ if [ $? -ne 0 ]; then ++ chattr -i $file ++ rm $file ++ fi + + if [ -e $file ]; then + echo "$file couldn't be deleted" >&2 +@@ -111,6 +115,7 @@ test_zero_size_delete() + exit 1 + fi + ++ chattr -i $file + printf "$attrs" > $file + + if [ -e $file ]; then +@@ -141,7 +146,11 @@ test_valid_filenames() + echo "$file could not be created" >&2 + ret=1 + else +- rm $file ++ rm $file 2>/dev/null ++ if [ $? -ne 0 ]; then ++ chattr -i $file ++ rm $file ++ fi + fi + done + +@@ -174,7 +183,11 @@ test_invalid_filenames() + + if [ -e $file ]; then + echo "Creating $file should have failed" >&2 +- rm $file ++ rm $file 2>/dev/null ++ if [ $? -ne 0 ]; then ++ chattr -i $file ++ rm $file ++ fi + ret=1 + fi + done +diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c +index 8c0764407b3c..4af74f733036 100644 +--- a/tools/testing/selftests/efivarfs/open-unlink.c ++++ b/tools/testing/selftests/efivarfs/open-unlink.c +@@ -1,10 +1,68 @@ ++#include + #include + #include + #include + #include ++#include + #include + #include + #include ++#include ++ ++static int set_immutable(const char *path, int immutable) ++{ ++ unsigned int flags; ++ int fd; ++ int rc; ++ int error; ++ ++ fd = open(path, O_RDONLY); ++ if (fd < 0) ++ return fd; ++ ++ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags); ++ if (rc < 0) { ++ error = errno; ++ close(fd); ++ errno = error; ++ return rc; ++ } ++ ++ if (immutable) ++ flags |= FS_IMMUTABLE_FL; ++ else ++ flags &= ~FS_IMMUTABLE_FL; ++ ++ rc = ioctl(fd, FS_IOC_SETFLAGS, &flags); ++ error = errno; ++ close(fd); ++ errno = error; ++ return rc; ++} ++ ++static int get_immutable(const char *path) ++{ ++ unsigned int flags; ++ int fd; ++ int rc; ++ int error; ++ ++ fd = open(path, O_RDONLY); ++ if (fd < 0) ++ return fd; ++ ++ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags); ++ if (rc < 0) { ++ error = errno; ++ close(fd); ++ errno = error; ++ return rc; ++ } ++ close(fd); ++ if (flags & FS_IMMUTABLE_FL) ++ return 1; ++ return 0; ++} + + int main(int argc, char **argv) + { +@@ -27,7 +85,7 @@ int main(int argc, char **argv) + buf[4] = 0; + + /* create a test variable */ +- fd = open(path, O_WRONLY | O_CREAT); ++ fd = open(path, O_WRONLY | O_CREAT, 0600); + if (fd < 0) { + perror("open(O_WRONLY)"); + return EXIT_FAILURE; +@@ -41,6 +99,18 @@ int main(int argc, char **argv) + + close(fd); + ++ rc = get_immutable(path); ++ if (rc < 0) { ++ perror("ioctl(FS_IOC_GETFLAGS)"); ++ return EXIT_FAILURE; ++ } else if (rc) { ++ rc = set_immutable(path, 0); ++ if (rc < 0) { ++ perror("ioctl(FS_IOC_SETFLAGS)"); ++ return EXIT_FAILURE; ++ } ++ } ++ + fd = open(path, O_RDONLY); + if (fd < 0) { + perror("open"); diff --git a/patch/kernel/neo-default/patch-3.14.63-64.patch b/patch/kernel/neo-default/patch-3.14.63-64.patch new file mode 100644 index 000000000..4e1bc88a6 --- /dev/null +++ 
b/patch/kernel/neo-default/patch-3.14.63-64.patch @@ -0,0 +1,2749 @@ +diff --git a/Makefile b/Makefile +index 0843ef4cc0a4..de41fa82652f 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 63 ++SUBLEVEL = 64 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c +index 81e6ae0220bc..887535c4c93d 100644 +--- a/arch/mips/kernel/traps.c ++++ b/arch/mips/kernel/traps.c +@@ -688,15 +688,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode) + asmlinkage void do_ov(struct pt_regs *regs) + { + enum ctx_state prev_state; +- siginfo_t info; ++ siginfo_t info = { ++ .si_signo = SIGFPE, ++ .si_code = FPE_INTOVF, ++ .si_addr = (void __user *)regs->cp0_epc, ++ }; + + prev_state = exception_enter(); + die_if_kernel("Integer overflow", regs); + +- info.si_code = FPE_INTOVF; +- info.si_signo = SIGFPE; +- info.si_errno = 0; +- info.si_addr = (void __user *) regs->cp0_epc; + force_sig_info(SIGFPE, &info, current); + exception_exit(prev_state); + } +@@ -797,7 +797,7 @@ out: + static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, + const char *str) + { +- siginfo_t info; ++ siginfo_t info = { 0 }; + char b[40]; + + #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP +@@ -825,7 +825,6 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, + else + info.si_code = FPE_INTOVF; + info.si_signo = SIGFPE; +- info.si_errno = 0; + info.si_addr = (void __user *) regs->cp0_epc; + force_sig_info(SIGFPE, &info, current); + break; +diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S +index 92a2e9333620..b74ac9c5710b 100644 +--- a/arch/x86/ia32/ia32entry.S ++++ b/arch/x86/ia32/ia32entry.S +@@ -422,6 +422,7 @@ ENTRY(ia32_syscall) + /*CFI_REL_OFFSET cs,CS-RIP*/ + CFI_REL_OFFSET rip,RIP-RIP + PARAVIRT_ADJUST_EXCEPTION_FRAME ++ ASM_CLAC /* Do this early to minimize exposure */ + SWAPGS + /* + * No need to follow this irqs on/off section: the syscall +diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c +index 3a2ae4c88948..398c7a908c17 100644 +--- a/arch/x86/kernel/acpi/sleep.c ++++ b/arch/x86/kernel/acpi/sleep.c +@@ -16,6 +16,7 @@ + #include + #include + ++#include + #include "../../realmode/rm/wakeup.h" + #include "sleep.h" + +@@ -107,7 +108,13 @@ int x86_acpi_suspend_lowlevel(void) + saved_magic = 0x123456789abcdef0L; + #endif /* CONFIG_64BIT */ + ++ /* ++ * Pause/unpause graph tracing around do_suspend_lowlevel as it has ++ * inconsistent call/return info after it jumps to the wakeup vector. 
++ */ ++ pause_graph_tracing(); + do_suspend_lowlevel(); ++ unpause_graph_tracing(); + return 0; + } + +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index 6fecf0bde105..1e82d2a1e205 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -674,19 +674,18 @@ static int ata_ioc32(struct ata_port *ap) + int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, + int cmd, void __user *arg) + { +- int val = -EINVAL, rc = -EINVAL; ++ unsigned long val; ++ int rc = -EINVAL; + unsigned long flags; + + switch (cmd) { +- case ATA_IOC_GET_IO32: ++ case HDIO_GET_32BIT: + spin_lock_irqsave(ap->lock, flags); + val = ata_ioc32(ap); + spin_unlock_irqrestore(ap->lock, flags); +- if (copy_to_user(arg, &val, 1)) +- return -EFAULT; +- return 0; ++ return put_user(val, (unsigned long __user *)arg); + +- case ATA_IOC_SET_IO32: ++ case HDIO_SET_32BIT: + val = (unsigned long) arg; + rc = 0; + spin_lock_irqsave(ap->lock, flags); +diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c +index b335c6ab5efe..b2dd473237e8 100644 +--- a/drivers/edac/edac_mc_sysfs.c ++++ b/drivers/edac/edac_mc_sysfs.c +@@ -973,21 +973,26 @@ nomem: + */ + int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) + { ++ char *name; + int i, err; + + /* + * The memory controller needs its own bus, in order to avoid + * namespace conflicts at /sys/bus/edac. + */ +- mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); +- if (!mci->bus->name) ++ name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); ++ if (!name) + return -ENOMEM; + ++ mci->bus->name = name; ++ + edac_dbg(0, "creating bus %s\n", mci->bus->name); + + err = bus_register(mci->bus); +- if (err < 0) ++ if (err < 0) { ++ kfree(name); + return err; ++ } + + /* get the /sys/devices/system/edac subsys reference */ + mci->dev.type = &mci_attr_type; +@@ -1071,7 +1076,8 @@ fail: + fail2: + device_unregister(&mci->dev); + bus_unregister(mci->bus); +- kfree(mci->bus->name); ++ kfree(name); ++ + return err; + } + +@@ -1102,10 +1108,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) + + void edac_unregister_sysfs(struct mem_ctl_info *mci) + { ++ const char *name = mci->bus->name; ++ + edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev)); + device_unregister(&mci->dev); + bus_unregister(mci->bus); +- kfree(mci->bus->name); ++ kfree(name); + } + + static void mc_attr_release(struct device *dev) +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c +index c0f284230a39..ed8d93cbd1ca 100644 +--- a/drivers/gpu/drm/ast/ast_main.c ++++ b/drivers/gpu/drm/ast/ast_main.c +@@ -124,7 +124,7 @@ static int ast_get_dram_info(struct drm_device *dev) + } while (ast_read32(ast, 0x10000) != 0x01); + data = ast_read32(ast, 0x10004); + +- if (data & 0x400) ++ if (data & 0x40) + ast->dram_bus_width = 16; + else + ast->dram_bus_width = 32; +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c +index a49ce4a6e72f..1320a81df792 100644 +--- a/drivers/infiniband/ulp/isert/ib_isert.c ++++ b/drivers/infiniband/ulp/isert/ib_isert.c +@@ -2071,6 +2071,24 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) + return isert_post_response(isert_conn, isert_cmd); + } + ++static void ++isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) ++{ ++ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); ++ struct isert_conn *isert_conn = (struct isert_conn *)conn->context; ++ struct isert_device *device = isert_conn->conn_device; ++ ++ 
spin_lock_bh(&conn->cmd_lock); ++ if (!list_empty(&cmd->i_conn_node)) ++ list_del_init(&cmd->i_conn_node); ++ spin_unlock_bh(&conn->cmd_lock); ++ ++ if (cmd->data_direction == DMA_TO_DEVICE) ++ iscsit_stop_dataout_timer(cmd); ++ ++ device->unreg_rdma_mem(isert_cmd, isert_conn); ++} ++ + static int + isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, + bool nopout_response) +@@ -2999,6 +3017,7 @@ static struct iscsit_transport iser_target_transport = { + .iscsit_get_dataout = isert_get_dataout, + .iscsit_queue_data_in = isert_put_datain, + .iscsit_queue_status = isert_put_response, ++ .iscsit_aborted_task = isert_aborted_task, + }; + + static int __init isert_init(void) +diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c +index 0097b8dae5bc..727a88d9a708 100644 +--- a/drivers/infiniband/ulp/srpt/ib_srpt.c ++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c +@@ -3093,6 +3093,14 @@ static void srpt_queue_tm_rsp(struct se_cmd *cmd) + srpt_queue_response(cmd); + } + ++static void srpt_aborted_task(struct se_cmd *cmd) ++{ ++ struct srpt_send_ioctx *ioctx = container_of(cmd, ++ struct srpt_send_ioctx, cmd); ++ ++ srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx); ++} ++ + static int srpt_queue_status(struct se_cmd *cmd) + { + struct srpt_send_ioctx *ioctx; +@@ -3940,6 +3948,7 @@ static struct target_core_fabric_ops srpt_template = { + .queue_data_in = srpt_queue_data_in, + .queue_status = srpt_queue_status, + .queue_tm_rsp = srpt_queue_tm_rsp, ++ .aborted_task = srpt_aborted_task, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c +index 28b4bea7c109..a8e52ef6e266 100644 +--- a/drivers/iommu/amd_iommu_init.c ++++ b/drivers/iommu/amd_iommu_init.c +@@ -226,6 +226,10 @@ static enum iommu_init_state init_state = IOMMU_START_STATE; + static int amd_iommu_enable_interrupts(void); + static int __init iommu_go_to_state(enum iommu_init_state state); + ++static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu, ++ u8 bank, u8 cntr, u8 fxn, ++ u64 *value, bool is_write); ++ + static inline void update_last_devid(u16 devid) + { + if (devid > amd_iommu_last_bdf) +@@ -1182,8 +1186,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu) + amd_iommu_pc_present = true; + + /* Check if the performance counters can be written to */ +- if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) || +- (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) || ++ if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) || ++ (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) || + (val != val2)) { + pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n"); + amd_iommu_pc_present = false; +@@ -2314,22 +2318,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid) + } + EXPORT_SYMBOL(amd_iommu_pc_get_max_counters); + +-int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, ++static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu, ++ u8 bank, u8 cntr, u8 fxn, + u64 *value, bool is_write) + { +- struct amd_iommu *iommu; + u32 offset; + u32 max_offset_lim; + +- /* Make sure the IOMMU PC resource is available */ +- if (!amd_iommu_pc_present) +- return -ENODEV; +- +- /* Locate the iommu associated with the device ID */ +- iommu = amd_iommu_rlookup_table[devid]; +- + /* Check for valid iommu and pc register indexing */ +- if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7))) ++ if (WARN_ON((fxn > 0x28) || (fxn & 7))) + 
return -ENODEV; + + offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn); +@@ -2353,3 +2350,16 @@ int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, + return 0; + } + EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val); ++ ++int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, ++ u64 *value, bool is_write) ++{ ++ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; ++ ++ /* Make sure the IOMMU PC resource is available */ ++ if (!amd_iommu_pc_present || iommu == NULL) ++ return -ENODEV; ++ ++ return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn, ++ value, is_write); ++} +diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c +index 2a1b6e037e1a..0134ba32a057 100644 +--- a/drivers/mtd/ubi/upd.c ++++ b/drivers/mtd/ubi/upd.c +@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, + vol->changing_leb = 1; + vol->ch_lnum = req->lnum; + +- vol->upd_buf = vmalloc(req->bytes); ++ vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size)); + if (!vol->upd_buf) + return -ENOMEM; + +diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c +index 1817f3f2b02d..f46c0a6c5016 100644 +--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c ++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c +@@ -684,6 +684,20 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) + qlt_xmit_tm_rsp(mcmd); + } + ++static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) ++{ ++ struct qla_tgt_cmd *cmd = container_of(se_cmd, ++ struct qla_tgt_cmd, se_cmd); ++ struct scsi_qla_host *vha = cmd->vha; ++ struct qla_hw_data *ha = vha->hw; ++ ++ if (!cmd->sg_mapped) ++ return; ++ ++ pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); ++ cmd->sg_mapped = 0; ++} ++ + /* Local pointer to allocated TCM configfs fabric module */ + struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; + struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; +@@ -1886,6 +1900,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = { + .queue_data_in = tcm_qla2xxx_queue_data_in, + .queue_status = tcm_qla2xxx_queue_status, + .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, ++ .aborted_task = tcm_qla2xxx_aborted_task, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c +@@ -1935,6 +1950,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { + .queue_data_in = tcm_qla2xxx_queue_data_in, + .queue_status = tcm_qla2xxx_queue_status, + .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, ++ .aborted_task = tcm_qla2xxx_aborted_task, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index c066e6e298c3..5600eab07865 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -500,6 +500,18 @@ static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd) + return 0; + } + ++static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) ++{ ++ bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD); ++ ++ spin_lock_bh(&conn->cmd_lock); ++ if (!list_empty(&cmd->i_conn_node)) ++ list_del_init(&cmd->i_conn_node); ++ spin_unlock_bh(&conn->cmd_lock); ++ ++ __iscsit_free_cmd(cmd, scsi_cmd, true); ++} ++ + static struct iscsit_transport iscsi_target_transport = { + .name = "iSCSI/TCP", + .transport_type = ISCSI_TCP, +@@ -514,6 +526,7 @@ static struct iscsit_transport iscsi_target_transport = { + .iscsit_response_queue 
= iscsit_response_queue, + .iscsit_queue_data_in = iscsit_queue_rsp, + .iscsit_queue_status = iscsit_queue_rsp, ++ .iscsit_aborted_task = iscsit_aborted_task, + }; + + static int __init iscsi_target_init_module(void) +diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c +index 83465617ad6d..4a28c5f0dfd1 100644 +--- a/drivers/target/iscsi/iscsi_target_configfs.c ++++ b/drivers/target/iscsi/iscsi_target_configfs.c +@@ -1815,6 +1815,13 @@ static void lio_queue_tm_rsp(struct se_cmd *se_cmd) + iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); + } + ++static void lio_aborted_task(struct se_cmd *se_cmd) ++{ ++ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); ++ ++ cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd); ++} ++ + static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg) + { + struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; +@@ -2013,6 +2020,7 @@ int iscsi_target_register_configfs(void) + fabric->tf_ops.queue_data_in = &lio_queue_data_in; + fabric->tf_ops.queue_status = &lio_queue_status; + fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp; ++ fabric->tf_ops.aborted_task = &lio_aborted_task; + /* + * Setup function pointers for generic logic in target_core_fabric_configfs.c + */ +diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c +index 1e406af4ee47..2e96ae6cf3c1 100644 +--- a/drivers/target/iscsi/iscsi_target_util.c ++++ b/drivers/target/iscsi/iscsi_target_util.c +@@ -705,8 +705,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd) + } + EXPORT_SYMBOL(iscsit_release_cmd); + +-static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, +- bool check_queues) ++void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, ++ bool check_queues) + { + struct iscsi_conn *conn = cmd->conn; + +diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h +index 561a424d1980..a68508c4fec8 100644 +--- a/drivers/target/iscsi/iscsi_target_util.h ++++ b/drivers/target/iscsi/iscsi_target_util.h +@@ -30,6 +30,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co + extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); + extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); + extern void iscsit_release_cmd(struct iscsi_cmd *); ++extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool); + extern void iscsit_free_cmd(struct iscsi_cmd *, bool); + extern int iscsit_check_session_usage_count(struct iscsi_session *); + extern void iscsit_dec_session_usage_count(struct iscsi_session *); +diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c +index 67c802c93ef3..fd974d69458b 100644 +--- a/drivers/target/loopback/tcm_loop.c ++++ b/drivers/target/loopback/tcm_loop.c +@@ -892,6 +892,11 @@ static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) + wake_up(&tl_tmr->tl_tmr_wait); + } + ++static void tcm_loop_aborted_task(struct se_cmd *se_cmd) ++{ ++ return; ++} ++ + static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba) + { + switch (tl_hba->tl_proto_id) { +@@ -1456,6 +1461,7 @@ static int tcm_loop_register_configfs(void) + fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in; + fabric->tf_ops.queue_status = &tcm_loop_queue_status; + fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; ++ fabric->tf_ops.aborted_task = &tcm_loop_aborted_task; + + /* + * Setup function pointers for generic logic in 
target_core_fabric_configfs.c +diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c +index 24884cac19ce..ad04ea928e4f 100644 +--- a/drivers/target/sbp/sbp_target.c ++++ b/drivers/target/sbp/sbp_target.c +@@ -1846,6 +1846,11 @@ static void sbp_queue_tm_rsp(struct se_cmd *se_cmd) + { + } + ++static void sbp_aborted_task(struct se_cmd *se_cmd) ++{ ++ return; ++} ++ + static int sbp_check_stop_free(struct se_cmd *se_cmd) + { + struct sbp_target_request *req = container_of(se_cmd, +@@ -2526,6 +2531,7 @@ static struct target_core_fabric_ops sbp_ops = { + .queue_data_in = sbp_queue_data_in, + .queue_status = sbp_queue_status, + .queue_tm_rsp = sbp_queue_tm_rsp, ++ .aborted_task = sbp_aborted_task, + .check_stop_free = sbp_check_stop_free, + + .fabric_make_wwn = sbp_make_tport, +diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c +index f30385385544..756def38c77a 100644 +--- a/drivers/target/target_core_configfs.c ++++ b/drivers/target/target_core_configfs.c +@@ -457,6 +457,10 @@ static int target_fabric_tf_ops_check( + pr_err("Missing tfo->queue_tm_rsp()\n"); + return -EINVAL; + } ++ if (!tfo->aborted_task) { ++ pr_err("Missing tfo->aborted_task()\n"); ++ return -EINVAL; ++ } + /* + * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() + * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in +diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c +index 093b8cb85de7..e366b812f0e1 100644 +--- a/drivers/target/target_core_device.c ++++ b/drivers/target/target_core_device.c +@@ -1577,6 +1577,49 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) + return dev; + } + ++/* ++ * Check if the underlying struct block_device request_queue supports ++ * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM ++ * in ATA and we need to set TPE=1 ++ */ ++bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, ++ struct request_queue *q, int block_size) ++{ ++ if (!blk_queue_discard(q)) ++ return false; ++ ++ attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) / ++ block_size; ++ /* ++ * Currently hardcoded to 1 in Linux/SCSI code.. ++ */ ++ attrib->max_unmap_block_desc_count = 1; ++ attrib->unmap_granularity = q->limits.discard_granularity / block_size; ++ attrib->unmap_granularity_alignment = q->limits.discard_alignment / ++ block_size; ++ return true; ++} ++EXPORT_SYMBOL(target_configure_unmap_from_queue); ++ ++/* ++ * Convert from blocksize advertised to the initiator to the 512 byte ++ * units unconditionally used by the Linux block layer. 
++ */ ++sector_t target_to_linux_sector(struct se_device *dev, sector_t lb) ++{ ++ switch (dev->dev_attrib.block_size) { ++ case 4096: ++ return lb << 3; ++ case 2048: ++ return lb << 2; ++ case 1024: ++ return lb << 1; ++ default: ++ return lb; ++ } ++} ++EXPORT_SYMBOL(target_to_linux_sector); ++ + int target_configure_device(struct se_device *dev) + { + struct se_hba *hba = dev->se_hba; +diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c +index b199f1e21d0e..6fe5b503f6e1 100644 +--- a/drivers/target/target_core_file.c ++++ b/drivers/target/target_core_file.c +@@ -164,25 +164,11 @@ static int fd_configure_device(struct se_device *dev) + " block_device blocks: %llu logical_block_size: %d\n", + dev_size, div_u64(dev_size, fd_dev->fd_block_size), + fd_dev->fd_block_size); +- /* +- * Check if the underlying struct block_device request_queue supports +- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM +- * in ATA and we need to set TPE=1 +- */ +- if (blk_queue_discard(q)) { +- dev->dev_attrib.max_unmap_lba_count = +- q->limits.max_discard_sectors; +- /* +- * Currently hardcoded to 1 in Linux/SCSI code.. +- */ +- dev->dev_attrib.max_unmap_block_desc_count = 1; +- dev->dev_attrib.unmap_granularity = +- q->limits.discard_granularity >> 9; +- dev->dev_attrib.unmap_granularity_alignment = +- q->limits.discard_alignment; ++ ++ if (target_configure_unmap_from_queue(&dev->dev_attrib, q, ++ fd_dev->fd_block_size)) + pr_debug("IFILE: BLOCK Discard support available," +- " disabled by default\n"); +- } ++ " disabled by default\n"); + /* + * Enable write same emulation for IBLOCK and use 0xFFFF as + * the smaller WRITE_SAME(10) only has a two-byte block count. +@@ -545,9 +531,12 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb) + if (S_ISBLK(inode->i_mode)) { + /* The backend is block device, use discard */ + struct block_device *bdev = inode->i_bdev; ++ struct se_device *dev = cmd->se_dev; + +- ret = blkdev_issue_discard(bdev, lba, +- nolb, GFP_KERNEL, 0); ++ ret = blkdev_issue_discard(bdev, ++ target_to_linux_sector(dev, lba), ++ target_to_linux_sector(dev, nolb), ++ GFP_KERNEL, 0); + if (ret < 0) { + pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n", + ret); +diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c +index feefe24a88f7..357b9fb61499 100644 +--- a/drivers/target/target_core_iblock.c ++++ b/drivers/target/target_core_iblock.c +@@ -126,27 +126,11 @@ static int iblock_configure_device(struct se_device *dev) + dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); + dev->dev_attrib.hw_queue_depth = q->nr_requests; + +- /* +- * Check if the underlying struct block_device request_queue supports +- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM +- * in ATA and we need to set TPE=1 +- */ +- if (blk_queue_discard(q)) { +- dev->dev_attrib.max_unmap_lba_count = +- q->limits.max_discard_sectors; +- +- /* +- * Currently hardcoded to 1 in Linux/SCSI code.. 
+- */ +- dev->dev_attrib.max_unmap_block_desc_count = 1; +- dev->dev_attrib.unmap_granularity = +- q->limits.discard_granularity >> 9; +- dev->dev_attrib.unmap_granularity_alignment = +- q->limits.discard_alignment; +- ++ if (target_configure_unmap_from_queue(&dev->dev_attrib, q, ++ dev->dev_attrib.hw_block_size)) + pr_debug("IBLOCK: BLOCK Discard support available," +- " disabled by default\n"); +- } ++ " disabled by default\n"); ++ + /* + * Enable write same emulation for IBLOCK and use 0xFFFF as + * the smaller WRITE_SAME(10) only has a two-byte block count. +@@ -418,9 +402,13 @@ iblock_do_unmap(struct se_cmd *cmd, void *priv, + sector_t lba, sector_t nolb) + { + struct block_device *bdev = priv; ++ struct se_device *dev = cmd->se_dev; + int ret; + +- ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0); ++ ret = blkdev_issue_discard(bdev, ++ target_to_linux_sector(dev, lba), ++ target_to_linux_sector(dev, nolb), ++ GFP_KERNEL, 0); + if (ret < 0) { + pr_err("blkdev_issue_discard() failed: %d\n", ret); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +@@ -460,8 +448,10 @@ iblock_execute_write_same(struct se_cmd *cmd) + struct scatterlist *sg; + struct bio *bio; + struct bio_list list; +- sector_t block_lba = cmd->t_task_lba; +- sector_t sectors = sbc_get_write_same_sectors(cmd); ++ struct se_device *dev = cmd->se_dev; ++ sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba); ++ sector_t sectors = target_to_linux_sector(dev, ++ sbc_get_write_same_sectors(cmd)); + + sg = &cmd->t_data_sg[0]; + +@@ -670,12 +660,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, + enum dma_data_direction data_direction) + { + struct se_device *dev = cmd->se_dev; ++ sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba); + struct iblock_req *ibr; + struct bio *bio, *bio_start; + struct bio_list list; + struct scatterlist *sg; + u32 sg_num = sgl_nents; +- sector_t block_lba; + unsigned bio_cnt; + int rw = 0; + int i; +@@ -701,24 +691,6 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, + rw = READ; + } + +- /* +- * Convert the blocksize advertised to the initiator to the 512 byte +- * units unconditionally used by the Linux block layer. 
+- */ +- if (dev->dev_attrib.block_size == 4096) +- block_lba = (cmd->t_task_lba << 3); +- else if (dev->dev_attrib.block_size == 2048) +- block_lba = (cmd->t_task_lba << 2); +- else if (dev->dev_attrib.block_size == 1024) +- block_lba = (cmd->t_task_lba << 1); +- else if (dev->dev_attrib.block_size == 512) +- block_lba = cmd->t_task_lba; +- else { +- pr_err("Unsupported SCSI -> BLOCK LBA conversion:" +- " %u\n", dev->dev_attrib.block_size); +- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +- } +- + ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); + if (!ibr) + goto fail; +diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c +index 70c638f730af..47a90d631a90 100644 +--- a/drivers/target/target_core_tmr.c ++++ b/drivers/target/target_core_tmr.c +@@ -76,25 +76,29 @@ void core_tmr_release_req( + } + + spin_lock_irqsave(&dev->se_tmr_lock, flags); +- list_del(&tmr->tmr_list); ++ list_del_init(&tmr->tmr_list); + spin_unlock_irqrestore(&dev->se_tmr_lock, flags); + + kfree(tmr); + } + +-static void core_tmr_handle_tas_abort( +- struct se_node_acl *tmr_nacl, +- struct se_cmd *cmd, +- int tas) ++static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) + { ++ unsigned long flags; ++ bool remove = true, send_tas; + /* + * TASK ABORTED status (TAS) bit support +- */ +- if ((tmr_nacl && +- (tmr_nacl == cmd->se_sess->se_node_acl)) || tas) ++ */ ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ send_tas = (cmd->transport_state & CMD_T_TAS); ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ ++ if (send_tas) { ++ remove = false; + transport_send_task_abort(cmd); ++ } + +- transport_cmd_finish_abort(cmd, 0); ++ transport_cmd_finish_abort(cmd, remove); + } + + static int target_check_cdb_and_preempt(struct list_head *list, +@@ -112,6 +116,47 @@ static int target_check_cdb_and_preempt(struct list_head *list, + return 1; + } + ++static bool __target_check_io_state(struct se_cmd *se_cmd, ++ struct se_session *tmr_sess, int tas) ++{ ++ struct se_session *sess = se_cmd->se_sess; ++ ++ assert_spin_locked(&sess->sess_cmd_lock); ++ WARN_ON_ONCE(!irqs_disabled()); ++ /* ++ * If command already reached CMD_T_COMPLETE state within ++ * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown, ++ * this se_cmd has been passed to fabric driver and will ++ * not be aborted. ++ * ++ * Otherwise, obtain a local se_cmd->cmd_kref now for TMR ++ * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as ++ * long as se_cmd->cmd_kref is still active unless zero. 
++ */ ++ spin_lock(&se_cmd->t_state_lock); ++ if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) { ++ pr_debug("Attempted to abort io tag: %u already complete or" ++ " fabric stop, skipping\n", ++ se_cmd->se_tfo->get_task_tag(se_cmd)); ++ spin_unlock(&se_cmd->t_state_lock); ++ return false; ++ } ++ if (sess->sess_tearing_down || se_cmd->cmd_wait_set) { ++ pr_debug("Attempted to abort io tag: %u already shutdown," ++ " skipping\n", se_cmd->se_tfo->get_task_tag(se_cmd)); ++ spin_unlock(&se_cmd->t_state_lock); ++ return false; ++ } ++ se_cmd->transport_state |= CMD_T_ABORTED; ++ ++ if ((tmr_sess != se_cmd->se_sess) && tas) ++ se_cmd->transport_state |= CMD_T_TAS; ++ ++ spin_unlock(&se_cmd->t_state_lock); ++ ++ return kref_get_unless_zero(&se_cmd->cmd_kref); ++} ++ + void core_tmr_abort_task( + struct se_device *dev, + struct se_tmr_req *tmr, +@@ -134,33 +179,19 @@ void core_tmr_abort_task( + printk("ABORT_TASK: Found referenced %s task_tag: %u\n", + se_cmd->se_tfo->get_fabric_name(), ref_tag); + +- spin_lock(&se_cmd->t_state_lock); +- if (se_cmd->transport_state & CMD_T_COMPLETE) { +- printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag); +- spin_unlock(&se_cmd->t_state_lock); ++ if (!__target_check_io_state(se_cmd, se_sess, 0)) { + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); ++ target_put_sess_cmd(se_sess, se_cmd); + goto out; + } +- se_cmd->transport_state |= CMD_T_ABORTED; +- spin_unlock(&se_cmd->t_state_lock); + + list_del_init(&se_cmd->se_cmd_list); +- kref_get(&se_cmd->cmd_kref); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + + cancel_work_sync(&se_cmd->work); + transport_wait_for_tasks(se_cmd); +- /* +- * Now send SAM_STAT_TASK_ABORTED status for the referenced +- * se_cmd descriptor.. +- */ +- transport_send_task_abort(se_cmd); +- /* +- * Also deal with possible extra acknowledge reference.. +- */ +- if (se_cmd->se_cmd_flags & SCF_ACK_KREF) +- target_put_sess_cmd(se_sess, se_cmd); + ++ transport_cmd_finish_abort(se_cmd, true); + target_put_sess_cmd(se_sess, se_cmd); + + printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" +@@ -182,9 +213,11 @@ static void core_tmr_drain_tmr_list( + struct list_head *preempt_and_abort_list) + { + LIST_HEAD(drain_tmr_list); ++ struct se_session *sess; + struct se_tmr_req *tmr_p, *tmr_pp; + struct se_cmd *cmd; + unsigned long flags; ++ bool rc; + /* + * Release all pending and outgoing TMRs aside from the received + * LUN_RESET tmr.. 
+@@ -210,17 +243,39 @@ static void core_tmr_drain_tmr_list( + if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) + continue; + ++ sess = cmd->se_sess; ++ if (WARN_ON_ONCE(!sess)) ++ continue; ++ ++ spin_lock(&sess->sess_cmd_lock); + spin_lock(&cmd->t_state_lock); +- if (!(cmd->transport_state & CMD_T_ACTIVE)) { ++ if (!(cmd->transport_state & CMD_T_ACTIVE) || ++ (cmd->transport_state & CMD_T_FABRIC_STOP)) { + spin_unlock(&cmd->t_state_lock); ++ spin_unlock(&sess->sess_cmd_lock); + continue; + } + if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { + spin_unlock(&cmd->t_state_lock); ++ spin_unlock(&sess->sess_cmd_lock); + continue; + } ++ if (sess->sess_tearing_down || cmd->cmd_wait_set) { ++ spin_unlock(&cmd->t_state_lock); ++ spin_unlock(&sess->sess_cmd_lock); ++ continue; ++ } ++ cmd->transport_state |= CMD_T_ABORTED; + spin_unlock(&cmd->t_state_lock); + ++ rc = kref_get_unless_zero(&cmd->cmd_kref); ++ if (!rc) { ++ printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n"); ++ spin_unlock(&sess->sess_cmd_lock); ++ continue; ++ } ++ spin_unlock(&sess->sess_cmd_lock); ++ + list_move_tail(&tmr_p->tmr_list, &drain_tmr_list); + } + spin_unlock_irqrestore(&dev->se_tmr_lock, flags); +@@ -234,20 +289,26 @@ static void core_tmr_drain_tmr_list( + (preempt_and_abort_list) ? "Preempt" : "", tmr_p, + tmr_p->function, tmr_p->response, cmd->t_state); + ++ cancel_work_sync(&cmd->work); ++ transport_wait_for_tasks(cmd); ++ + transport_cmd_finish_abort(cmd, 1); ++ target_put_sess_cmd(cmd->se_sess, cmd); + } + } + + static void core_tmr_drain_state_list( + struct se_device *dev, + struct se_cmd *prout_cmd, +- struct se_node_acl *tmr_nacl, ++ struct se_session *tmr_sess, + int tas, + struct list_head *preempt_and_abort_list) + { + LIST_HEAD(drain_task_list); ++ struct se_session *sess; + struct se_cmd *cmd, *next; + unsigned long flags; ++ int rc; + + /* + * Complete outstanding commands with TASK_ABORTED SAM status. +@@ -286,6 +347,16 @@ static void core_tmr_drain_state_list( + if (prout_cmd == cmd) + continue; + ++ sess = cmd->se_sess; ++ if (WARN_ON_ONCE(!sess)) ++ continue; ++ ++ spin_lock(&sess->sess_cmd_lock); ++ rc = __target_check_io_state(cmd, tmr_sess, tas); ++ spin_unlock(&sess->sess_cmd_lock); ++ if (!rc) ++ continue; ++ + list_move_tail(&cmd->state_list, &drain_task_list); + cmd->state_active = false; + } +@@ -293,7 +364,7 @@ static void core_tmr_drain_state_list( + + while (!list_empty(&drain_task_list)) { + cmd = list_entry(drain_task_list.next, struct se_cmd, state_list); +- list_del(&cmd->state_list); ++ list_del_init(&cmd->state_list); + + pr_debug("LUN_RESET: %s cmd: %p" + " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d" +@@ -317,16 +388,11 @@ static void core_tmr_drain_state_list( + * loop above, but we do it down here given that + * cancel_work_sync may block. 
+ */ +- if (cmd->t_state == TRANSPORT_COMPLETE) +- cancel_work_sync(&cmd->work); +- +- spin_lock_irqsave(&cmd->t_state_lock, flags); +- target_stop_cmd(cmd, &flags); +- +- cmd->transport_state |= CMD_T_ABORTED; +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ cancel_work_sync(&cmd->work); ++ transport_wait_for_tasks(cmd); + +- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas); ++ core_tmr_handle_tas_abort(cmd, tas); ++ target_put_sess_cmd(cmd->se_sess, cmd); + } + } + +@@ -338,6 +404,7 @@ int core_tmr_lun_reset( + { + struct se_node_acl *tmr_nacl = NULL; + struct se_portal_group *tmr_tpg = NULL; ++ struct se_session *tmr_sess = NULL; + int tas; + /* + * TASK_ABORTED status bit, this is configurable via ConfigFS +@@ -356,8 +423,9 @@ int core_tmr_lun_reset( + * or struct se_device passthrough.. + */ + if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { +- tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; +- tmr_tpg = tmr->task_cmd->se_sess->se_tpg; ++ tmr_sess = tmr->task_cmd->se_sess; ++ tmr_nacl = tmr_sess->se_node_acl; ++ tmr_tpg = tmr_sess->se_tpg; + if (tmr_nacl && tmr_tpg) { + pr_debug("LUN_RESET: TMR caller fabric: %s" + " initiator port %s\n", +@@ -370,7 +438,7 @@ int core_tmr_lun_reset( + dev->transport->name, tas); + + core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); +- core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas, ++ core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas, + preempt_and_abort_list); + + /* +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index 7afea9b59e2c..cbf927a67160 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -509,9 +509,6 @@ void transport_deregister_session(struct se_session *se_sess) + } + EXPORT_SYMBOL(transport_deregister_session); + +-/* +- * Called with cmd->t_state_lock held. +- */ + static void target_remove_from_state_list(struct se_cmd *cmd) + { + struct se_device *dev = cmd->se_dev; +@@ -536,10 +533,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, + { + unsigned long flags; + +- spin_lock_irqsave(&cmd->t_state_lock, flags); +- if (write_pending) +- cmd->t_state = TRANSPORT_WRITE_PENDING; +- + if (remove_from_lists) { + target_remove_from_state_list(cmd); + +@@ -549,6 +542,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, + cmd->se_lun = NULL; + } + ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ if (write_pending) ++ cmd->t_state = TRANSPORT_WRITE_PENDING; ++ + /* + * Determine if frontend context caller is requesting the stopping of + * this command for frontend exceptions. +@@ -603,9 +600,20 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) + + void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) + { ++ bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); ++ ++ if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) ++ transport_lun_remove_cmd(cmd); ++ /* ++ * Allow the fabric driver to unmap any resources before ++ * releasing the descriptor via TFO->release_cmd() ++ */ ++ if (remove) ++ cmd->se_tfo->aborted_task(cmd); ++ + if (transport_cmd_check_stop_to_fabric(cmd)) + return; +- if (remove) ++ if (remove && ack_kref) + transport_put_cmd(cmd); + } + +@@ -673,7 +681,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) + * Check for case where an explicit ABORT_TASK has been received + * and transport_wait_for_tasks() will be waiting for completion.. 
+ */ +- if (cmd->transport_state & CMD_T_ABORTED && ++ if (cmd->transport_state & CMD_T_ABORTED || + cmd->transport_state & CMD_T_STOP) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + complete_all(&cmd->t_transport_stop_comp); +@@ -1746,19 +1754,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd) + return true; + } + ++static int __transport_check_aborted_status(struct se_cmd *, int); ++ + void target_execute_cmd(struct se_cmd *cmd) + { + /* +- * If the received CDB has aleady been aborted stop processing it here. +- */ +- if (transport_check_aborted_status(cmd, 1)) +- return; +- +- /* + * Determine if frontend context caller is requesting the stopping of + * this command for frontend exceptions. ++ * ++ * If the received CDB has aleady been aborted stop processing it here. + */ + spin_lock_irq(&cmd->t_state_lock); ++ if (__transport_check_aborted_status(cmd, 1)) { ++ spin_unlock_irq(&cmd->t_state_lock); ++ return; ++ } + if (cmd->transport_state & CMD_T_STOP) { + pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", + __func__, __LINE__, +@@ -2076,20 +2086,14 @@ static inline void transport_free_pages(struct se_cmd *cmd) + } + + /** +- * transport_release_cmd - free a command +- * @cmd: command to free ++ * transport_put_cmd - release a reference to a command ++ * @cmd: command to release + * +- * This routine unconditionally frees a command, and reference counting +- * or list removal must be done in the caller. ++ * This routine releases our reference to the command and frees it if possible. + */ +-static int transport_release_cmd(struct se_cmd *cmd) ++static int transport_put_cmd(struct se_cmd *cmd) + { + BUG_ON(!cmd->se_tfo); +- +- if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) +- core_tmr_release_req(cmd->se_tmr_req); +- if (cmd->t_task_cdb != cmd->__t_task_cdb) +- kfree(cmd->t_task_cdb); + /* + * If this cmd has been setup with target_get_sess_cmd(), drop + * the kref and call ->release_cmd() in kref callback. +@@ -2097,18 +2101,6 @@ static int transport_release_cmd(struct se_cmd *cmd) + return target_put_sess_cmd(cmd->se_sess, cmd); + } + +-/** +- * transport_put_cmd - release a reference to a command +- * @cmd: command to release +- * +- * This routine releases our reference to the command and frees it if possible. 
+- */ +-static int transport_put_cmd(struct se_cmd *cmd) +-{ +- transport_free_pages(cmd); +- return transport_release_cmd(cmd); +-} +- + void *transport_kmap_data_sg(struct se_cmd *cmd) + { + struct scatterlist *sg = cmd->t_data_sg; +@@ -2296,34 +2288,59 @@ static void transport_write_pending_qf(struct se_cmd *cmd) + } + } + +-int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) ++static bool ++__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, ++ unsigned long *flags); ++ ++static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) + { + unsigned long flags; ++ ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++} ++ ++int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) ++{ + int ret = 0; ++ bool aborted = false, tas = false; + + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { + if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) +- transport_wait_for_tasks(cmd); ++ target_wait_free_cmd(cmd, &aborted, &tas); + +- ret = transport_release_cmd(cmd); ++ if (!aborted || tas) ++ ret = transport_put_cmd(cmd); + } else { + if (wait_for_tasks) +- transport_wait_for_tasks(cmd); ++ target_wait_free_cmd(cmd, &aborted, &tas); + /* + * Handle WRITE failure case where transport_generic_new_cmd() + * has already added se_cmd to state_list, but fabric has + * failed command before I/O submission. + */ +- if (cmd->state_active) { +- spin_lock_irqsave(&cmd->t_state_lock, flags); ++ if (cmd->state_active) + target_remove_from_state_list(cmd); +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); +- } + + if (cmd->se_lun) + transport_lun_remove_cmd(cmd); + +- ret = transport_put_cmd(cmd); ++ if (!aborted || tas) ++ ret = transport_put_cmd(cmd); ++ } ++ /* ++ * If the task has been internally aborted due to TMR ABORT_TASK ++ * or LUN_RESET, target_core_tmr.c is responsible for performing ++ * the remaining calls to target_put_sess_cmd(), and not the ++ * callers of this function. 
++ */ ++ if (aborted) { ++ pr_debug("Detected CMD_T_ABORTED for ITT: %u\n", ++ cmd->se_tfo->get_task_tag(cmd)); ++ wait_for_completion(&cmd->cmd_wait_comp); ++ cmd->se_tfo->release_cmd(cmd); ++ ret = 1; + } + return ret; + } +@@ -2366,24 +2383,44 @@ out: + } + EXPORT_SYMBOL(target_get_sess_cmd); + ++static void target_free_cmd_mem(struct se_cmd *cmd) ++{ ++ transport_free_pages(cmd); ++ ++ if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) ++ core_tmr_release_req(cmd->se_tmr_req); ++ if (cmd->t_task_cdb != cmd->__t_task_cdb) ++ kfree(cmd->t_task_cdb); ++} ++ + static void target_release_cmd_kref(struct kref *kref) + { + struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); + struct se_session *se_sess = se_cmd->se_sess; ++ bool fabric_stop; + + if (list_empty(&se_cmd->se_cmd_list)) { + spin_unlock(&se_sess->sess_cmd_lock); ++ target_free_cmd_mem(se_cmd); + se_cmd->se_tfo->release_cmd(se_cmd); + return; + } +- if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { ++ ++ spin_lock(&se_cmd->t_state_lock); ++ fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP); ++ spin_unlock(&se_cmd->t_state_lock); ++ ++ if (se_cmd->cmd_wait_set || fabric_stop) { ++ list_del_init(&se_cmd->se_cmd_list); + spin_unlock(&se_sess->sess_cmd_lock); ++ target_free_cmd_mem(se_cmd); + complete(&se_cmd->cmd_wait_comp); + return; + } +- list_del(&se_cmd->se_cmd_list); ++ list_del_init(&se_cmd->se_cmd_list); + spin_unlock(&se_sess->sess_cmd_lock); + ++ target_free_cmd_mem(se_cmd); + se_cmd->se_tfo->release_cmd(se_cmd); + } + +@@ -2394,6 +2431,7 @@ static void target_release_cmd_kref(struct kref *kref) + int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) + { + if (!se_sess) { ++ target_free_cmd_mem(se_cmd); + se_cmd->se_tfo->release_cmd(se_cmd); + return 1; + } +@@ -2411,6 +2449,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) + { + struct se_cmd *se_cmd; + unsigned long flags; ++ int rc; + + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); + if (se_sess->sess_tearing_down) { +@@ -2420,8 +2459,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) + se_sess->sess_tearing_down = 1; + list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); + +- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) +- se_cmd->cmd_wait_set = 1; ++ list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) { ++ rc = kref_get_unless_zero(&se_cmd->cmd_kref); ++ if (rc) { ++ se_cmd->cmd_wait_set = 1; ++ spin_lock(&se_cmd->t_state_lock); ++ se_cmd->transport_state |= CMD_T_FABRIC_STOP; ++ spin_unlock(&se_cmd->t_state_lock); ++ } ++ } + + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + } +@@ -2434,15 +2480,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess) + { + struct se_cmd *se_cmd, *tmp_cmd; + unsigned long flags; ++ bool tas; + + list_for_each_entry_safe(se_cmd, tmp_cmd, + &se_sess->sess_wait_list, se_cmd_list) { +- list_del(&se_cmd->se_cmd_list); ++ list_del_init(&se_cmd->se_cmd_list); + + pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" + " %d\n", se_cmd, se_cmd->t_state, + se_cmd->se_tfo->get_cmd_state(se_cmd)); + ++ spin_lock_irqsave(&se_cmd->t_state_lock, flags); ++ tas = (se_cmd->transport_state & CMD_T_TAS); ++ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); ++ ++ if (!target_put_sess_cmd(se_sess, se_cmd)) { ++ if (tas) ++ target_put_sess_cmd(se_sess, se_cmd); ++ } ++ + wait_for_completion(&se_cmd->cmd_wait_comp); + pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" + " fabric 
state: %d\n", se_cmd, se_cmd->t_state, +@@ -2485,34 +2541,38 @@ int transport_clear_lun_ref(struct se_lun *lun) + return 0; + } + +-/** +- * transport_wait_for_tasks - wait for completion to occur +- * @cmd: command to wait +- * +- * Called from frontend fabric context to wait for storage engine +- * to pause and/or release frontend generated struct se_cmd. +- */ +-bool transport_wait_for_tasks(struct se_cmd *cmd) ++static bool ++__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, ++ bool *aborted, bool *tas, unsigned long *flags) ++ __releases(&cmd->t_state_lock) ++ __acquires(&cmd->t_state_lock) + { +- unsigned long flags; + +- spin_lock_irqsave(&cmd->t_state_lock, flags); ++ assert_spin_locked(&cmd->t_state_lock); ++ WARN_ON_ONCE(!irqs_disabled()); ++ ++ if (fabric_stop) ++ cmd->transport_state |= CMD_T_FABRIC_STOP; ++ ++ if (cmd->transport_state & CMD_T_ABORTED) ++ *aborted = true; ++ ++ if (cmd->transport_state & CMD_T_TAS) ++ *tas = true; ++ + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && +- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) + return false; +- } + + if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && +- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) + return false; +- } + +- if (!(cmd->transport_state & CMD_T_ACTIVE)) { +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ if (!(cmd->transport_state & CMD_T_ACTIVE)) ++ return false; ++ ++ if (fabric_stop && *aborted) + return false; +- } + + cmd->transport_state |= CMD_T_STOP; + +@@ -2521,20 +2581,37 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) + cmd, cmd->se_tfo->get_task_tag(cmd), + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); + +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ spin_unlock_irqrestore(&cmd->t_state_lock, *flags); + + wait_for_completion(&cmd->t_transport_stop_comp); + +- spin_lock_irqsave(&cmd->t_state_lock, flags); ++ spin_lock_irqsave(&cmd->t_state_lock, *flags); + cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); + + pr_debug("wait_for_tasks: Stopped wait_for_completion(" + "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", + cmd->se_tfo->get_task_tag(cmd)); + ++ return true; ++} ++ ++/** ++ * transport_wait_for_tasks - wait for completion to occur ++ * @cmd: command to wait ++ * ++ * Called from frontend fabric context to wait for storage engine ++ * to pause and/or release frontend generated struct se_cmd. 
++ */ ++bool transport_wait_for_tasks(struct se_cmd *cmd) ++{ ++ unsigned long flags; ++ bool ret, aborted = false, tas = false; ++ ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + +- return true; ++ return ret; + } + EXPORT_SYMBOL(transport_wait_for_tasks); + +@@ -2820,24 +2897,51 @@ after_reason: + } + EXPORT_SYMBOL(transport_send_check_condition_and_sense); + +-int transport_check_aborted_status(struct se_cmd *cmd, int send_status) ++static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status) ++ __releases(&cmd->t_state_lock) ++ __acquires(&cmd->t_state_lock) + { ++ assert_spin_locked(&cmd->t_state_lock); ++ WARN_ON_ONCE(!irqs_disabled()); ++ + if (!(cmd->transport_state & CMD_T_ABORTED)) + return 0; + +- if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) ++ /* ++ * If cmd has been aborted but either no status is to be sent or it has ++ * already been sent, just return ++ */ ++ if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) { ++ if (send_status) ++ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; + return 1; ++ } + +- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n", +- cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); ++ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:" ++ " 0x%02x ITT: 0x%08x\n", cmd->t_task_cdb[0], ++ cmd->se_tfo->get_task_tag(cmd)); + +- cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; ++ cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; + cmd->scsi_status = SAM_STAT_TASK_ABORTED; + trace_target_cmd_complete(cmd); ++ ++ spin_unlock_irq(&cmd->t_state_lock); + cmd->se_tfo->queue_status(cmd); ++ spin_lock_irq(&cmd->t_state_lock); + + return 1; + } ++ ++int transport_check_aborted_status(struct se_cmd *cmd, int send_status) ++{ ++ int ret; ++ ++ spin_lock_irq(&cmd->t_state_lock); ++ ret = __transport_check_aborted_status(cmd, send_status); ++ spin_unlock_irq(&cmd->t_state_lock); ++ ++ return ret; ++} + EXPORT_SYMBOL(transport_check_aborted_status); + + void transport_send_task_abort(struct se_cmd *cmd) +@@ -2845,7 +2949,7 @@ void transport_send_task_abort(struct se_cmd *cmd) + unsigned long flags; + + spin_lock_irqsave(&cmd->t_state_lock, flags); +- if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) { ++ if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return; + } +@@ -2859,11 +2963,17 @@ void transport_send_task_abort(struct se_cmd *cmd) + */ + if (cmd->data_direction == DMA_TO_DEVICE) { + if (cmd->se_tfo->write_pending_status(cmd) != 0) { +- cmd->transport_state |= CMD_T_ABORTED; +- smp_mb__after_atomic_inc(); ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) { ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ goto send_abort; ++ } ++ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return; + } + } ++send_abort: + cmd->scsi_status = SAM_STAT_TASK_ABORTED; + + transport_lun_remove_cmd(cmd); +@@ -2881,8 +2991,17 @@ static void target_tmr_work(struct work_struct *work) + struct se_cmd *cmd = container_of(work, struct se_cmd, work); + struct se_device *dev = cmd->se_dev; + struct se_tmr_req *tmr = cmd->se_tmr_req; ++ unsigned long flags; + int ret; + ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ if (cmd->transport_state & CMD_T_ABORTED) { ++ tmr->response = 
TMR_FUNCTION_REJECTED; ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ goto check_stop; ++ } ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ + switch (tmr->function) { + case TMR_ABORT_TASK: + core_tmr_abort_task(dev, tmr, cmd->se_sess); +@@ -2910,9 +3029,17 @@ static void target_tmr_work(struct work_struct *work) + break; + } + ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ if (cmd->transport_state & CMD_T_ABORTED) { ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ goto check_stop; ++ } + cmd->t_state = TRANSPORT_ISTATE_PROCESSING; ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ + cmd->se_tfo->queue_tm_rsp(cmd); + ++check_stop: + transport_cmd_check_stop_to_fabric(cmd); + } + +diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h +index 752863acecb8..4f4b97161228 100644 +--- a/drivers/target/tcm_fc/tcm_fc.h ++++ b/drivers/target/tcm_fc/tcm_fc.h +@@ -163,6 +163,7 @@ int ft_write_pending_status(struct se_cmd *); + u32 ft_get_task_tag(struct se_cmd *); + int ft_get_cmd_state(struct se_cmd *); + void ft_queue_tm_resp(struct se_cmd *); ++void ft_aborted_task(struct se_cmd *); + + /* + * other internal functions. +diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c +index d22cdc77e9d4..f5fd515b2bee 100644 +--- a/drivers/target/tcm_fc/tfc_cmd.c ++++ b/drivers/target/tcm_fc/tfc_cmd.c +@@ -426,6 +426,11 @@ void ft_queue_tm_resp(struct se_cmd *se_cmd) + ft_send_resp_code(cmd, code); + } + ++void ft_aborted_task(struct se_cmd *se_cmd) ++{ ++ return; ++} ++ + static void ft_send_work(struct work_struct *work); + + /* +diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c +index e879da81ad93..b8b5a719a784 100644 +--- a/drivers/target/tcm_fc/tfc_conf.c ++++ b/drivers/target/tcm_fc/tfc_conf.c +@@ -536,6 +536,7 @@ static struct target_core_fabric_ops ft_fabric_ops = { + .queue_data_in = ft_queue_data_in, + .queue_status = ft_queue_status, + .queue_tm_rsp = ft_queue_tm_resp, ++ .aborted_task = ft_aborted_task, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c +diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c +index 39bd7ec8bf75..f66a14b6fc73 100644 +--- a/drivers/usb/chipidea/otg.c ++++ b/drivers/usb/chipidea/otg.c +@@ -96,7 +96,7 @@ static void ci_otg_work(struct work_struct *work) + int ci_hdrc_otg_init(struct ci_hdrc *ci) + { + INIT_WORK(&ci->work, ci_otg_work); +- ci->wq = create_singlethread_workqueue("ci_otg"); ++ ci->wq = create_freezable_workqueue("ci_otg"); + if (!ci->wq) { + dev_err(ci->dev, "can't create workqueue\n"); + return -ENODEV; +diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c +index 460c266b8e24..cdec2492ff40 100644 +--- a/drivers/usb/gadget/tcm_usb_gadget.c ++++ b/drivers/usb/gadget/tcm_usb_gadget.c +@@ -1471,6 +1471,11 @@ static void usbg_queue_tm_rsp(struct se_cmd *se_cmd) + { + } + ++static void usbg_aborted_task(struct se_cmd *se_cmd) ++{ ++ return; ++} ++ + static const char *usbg_check_wwn(const char *name) + { + const char *n; +@@ -1897,6 +1902,7 @@ static struct target_core_fabric_ops usbg_ops = { + .queue_data_in = usbg_send_read_response, + .queue_status = usbg_send_status_response, + .queue_tm_rsp = usbg_queue_tm_rsp, ++ .aborted_task = usbg_aborted_task, + .check_stop_free = usbg_check_stop_free, + + .fabric_make_wwn = usbg_make_tport, +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 21bf168981f9..922723edd6b0 100644 +--- 
a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -164,6 +164,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ + { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ + { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ ++ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ + { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 9bab34cf01d4..24366a2afea6 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -271,6 +271,7 @@ static void option_instat_callback(struct urb *urb); + #define TELIT_PRODUCT_UE910_V2 0x1012 + #define TELIT_PRODUCT_LE922_USBCFG0 0x1042 + #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 ++#define TELIT_PRODUCT_LE922_USBCFG5 0x1045 + #define TELIT_PRODUCT_LE920 0x1200 + #define TELIT_PRODUCT_LE910 0x1201 + +@@ -1140,6 +1141,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ ++ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */ ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), +@@ -1191,6 +1194,8 @@ static const struct usb_device_id option_ids[] = { + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3), + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), ++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), + .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), +diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c +index 6aeea1936aea..8f48f2bf34d4 100644 +--- a/drivers/vhost/scsi.c ++++ b/drivers/vhost/scsi.c +@@ -539,6 +539,11 @@ static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd) + return; + } + ++static void tcm_vhost_aborted_task(struct se_cmd *se_cmd) ++{ ++ return; ++} ++ + static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt) + { + vs->vs_events_nr--; +@@ -2173,6 +2178,7 @@ static struct target_core_fabric_ops tcm_vhost_ops = { + .queue_data_in = tcm_vhost_queue_data_in, + .queue_status = tcm_vhost_queue_status, + .queue_tm_rsp = tcm_vhost_queue_tm_rsp, ++ .aborted_task = tcm_vhost_aborted_task, + /* + * Setup callers for generic logic in target_core_fabric_configfs.c + */ +diff --git a/fs/bio.c b/fs/bio.c +index b2b1451912b5..6d8cf434bf94 100644 +--- a/fs/bio.c ++++ b/fs/bio.c +@@ -1096,9 +1096,12 @@ int bio_uncopy_user(struct bio *bio) + ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs, + bio_data_dir(bio) == READ, + 0, bmd->is_our_pages); +- else if (bmd->is_our_pages) +- bio_for_each_segment_all(bvec, bio, i) +- __free_page(bvec->bv_page); ++ else { ++ ret = -EINTR; ++ if (bmd->is_our_pages) ++ bio_for_each_segment_all(bvec, bio, i) ++ 
__free_page(bvec->bv_page); ++ } + } + kfree(bmd); + bio_put(bio); +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c +index f3264bd7a83d..3f709ab0223b 100644 +--- a/fs/cifs/cifssmb.c ++++ b/fs/cifs/cifssmb.c +@@ -1399,11 +1399,10 @@ openRetry: + * current bigbuf. + */ + static int +-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) ++discard_remaining_data(struct TCP_Server_Info *server) + { + unsigned int rfclen = get_rfc1002_length(server->smallbuf); + int remaining = rfclen + 4 - server->total_read; +- struct cifs_readdata *rdata = mid->callback_data; + + while (remaining > 0) { + int length; +@@ -1417,10 +1416,20 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) + remaining -= length; + } + +- dequeue_mid(mid, rdata->result); + return 0; + } + ++static int ++cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) ++{ ++ int length; ++ struct cifs_readdata *rdata = mid->callback_data; ++ ++ length = discard_remaining_data(server); ++ dequeue_mid(mid, rdata->result); ++ return length; ++} ++ + int + cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) + { +@@ -1449,6 +1458,12 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) + return length; + server->total_read += length; + ++ if (server->ops->is_status_pending && ++ server->ops->is_status_pending(buf, server, 0)) { ++ discard_remaining_data(server); ++ return -1; ++ } ++ + /* Was the SMB read successful? */ + rdata->result = server->ops->map_error(buf, false); + if (rdata->result != 0) { +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index 348792911e1f..ae375dff03da 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -1004,21 +1004,25 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp, + { + char *data_offset; + struct create_context *cc; +- unsigned int next = 0; ++ unsigned int next; ++ unsigned int remaining; + char *name; + + data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset); ++ remaining = le32_to_cpu(rsp->CreateContextsLength); + cc = (struct create_context *)data_offset; +- do { +- cc = (struct create_context *)((char *)cc + next); ++ while (remaining >= sizeof(struct create_context)) { + name = le16_to_cpu(cc->NameOffset) + (char *)cc; +- if (le16_to_cpu(cc->NameLength) != 4 || +- strncmp(name, "RqLs", 4)) { +- next = le32_to_cpu(cc->Next); +- continue; +- } +- return server->ops->parse_lease_buf(cc, epoch); +- } while (next != 0); ++ if (le16_to_cpu(cc->NameLength) == 4 && ++ strncmp(name, "RqLs", 4) == 0) ++ return server->ops->parse_lease_buf(cc, epoch); ++ ++ next = le32_to_cpu(cc->Next); ++ if (!next) ++ break; ++ remaining -= next; ++ cc = (struct create_context *)((char *)cc + next); ++ } + + return 0; + } +diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking +index 3ea36554107f..8918ac905a3b 100644 +--- a/fs/jffs2/README.Locking ++++ b/fs/jffs2/README.Locking +@@ -2,10 +2,6 @@ + JFFS2 LOCKING DOCUMENTATION + --------------------------- + +-At least theoretically, JFFS2 does not require the Big Kernel Lock +-(BKL), which was always helpfully obtained for it by Linux 2.4 VFS +-code. It has its own locking, as described below. +- + This document attempts to describe the existing locking rules for + JFFS2. It is not expected to remain perfectly up to date, but ought to + be fairly close. +@@ -69,6 +65,7 @@ Ordering constraints: + any f->sem held. + 2. Never attempt to lock two file mutexes in one thread. 
+ No ordering rules have been made for doing so. ++ 3. Never lock a page cache page with f->sem held. + + + erase_completion_lock spinlock +diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c +index a3750f902adc..c1f04947d7dc 100644 +--- a/fs/jffs2/build.c ++++ b/fs/jffs2/build.c +@@ -49,7 +49,8 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) + + + static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, +- struct jffs2_inode_cache *ic) ++ struct jffs2_inode_cache *ic, ++ int *dir_hardlinks) + { + struct jffs2_full_dirent *fd; + +@@ -68,19 +69,21 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, + dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", + fd->name, fd->ino, ic->ino); + jffs2_mark_node_obsolete(c, fd->raw); ++ /* Clear the ic/raw union so it doesn't cause problems later. */ ++ fd->ic = NULL; + continue; + } + ++ /* From this point, fd->raw is no longer used so we can set fd->ic */ ++ fd->ic = child_ic; ++ child_ic->pino_nlink++; ++ /* If we appear (at this stage) to have hard-linked directories, ++ * set a flag to trigger a scan later */ + if (fd->type == DT_DIR) { +- if (child_ic->pino_nlink) { +- JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", +- fd->name, fd->ino, ic->ino); +- /* TODO: What do we do about it? */ +- } else { +- child_ic->pino_nlink = ic->ino; +- } +- } else +- child_ic->pino_nlink++; ++ child_ic->flags |= INO_FLAGS_IS_DIR; ++ if (child_ic->pino_nlink > 1) ++ *dir_hardlinks = 1; ++ } + + dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino); + /* Can't free scan_dents so far. We might need them in pass 2 */ +@@ -94,8 +97,7 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, + */ + static int jffs2_build_filesystem(struct jffs2_sb_info *c) + { +- int ret; +- int i; ++ int ret, i, dir_hardlinks = 0; + struct jffs2_inode_cache *ic; + struct jffs2_full_dirent *fd; + struct jffs2_full_dirent *dead_fds = NULL; +@@ -119,7 +121,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + /* Now scan the directory tree, increasing nlink according to every dirent found. */ + for_each_inode(i, c, ic) { + if (ic->scan_dents) { +- jffs2_build_inode_pass1(c, ic); ++ jffs2_build_inode_pass1(c, ic, &dir_hardlinks); + cond_resched(); + } + } +@@ -155,6 +157,20 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + } + + dbg_fsbuild("pass 2a complete\n"); ++ ++ if (dir_hardlinks) { ++ /* If we detected directory hardlinks earlier, *hopefully* ++ * they are gone now because some of the links were from ++ * dead directories which still had some old dirents lying ++ * around and not yet garbage-collected, but which have ++ * been discarded above. So clear the pino_nlink field ++ * in each directory, so that the final scan below can ++ * print appropriate warnings. */ ++ for_each_inode(i, c, ic) { ++ if (ic->flags & INO_FLAGS_IS_DIR) ++ ic->pino_nlink = 0; ++ } ++ } + dbg_fsbuild("freeing temporary data structures\n"); + + /* Finally, we can scan again and free the dirent structs */ +@@ -162,6 +178,33 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + while(ic->scan_dents) { + fd = ic->scan_dents; + ic->scan_dents = fd->next; ++ /* We do use the pino_nlink field to count nlink of ++ * directories during fs build, so set it to the ++ * parent ino# now. Now that there's hopefully only ++ * one. 
*/ ++ if (fd->type == DT_DIR) { ++ if (!fd->ic) { ++ /* We'll have complained about it and marked the coresponding ++ raw node obsolete already. Just skip it. */ ++ continue; ++ } ++ ++ /* We *have* to have set this in jffs2_build_inode_pass1() */ ++ BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR)); ++ ++ /* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks ++ * is set. Otherwise, we know this should never trigger anyway, so ++ * we don't do the check. And ic->pino_nlink still contains the nlink ++ * value (which is 1). */ ++ if (dir_hardlinks && fd->ic->pino_nlink) { ++ JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n", ++ fd->name, fd->ino, ic->ino, fd->ic->pino_nlink); ++ /* Should we unlink it from its previous parent? */ ++ } ++ ++ /* For directories, ic->pino_nlink holds that parent inode # */ ++ fd->ic->pino_nlink = ic->ino; ++ } + jffs2_free_full_dirent(fd); + } + ic->scan_dents = NULL; +@@ -240,11 +283,7 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, + + /* Reduce nlink of the child. If it's now zero, stick it on the + dead_fds list to be cleaned up later. Else just free the fd */ +- +- if (fd->type == DT_DIR) +- child_ic->pino_nlink = 0; +- else +- child_ic->pino_nlink--; ++ child_ic->pino_nlink--; + + if (!child_ic->pino_nlink) { + dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n", +diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c +index 256cd19a3b78..d2570ae2e998 100644 +--- a/fs/jffs2/file.c ++++ b/fs/jffs2/file.c +@@ -139,39 +139,33 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + struct page *pg; + struct inode *inode = mapping->host; + struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); +- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); +- struct jffs2_raw_inode ri; +- uint32_t alloc_len = 0; + pgoff_t index = pos >> PAGE_CACHE_SHIFT; + uint32_t pageofs = index << PAGE_CACHE_SHIFT; + int ret = 0; + +- jffs2_dbg(1, "%s()\n", __func__); +- +- if (pageofs > inode->i_size) { +- ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, +- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); +- if (ret) +- return ret; +- } +- +- mutex_lock(&f->sem); + pg = grab_cache_page_write_begin(mapping, index, flags); +- if (!pg) { +- if (alloc_len) +- jffs2_complete_reservation(c); +- mutex_unlock(&f->sem); ++ if (!pg) + return -ENOMEM; +- } + *pagep = pg; + +- if (alloc_len) { ++ jffs2_dbg(1, "%s()\n", __func__); ++ ++ if (pageofs > inode->i_size) { + /* Make new hole frag from old EOF to new page */ ++ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); ++ struct jffs2_raw_inode ri; + struct jffs2_full_dnode *fn; ++ uint32_t alloc_len; + + jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", + (unsigned int)inode->i_size, pageofs); + ++ ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, ++ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); ++ if (ret) ++ goto out_page; ++ ++ mutex_lock(&f->sem); + memset(&ri, 0, sizeof(ri)); + + ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); +@@ -198,6 +192,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + if (IS_ERR(fn)) { + ret = PTR_ERR(fn); + jffs2_complete_reservation(c); ++ mutex_unlock(&f->sem); + goto out_page; + } + ret = jffs2_add_full_dnode_to_inode(c, f, fn); +@@ -212,10 +207,12 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + jffs2_mark_node_obsolete(c, fn->raw); + jffs2_free_full_dnode(fn); + 
jffs2_complete_reservation(c); ++ mutex_unlock(&f->sem); + goto out_page; + } + jffs2_complete_reservation(c); + inode->i_size = pageofs; ++ mutex_unlock(&f->sem); + } + + /* +@@ -224,18 +221,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + * case of a short-copy. + */ + if (!PageUptodate(pg)) { ++ mutex_lock(&f->sem); + ret = jffs2_do_readpage_nolock(inode, pg); ++ mutex_unlock(&f->sem); + if (ret) + goto out_page; + } +- mutex_unlock(&f->sem); + jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags); + return ret; + + out_page: + unlock_page(pg); + page_cache_release(pg); +- mutex_unlock(&f->sem); + return ret; + } + +diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c +index 5a2dec2b064c..95d5880a63ee 100644 +--- a/fs/jffs2/gc.c ++++ b/fs/jffs2/gc.c +@@ -1296,14 +1296,17 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era + BUG_ON(start > orig_start); + } + +- /* First, use readpage() to read the appropriate page into the page cache */ +- /* Q: What happens if we actually try to GC the _same_ page for which commit_write() +- * triggered garbage collection in the first place? +- * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the +- * page OK. We'll actually write it out again in commit_write, which is a little +- * suboptimal, but at least we're correct. +- */ ++ /* The rules state that we must obtain the page lock *before* f->sem, so ++ * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's ++ * actually going to *change* so we're safe; we only allow reading. ++ * ++ * It is important to note that jffs2_write_begin() will ensure that its ++ * page is marked Uptodate before allocating space. That means that if we ++ * end up here trying to GC the *same* page that jffs2_write_begin() is ++ * trying to write out, read_cache_page() will not deadlock. */ ++ mutex_unlock(&f->sem); + pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg); ++ mutex_lock(&f->sem); + + if (IS_ERR(pg_ptr)) { + pr_warn("read_cache_page() returned error: %ld\n", +diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h +index fa35ff79ab35..0637271f3770 100644 +--- a/fs/jffs2/nodelist.h ++++ b/fs/jffs2/nodelist.h +@@ -194,6 +194,7 @@ struct jffs2_inode_cache { + #define INO_STATE_CLEARING 6 /* In clear_inode() */ + + #define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */ ++#define INO_FLAGS_IS_DIR 0x02 /* is a directory */ + + #define RAWNODE_CLASS_INODE_CACHE 0 + #define RAWNODE_CLASS_XATTR_DATUM 1 +@@ -249,7 +250,10 @@ struct jffs2_readinode_info + + struct jffs2_full_dirent + { +- struct jffs2_raw_node_ref *raw; ++ union { ++ struct jffs2_raw_node_ref *raw; ++ struct jffs2_inode_cache *ic; /* Just during part of build */ ++ }; + struct jffs2_full_dirent *next; + uint32_t version; + uint32_t ino; /* == zero for unlink */ +diff --git a/fs/locks.c b/fs/locks.c +index 2c61c4e9368c..f3197830f07e 100644 +--- a/fs/locks.c ++++ b/fs/locks.c +@@ -2007,7 +2007,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, + goto out; + } + +-again: + error = flock_to_posix_lock(filp, file_lock, &flock); + if (error) + goto out; +@@ -2038,19 +2037,22 @@ again: + * Attempt to detect a close/fcntl race and recover by + * releasing the lock that was just acquired. + */ +- /* +- * we need that spin_lock here - it prevents reordering between +- * update of inode->i_flock and check for it done in close(). +- * rcu_read_lock() wouldn't do. 
+- */ +- spin_lock(¤t->files->file_lock); +- f = fcheck(fd); +- spin_unlock(¤t->files->file_lock); +- if (!error && f != filp && flock.l_type != F_UNLCK) { +- flock.l_type = F_UNLCK; +- goto again; ++ if (!error && file_lock->fl_type != F_UNLCK) { ++ /* ++ * We need that spin_lock here - it prevents reordering between ++ * update of inode->i_flock and check for it done in ++ * close(). rcu_read_lock() wouldn't do. ++ */ ++ spin_lock(¤t->files->file_lock); ++ f = fcheck(fd); ++ spin_unlock(¤t->files->file_lock); ++ if (f != filp) { ++ file_lock->fl_type = F_UNLCK; ++ error = do_lock_file_wait(filp, cmd, file_lock); ++ WARN_ON_ONCE(error); ++ error = -EBADF; ++ } + } +- + out: + locks_free_lock(file_lock); + return error; +@@ -2125,7 +2127,6 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, + goto out; + } + +-again: + error = flock64_to_posix_lock(filp, file_lock, &flock); + if (error) + goto out; +@@ -2156,14 +2157,22 @@ again: + * Attempt to detect a close/fcntl race and recover by + * releasing the lock that was just acquired. + */ +- spin_lock(¤t->files->file_lock); +- f = fcheck(fd); +- spin_unlock(¤t->files->file_lock); +- if (!error && f != filp && flock.l_type != F_UNLCK) { +- flock.l_type = F_UNLCK; +- goto again; ++ if (!error && file_lock->fl_type != F_UNLCK) { ++ /* ++ * We need that spin_lock here - it prevents reordering between ++ * update of inode->i_flock and check for it done in ++ * close(). rcu_read_lock() wouldn't do. ++ */ ++ spin_lock(¤t->files->file_lock); ++ f = fcheck(fd); ++ spin_unlock(¤t->files->file_lock); ++ if (f != filp) { ++ file_lock->fl_type = F_UNLCK; ++ error = do_lock_file_wait(filp, cmd, file_lock); ++ WARN_ON_ONCE(error); ++ error = -EBADF; ++ } + } +- + out: + locks_free_lock(file_lock); + return error; +diff --git a/include/linux/ata.h b/include/linux/ata.h +index f2f4d8da97c0..f7ff6554a354 100644 +--- a/include/linux/ata.h ++++ b/include/linux/ata.h +@@ -484,8 +484,8 @@ enum ata_tf_protocols { + }; + + enum ata_ioctls { +- ATA_IOC_GET_IO32 = 0x309, +- ATA_IOC_SET_IO32 = 0x324, ++ ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */ ++ ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */ + }; + + /* core structures */ +diff --git a/include/linux/libata.h b/include/linux/libata.h +index 189c9ff97b29..a445209be917 100644 +--- a/include/linux/libata.h ++++ b/include/linux/libata.h +@@ -712,7 +712,7 @@ struct ata_device { + union { + u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ + u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ +- }; ++ } ____cacheline_aligned; + + /* DEVSLP Timing Variables from Identify Device Data Log */ + u8 devslp_timing[ATA_LOG_DEVSLP_SIZE]; +diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h +index d1fb912740f3..0d7a62fea895 100644 +--- a/include/target/iscsi/iscsi_transport.h ++++ b/include/target/iscsi/iscsi_transport.h +@@ -21,6 +21,7 @@ struct iscsit_transport { + int (*iscsit_get_dataout)(struct iscsi_conn *, struct iscsi_cmd *, bool); + int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *); + int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *); ++ void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *); + }; + + static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd) +diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h +index f5915b39386a..522ae25a61a7 100644 +--- a/include/target/target_core_backend.h ++++ b/include/target/target_core_backend.h +@@ -94,4 +94,8 @@ 
sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *, + + void array_free(void *array, int n); + ++sector_t target_to_linux_sector(struct se_device *dev, sector_t lb); ++bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, ++ struct request_queue *q, int block_size); ++ + #endif /* TARGET_CORE_BACKEND_H */ +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h +index 42606764d830..3beb2fed86aa 100644 +--- a/include/target/target_core_base.h ++++ b/include/target/target_core_base.h +@@ -162,7 +162,7 @@ enum se_cmd_flags_table { + SCF_SENT_CHECK_CONDITION = 0x00000800, + SCF_OVERFLOW_BIT = 0x00001000, + SCF_UNDERFLOW_BIT = 0x00002000, +- SCF_SENT_DELAYED_TAS = 0x00004000, ++ SCF_SEND_DELAYED_TAS = 0x00004000, + SCF_ALUA_NON_OPTIMIZED = 0x00008000, + SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000, + SCF_ACK_KREF = 0x00040000, +@@ -528,6 +528,8 @@ struct se_cmd { + #define CMD_T_DEV_ACTIVE (1 << 7) + #define CMD_T_REQUEST_STOP (1 << 8) + #define CMD_T_BUSY (1 << 9) ++#define CMD_T_TAS (1 << 10) ++#define CMD_T_FABRIC_STOP (1 << 11) + spinlock_t t_state_lock; + struct completion t_transport_stop_comp; + +diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h +index 0218d689b3d7..1d1043644b9b 100644 +--- a/include/target/target_core_fabric.h ++++ b/include/target/target_core_fabric.h +@@ -62,6 +62,7 @@ struct target_core_fabric_ops { + int (*queue_data_in)(struct se_cmd *); + int (*queue_status)(struct se_cmd *); + void (*queue_tm_rsp)(struct se_cmd *); ++ void (*aborted_task)(struct se_cmd *); + /* + * fabric module calls for target_core_fabric_configfs.c + */ +diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c +index 2bb95a7a8809..c14565bde887 100644 +--- a/sound/core/control_compat.c ++++ b/sound/core/control_compat.c +@@ -170,6 +170,19 @@ struct snd_ctl_elem_value32 { + unsigned char reserved[128]; + }; + ++#ifdef CONFIG_X86_X32 ++/* x32 has a different alignment for 64bit values from ia32 */ ++struct snd_ctl_elem_value_x32 { ++ struct snd_ctl_elem_id id; ++ unsigned int indirect; /* bit-field causes misalignment */ ++ union { ++ s32 integer[128]; ++ unsigned char data[512]; ++ s64 integer64[64]; ++ } value; ++ unsigned char reserved[128]; ++}; ++#endif /* CONFIG_X86_X32 */ + + /* get the value type and count of the control */ + static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id, +@@ -219,9 +232,11 @@ static int get_elem_size(int type, int count) + + static int copy_ctl_value_from_user(struct snd_card *card, + struct snd_ctl_elem_value *data, +- struct snd_ctl_elem_value32 __user *data32, ++ void __user *userdata, ++ void __user *valuep, + int *typep, int *countp) + { ++ struct snd_ctl_elem_value32 __user *data32 = userdata; + int i, type, size; + int uninitialized_var(count); + unsigned int indirect; +@@ -239,8 +254,9 @@ static int copy_ctl_value_from_user(struct snd_card *card, + if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || + type == SNDRV_CTL_ELEM_TYPE_INTEGER) { + for (i = 0; i < count; i++) { ++ s32 __user *intp = valuep; + int val; +- if (get_user(val, &data32->value.integer[i])) ++ if (get_user(val, &intp[i])) + return -EFAULT; + data->value.integer.value[i] = val; + } +@@ -250,8 +266,7 @@ static int copy_ctl_value_from_user(struct snd_card *card, + printk(KERN_ERR "snd_ioctl32_ctl_elem_value: unknown type %d\n", type); + return -EINVAL; + } +- if (copy_from_user(data->value.bytes.data, +- data32->value.data, size)) ++ if 
(copy_from_user(data->value.bytes.data, valuep, size)) + return -EFAULT; + } + +@@ -261,7 +276,8 @@ static int copy_ctl_value_from_user(struct snd_card *card, + } + + /* restore the value to 32bit */ +-static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32, ++static int copy_ctl_value_to_user(void __user *userdata, ++ void __user *valuep, + struct snd_ctl_elem_value *data, + int type, int count) + { +@@ -270,22 +286,22 @@ static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32, + if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || + type == SNDRV_CTL_ELEM_TYPE_INTEGER) { + for (i = 0; i < count; i++) { ++ s32 __user *intp = valuep; + int val; + val = data->value.integer.value[i]; +- if (put_user(val, &data32->value.integer[i])) ++ if (put_user(val, &intp[i])) + return -EFAULT; + } + } else { + size = get_elem_size(type, count); +- if (copy_to_user(data32->value.data, +- data->value.bytes.data, size)) ++ if (copy_to_user(valuep, data->value.bytes.data, size)) + return -EFAULT; + } + return 0; + } + +-static int snd_ctl_elem_read_user_compat(struct snd_card *card, +- struct snd_ctl_elem_value32 __user *data32) ++static int ctl_elem_read_user(struct snd_card *card, ++ void __user *userdata, void __user *valuep) + { + struct snd_ctl_elem_value *data; + int err, type, count; +@@ -294,7 +310,9 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card, + if (data == NULL) + return -ENOMEM; + +- if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0) ++ err = copy_ctl_value_from_user(card, data, userdata, valuep, ++ &type, &count); ++ if (err < 0) + goto error; + + snd_power_lock(card); +@@ -303,14 +321,15 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card, + err = snd_ctl_elem_read(card, data); + snd_power_unlock(card); + if (err >= 0) +- err = copy_ctl_value_to_user(data32, data, type, count); ++ err = copy_ctl_value_to_user(userdata, valuep, data, ++ type, count); + error: + kfree(data); + return err; + } + +-static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, +- struct snd_ctl_elem_value32 __user *data32) ++static int ctl_elem_write_user(struct snd_ctl_file *file, ++ void __user *userdata, void __user *valuep) + { + struct snd_ctl_elem_value *data; + struct snd_card *card = file->card; +@@ -320,7 +339,9 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, + if (data == NULL) + return -ENOMEM; + +- if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0) ++ err = copy_ctl_value_from_user(card, data, userdata, valuep, ++ &type, &count); ++ if (err < 0) + goto error; + + snd_power_lock(card); +@@ -329,12 +350,39 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, + err = snd_ctl_elem_write(card, file, data); + snd_power_unlock(card); + if (err >= 0) +- err = copy_ctl_value_to_user(data32, data, type, count); ++ err = copy_ctl_value_to_user(userdata, valuep, data, ++ type, count); + error: + kfree(data); + return err; + } + ++static int snd_ctl_elem_read_user_compat(struct snd_card *card, ++ struct snd_ctl_elem_value32 __user *data32) ++{ ++ return ctl_elem_read_user(card, data32, &data32->value); ++} ++ ++static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, ++ struct snd_ctl_elem_value32 __user *data32) ++{ ++ return ctl_elem_write_user(file, data32, &data32->value); ++} ++ ++#ifdef CONFIG_X86_X32 ++static int snd_ctl_elem_read_user_x32(struct snd_card *card, ++ struct snd_ctl_elem_value_x32 __user *data32) ++{ ++ return 
ctl_elem_read_user(card, data32, &data32->value); ++} ++ ++static int snd_ctl_elem_write_user_x32(struct snd_ctl_file *file, ++ struct snd_ctl_elem_value_x32 __user *data32) ++{ ++ return ctl_elem_write_user(file, data32, &data32->value); ++} ++#endif /* CONFIG_X86_X32 */ ++ + /* add or replace a user control */ + static int snd_ctl_elem_add_compat(struct snd_ctl_file *file, + struct snd_ctl_elem_info32 __user *data32, +@@ -393,6 +441,10 @@ enum { + SNDRV_CTL_IOCTL_ELEM_WRITE32 = _IOWR('U', 0x13, struct snd_ctl_elem_value32), + SNDRV_CTL_IOCTL_ELEM_ADD32 = _IOWR('U', 0x17, struct snd_ctl_elem_info32), + SNDRV_CTL_IOCTL_ELEM_REPLACE32 = _IOWR('U', 0x18, struct snd_ctl_elem_info32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_CTL_IOCTL_ELEM_READ_X32 = _IOWR('U', 0x12, struct snd_ctl_elem_value_x32), ++ SNDRV_CTL_IOCTL_ELEM_WRITE_X32 = _IOWR('U', 0x13, struct snd_ctl_elem_value_x32), ++#endif /* CONFIG_X86_X32 */ + }; + + static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -431,6 +483,12 @@ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, uns + return snd_ctl_elem_add_compat(ctl, argp, 0); + case SNDRV_CTL_IOCTL_ELEM_REPLACE32: + return snd_ctl_elem_add_compat(ctl, argp, 1); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_CTL_IOCTL_ELEM_READ_X32: ++ return snd_ctl_elem_read_user_x32(ctl->card, argp); ++ case SNDRV_CTL_IOCTL_ELEM_WRITE_X32: ++ return snd_ctl_elem_write_user_x32(ctl, argp); ++#endif /* CONFIG_X86_X32 */ + } + + down_read(&snd_ioctl_rwsem); +diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c +index 5268c1f58c25..09a89094dcf7 100644 +--- a/sound/core/rawmidi_compat.c ++++ b/sound/core/rawmidi_compat.c +@@ -94,9 +94,58 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile, + return 0; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has 64bit timespec and 64bit alignment */ ++struct snd_rawmidi_status_x32 { ++ s32 stream; ++ u32 rsvd; /* alignment */ ++ struct timespec tstamp; ++ u32 avail; ++ u32 xruns; ++ unsigned char reserved[16]; ++} __attribute__((packed)); ++ ++#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst)) ++ ++static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile, ++ struct snd_rawmidi_status_x32 __user *src) ++{ ++ int err; ++ struct snd_rawmidi_status status; ++ ++ if (rfile->output == NULL) ++ return -EINVAL; ++ if (get_user(status.stream, &src->stream)) ++ return -EFAULT; ++ ++ switch (status.stream) { ++ case SNDRV_RAWMIDI_STREAM_OUTPUT: ++ err = snd_rawmidi_output_status(rfile->output, &status); ++ break; ++ case SNDRV_RAWMIDI_STREAM_INPUT: ++ err = snd_rawmidi_input_status(rfile->input, &status); ++ break; ++ default: ++ return -EINVAL; ++ } ++ if (err < 0) ++ return err; ++ ++ if (put_timespec(&status.tstamp, &src->tstamp) || ++ put_user(status.avail, &src->avail) || ++ put_user(status.xruns, &src->xruns)) ++ return -EFAULT; ++ ++ return 0; ++} ++#endif /* CONFIG_X86_X32 */ ++ + enum { + SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32), + SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_RAWMIDI_IOCTL_STATUS_X32 = _IOWR('W', 0x20, struct snd_rawmidi_status_x32), ++#endif /* CONFIG_X86_X32 */ + }; + + static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -115,6 +164,10 @@ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsign + return snd_rawmidi_ioctl_params_compat(rfile, argp); + 
case SNDRV_RAWMIDI_IOCTL_STATUS32: + return snd_rawmidi_ioctl_status_compat(rfile, argp); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_RAWMIDI_IOCTL_STATUS_X32: ++ return snd_rawmidi_ioctl_status_x32(rfile, argp); ++#endif /* CONFIG_X86_X32 */ + } + return -ENOIOCTLCMD; + } +diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c +index 8d4d5e853efe..ab774954c985 100644 +--- a/sound/core/seq/oss/seq_oss.c ++++ b/sound/core/seq/oss/seq_oss.c +@@ -150,8 +150,6 @@ odev_release(struct inode *inode, struct file *file) + if ((dp = file->private_data) == NULL) + return 0; + +- snd_seq_oss_drain_write(dp); +- + mutex_lock(®ister_mutex); + snd_seq_oss_release(dp); + mutex_unlock(®ister_mutex); +diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h +index c0154a959d55..2464112b08ad 100644 +--- a/sound/core/seq/oss/seq_oss_device.h ++++ b/sound/core/seq/oss/seq_oss_device.h +@@ -131,7 +131,6 @@ int snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int co + unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait); + + void snd_seq_oss_reset(struct seq_oss_devinfo *dp); +-void snd_seq_oss_drain_write(struct seq_oss_devinfo *dp); + + /* */ + void snd_seq_oss_process_queue(struct seq_oss_devinfo *dp, abstime_t time); +diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c +index b3f39b5ed742..f9e09e458227 100644 +--- a/sound/core/seq/oss/seq_oss_init.c ++++ b/sound/core/seq/oss/seq_oss_init.c +@@ -457,23 +457,6 @@ snd_seq_oss_release(struct seq_oss_devinfo *dp) + + + /* +- * Wait until the queue is empty (if we don't have nonblock) +- */ +-void +-snd_seq_oss_drain_write(struct seq_oss_devinfo *dp) +-{ +- if (! dp->timer->running) +- return; +- if (is_write_mode(dp->file_mode) && !is_nonblock_mode(dp->file_mode) && +- dp->writeq) { +- debug_printk(("syncing..\n")); +- while (snd_seq_oss_writeq_sync(dp->writeq)) +- ; +- } +-} +- +- +-/* + * reset sequencer devices + */ + void +diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c +index e05802ae6e1b..2e908225d754 100644 +--- a/sound/core/timer_compat.c ++++ b/sound/core/timer_compat.c +@@ -70,13 +70,14 @@ static int snd_timer_user_status_compat(struct file *file, + struct snd_timer_status32 __user *_status) + { + struct snd_timer_user *tu; +- struct snd_timer_status status; ++ struct snd_timer_status32 status; + + tu = file->private_data; + if (snd_BUG_ON(!tu->timeri)) + return -ENXIO; + memset(&status, 0, sizeof(status)); +- status.tstamp = tu->tstamp; ++ status.tstamp.tv_sec = tu->tstamp.tv_sec; ++ status.tstamp.tv_nsec = tu->tstamp.tv_nsec; + status.resolution = snd_timer_resolution(tu->timeri); + status.lost = tu->timeri->lost; + status.overrun = tu->overrun; +@@ -88,12 +89,21 @@ static int snd_timer_user_status_compat(struct file *file, + return 0; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has the same struct as x86-64 */ ++#define snd_timer_user_status_x32(file, s) \ ++ snd_timer_user_status(file, s) ++#endif /* CONFIG_X86_X32 */ ++ + /* + */ + + enum { + SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32), + SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_TIMER_IOCTL_STATUS_X32 = _IOW('T', 0x14, struct snd_timer_status), ++#endif /* CONFIG_X86_X32 */ + }; + + static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -122,6 +132,10 @@ static long snd_timer_user_ioctl_compat(struct file *file, 
unsigned int cmd, uns + return snd_timer_user_info_compat(file, argp); + case SNDRV_TIMER_IOCTL_STATUS32: + return snd_timer_user_status_compat(file, argp); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_TIMER_IOCTL_STATUS_X32: ++ return snd_timer_user_status_x32(file, argp); ++#endif /* CONFIG_X86_X32 */ + } + return -ENOIOCTLCMD; + } +diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c +index bd90c80bb494..4bc0d66377d6 100644 +--- a/sound/pci/rme9652/hdsp.c ++++ b/sound/pci/rme9652/hdsp.c +@@ -2917,7 +2917,7 @@ static int snd_hdsp_get_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl + { + struct hdsp *hdsp = snd_kcontrol_chip(kcontrol); + +- ucontrol->value.enumerated.item[0] = hdsp_dds_offset(hdsp); ++ ucontrol->value.integer.value[0] = hdsp_dds_offset(hdsp); + return 0; + } + +@@ -2929,7 +2929,7 @@ static int snd_hdsp_put_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl + + if (!snd_hdsp_use_is_exclusive(hdsp)) + return -EBUSY; +- val = ucontrol->value.enumerated.item[0]; ++ val = ucontrol->value.integer.value[0]; + spin_lock_irq(&hdsp->lock); + if (val != hdsp_dds_offset(hdsp)) + change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 1 : 0; +diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c +index 21167503a3f9..dec173081798 100644 +--- a/sound/pci/rme9652/hdspm.c ++++ b/sound/pci/rme9652/hdspm.c +@@ -1602,6 +1602,9 @@ static void hdspm_set_dds_value(struct hdspm *hdspm, int rate) + { + u64 n; + ++ if (snd_BUG_ON(rate <= 0)) ++ return; ++ + if (rate >= 112000) + rate /= 4; + else if (rate >= 56000) +@@ -2224,6 +2227,8 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm) + } else { + /* slave mode, return external sample rate */ + rate = hdspm_external_sample_rate(hdspm); ++ if (!rate) ++ rate = hdspm->system_sample_rate; + } + } + +@@ -2269,8 +2274,11 @@ static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol, + ucontrol) + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); ++ int rate = ucontrol->value.integer.value[0]; + +- hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]); ++ if (rate < 27000 || rate > 207000) ++ return -EINVAL; ++ hdspm_set_dds_value(hdspm, ucontrol->value.integer.value[0]); + return 0; + } + +@@ -4469,7 +4477,7 @@ static int snd_hdspm_get_tco_word_term(struct snd_kcontrol *kcontrol, + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); + +- ucontrol->value.enumerated.item[0] = hdspm->tco->term; ++ ucontrol->value.integer.value[0] = hdspm->tco->term; + + return 0; + } +@@ -4480,8 +4488,8 @@ static int snd_hdspm_put_tco_word_term(struct snd_kcontrol *kcontrol, + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); + +- if (hdspm->tco->term != ucontrol->value.enumerated.item[0]) { +- hdspm->tco->term = ucontrol->value.enumerated.item[0]; ++ if (hdspm->tco->term != ucontrol->value.integer.value[0]) { ++ hdspm->tco->term = ucontrol->value.integer.value[0]; + + hdspm_tco_write(hdspm); + diff --git a/patch/kernel/neo-default/patch-3.14.64-65.patch b/patch/kernel/neo-default/patch-3.14.64-65.patch new file mode 100644 index 000000000..73548c75e --- /dev/null +++ b/patch/kernel/neo-default/patch-3.14.64-65.patch @@ -0,0 +1,1518 @@ +diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt +index c477af086e65..686a64bba775 100644 +--- a/Documentation/filesystems/efivarfs.txt ++++ b/Documentation/filesystems/efivarfs.txt +@@ -14,3 +14,10 @@ filesystem. 
+ efivarfs is typically mounted like this, + + mount -t efivarfs none /sys/firmware/efi/efivars ++ ++Due to the presence of numerous firmware bugs where removing non-standard ++UEFI variables causes the system firmware to fail to POST, efivarfs ++files that are not well-known standardized variables are created ++as immutable files. This doesn't prevent removal - "chattr -i" will work - ++but it does prevent this kind of failure from being accomplished ++accidentally. +diff --git a/Makefile b/Makefile +index de41fa82652f..a19c22a77728 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 64 ++SUBLEVEL = 65 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c +index 12664c130d73..78a859ec4946 100644 +--- a/arch/powerpc/kernel/module_64.c ++++ b/arch/powerpc/kernel/module_64.c +@@ -202,7 +202,7 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) + if (syms[i].st_shndx == SHN_UNDEF) { + char *name = strtab + syms[i].st_name; + if (name[0] == '.') +- memmove(name, name+1, strlen(name)); ++ syms[i].st_name++; + } + } + } +diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S +index b3bad672e5d9..87fa70f8472a 100644 +--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S ++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S +@@ -1148,6 +1148,20 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) + std r6, VCPU_ACOP(r9) + stw r7, VCPU_GUEST_PID(r9) + std r8, VCPU_WORT(r9) ++ /* ++ * Restore various registers to 0, where non-zero values ++ * set by the guest could disrupt the host. ++ */ ++ li r0, 0 ++ mtspr SPRN_IAMR, r0 ++ mtspr SPRN_CIABR, r0 ++ mtspr SPRN_DAWRX, r0 ++ mtspr SPRN_TCSCR, r0 ++ mtspr SPRN_WORT, r0 ++ /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ ++ li r0, 1 ++ sldi r0, r0, 31 ++ mtspr SPRN_MMCRS, r0 + 8: + + /* Save and reset AMR and UAMOR before turning on the MMU */ +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 80c22a3ca688..b6fc5fc64cef 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -1555,6 +1555,13 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, + return; + } + break; ++ case MSR_IA32_PEBS_ENABLE: ++ /* PEBS needs a quiescent period after being disabled (to write ++ * a record). Disabling PEBS through VMX MSR swapping doesn't ++ * provide that period, so a CPU could write host's record into ++ * guest's memory. 
++ */ ++ wrmsrl(MSR_IA32_PEBS_ENABLE, 0); + } + + for (i = 0; i < m->nr; ++i) +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 1777f89875fb..8db66424be97 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -1991,6 +1991,8 @@ static void accumulate_steal_time(struct kvm_vcpu *vcpu) + + static void record_steal_time(struct kvm_vcpu *vcpu) + { ++ accumulate_steal_time(vcpu); ++ + if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) + return; + +@@ -2123,12 +2125,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + if (!(data & KVM_MSR_ENABLED)) + break; + +- vcpu->arch.st.last_steal = current->sched_info.run_delay; +- +- preempt_disable(); +- accumulate_steal_time(vcpu); +- preempt_enable(); +- + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + + break; +@@ -2818,7 +2814,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) + vcpu->cpu = cpu; + } + +- accumulate_steal_time(vcpu); + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + } + +diff --git a/crypto/af_alg.c b/crypto/af_alg.c +index 1971f3ccb09a..eeb2fb239934 100644 +--- a/crypto/af_alg.c ++++ b/crypto/af_alg.c +@@ -125,23 +125,6 @@ int af_alg_release(struct socket *sock) + } + EXPORT_SYMBOL_GPL(af_alg_release); + +-void af_alg_release_parent(struct sock *sk) +-{ +- struct alg_sock *ask = alg_sk(sk); +- bool last; +- +- sk = ask->parent; +- ask = alg_sk(sk); +- +- lock_sock(sk); +- last = !--ask->refcnt; +- release_sock(sk); +- +- if (last) +- sock_put(sk); +-} +-EXPORT_SYMBOL_GPL(af_alg_release_parent); +- + static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + { + struct sock *sk = sock->sk; +@@ -149,7 +132,6 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + struct sockaddr_alg *sa = (void *)uaddr; + const struct af_alg_type *type; + void *private; +- int err; + + if (sock->state == SS_CONNECTED) + return -EINVAL; +@@ -175,22 +157,16 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + return PTR_ERR(private); + } + +- err = -EBUSY; + lock_sock(sk); +- if (ask->refcnt) +- goto unlock; + + swap(ask->type, type); + swap(ask->private, private); + +- err = 0; +- +-unlock: + release_sock(sk); + + alg_do_release(type, private); + +- return err; ++ return 0; + } + + static int alg_setkey(struct sock *sk, char __user *ukey, +@@ -223,15 +199,11 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + const struct af_alg_type *type; +- int err = -EBUSY; ++ int err = -ENOPROTOOPT; + + lock_sock(sk); +- if (ask->refcnt) +- goto unlock; +- + type = ask->type; + +- err = -ENOPROTOOPT; + if (level != SOL_ALG || !type) + goto unlock; + +@@ -280,8 +252,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) + + sk2->sk_family = PF_ALG; + +- if (!ask->refcnt++) +- sock_hold(sk); ++ sock_hold(sk); + alg_sk(sk2)->parent = sk; + alg_sk(sk2)->type = type; + +diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c +index 3dc248239197..f04ce007bf7f 100644 +--- a/drivers/firmware/efi/efivars.c ++++ b/drivers/firmware/efi/efivars.c +@@ -219,7 +219,8 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) + } + + if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 || +- efivar_validate(new_var, new_var->Data, new_var->DataSize) == false) { ++ efivar_validate(new_var->VendorGuid, new_var->VariableName, ++ new_var->Data, new_var->DataSize) == false) { + printk(KERN_ERR "efivars: Malformed 
variable content\n"); + return -EINVAL; + } +@@ -334,7 +335,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, + return -EACCES; + + if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 || +- efivar_validate(new_var, new_var->Data, new_var->DataSize) == false) { ++ efivar_validate(new_var->VendorGuid, new_var->VariableName, ++ new_var->Data, new_var->DataSize) == false) { + printk(KERN_ERR "efivars: Malformed variable content\n"); + return -EINVAL; + } +@@ -409,35 +411,27 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) + { + int i, short_name_size; + char *short_name; +- unsigned long variable_name_size; +- efi_char16_t *variable_name; +- +- variable_name = new_var->var.VariableName; +- variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t); ++ unsigned long utf8_name_size; ++ efi_char16_t *variable_name = new_var->var.VariableName; + + /* +- * Length of the variable bytes in ASCII, plus the '-' separator, ++ * Length of the variable bytes in UTF8, plus the '-' separator, + * plus the GUID, plus trailing NUL + */ +- short_name_size = variable_name_size / sizeof(efi_char16_t) +- + 1 + EFI_VARIABLE_GUID_LEN + 1; +- +- short_name = kzalloc(short_name_size, GFP_KERNEL); ++ utf8_name_size = ucs2_utf8size(variable_name); ++ short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1; + ++ short_name = kmalloc(short_name_size, GFP_KERNEL); + if (!short_name) + return 1; + +- /* Convert Unicode to normal chars (assume top bits are 0), +- ala UTF-8 */ +- for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) { +- short_name[i] = variable_name[i] & 0xFF; +- } ++ ucs2_as_utf8(short_name, variable_name, short_name_size); ++ + /* This is ugly, but necessary to separate one vendor's + private variables from another's. 
*/ +- +- *(short_name + strlen(short_name)) = '-'; ++ short_name[utf8_name_size] = '-'; + efi_guid_unparse(&new_var->var.VendorGuid, +- short_name + strlen(short_name)); ++ short_name + utf8_name_size + 1); + + new_var->kobj.kset = efivars_kset; + +diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c +index e6125522860a..4e2f46938bf0 100644 +--- a/drivers/firmware/efi/vars.c ++++ b/drivers/firmware/efi/vars.c +@@ -42,7 +42,7 @@ DECLARE_WORK(efivar_work, NULL); + EXPORT_SYMBOL_GPL(efivar_work); + + static bool +-validate_device_path(struct efi_variable *var, int match, u8 *buffer, ++validate_device_path(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + struct efi_generic_dev_path *node; +@@ -75,7 +75,7 @@ validate_device_path(struct efi_variable *var, int match, u8 *buffer, + } + + static bool +-validate_boot_order(struct efi_variable *var, int match, u8 *buffer, ++validate_boot_order(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + /* An array of 16-bit integers */ +@@ -86,18 +86,18 @@ validate_boot_order(struct efi_variable *var, int match, u8 *buffer, + } + + static bool +-validate_load_option(struct efi_variable *var, int match, u8 *buffer, ++validate_load_option(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + u16 filepathlength; + int i, desclength = 0, namelen; + +- namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName)); ++ namelen = ucs2_strnlen(var_name, EFI_VAR_NAME_LEN); + + /* Either "Boot" or "Driver" followed by four digits of hex */ + for (i = match; i < match+4; i++) { +- if (var->VariableName[i] > 127 || +- hex_to_bin(var->VariableName[i] & 0xff) < 0) ++ if (var_name[i] > 127 || ++ hex_to_bin(var_name[i] & 0xff) < 0) + return true; + } + +@@ -132,12 +132,12 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer, + /* + * And, finally, check the filepath + */ +- return validate_device_path(var, match, buffer + desclength + 6, ++ return validate_device_path(var_name, match, buffer + desclength + 6, + filepathlength); + } + + static bool +-validate_uint16(struct efi_variable *var, int match, u8 *buffer, ++validate_uint16(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + /* A single 16-bit integer */ +@@ -148,7 +148,7 @@ validate_uint16(struct efi_variable *var, int match, u8 *buffer, + } + + static bool +-validate_ascii_string(struct efi_variable *var, int match, u8 *buffer, ++validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + int i; +@@ -165,67 +165,133 @@ validate_ascii_string(struct efi_variable *var, int match, u8 *buffer, + } + + struct variable_validate { ++ efi_guid_t vendor; + char *name; +- bool (*validate)(struct efi_variable *var, int match, u8 *data, ++ bool (*validate)(efi_char16_t *var_name, int match, u8 *data, + unsigned long len); + }; + ++/* ++ * This is the list of variables we need to validate, as well as the ++ * whitelist for what we think is safe not to default to immutable. ++ * ++ * If it has a validate() method that's not NULL, it'll go into the ++ * validation routine. If not, it is assumed valid, but still used for ++ * whitelisting. ++ * ++ * Note that it's sorted by {vendor,name}, but globbed names must come after ++ * any other name with the same prefix. 
++ */ + static const struct variable_validate variable_validate[] = { +- { "BootNext", validate_uint16 }, +- { "BootOrder", validate_boot_order }, +- { "DriverOrder", validate_boot_order }, +- { "Boot*", validate_load_option }, +- { "Driver*", validate_load_option }, +- { "ConIn", validate_device_path }, +- { "ConInDev", validate_device_path }, +- { "ConOut", validate_device_path }, +- { "ConOutDev", validate_device_path }, +- { "ErrOut", validate_device_path }, +- { "ErrOutDev", validate_device_path }, +- { "Timeout", validate_uint16 }, +- { "Lang", validate_ascii_string }, +- { "PlatformLang", validate_ascii_string }, +- { "", NULL }, ++ { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 }, ++ { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option }, ++ { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string }, ++ { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL }, ++ { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 }, ++ { LINUX_EFI_CRASH_GUID, "*", NULL }, ++ { NULL_GUID, "", NULL }, + }; + ++static bool ++variable_matches(const char *var_name, size_t len, const char *match_name, ++ int *match) ++{ ++ for (*match = 0; ; (*match)++) { ++ char c = match_name[*match]; ++ char u = var_name[*match]; ++ ++ /* Wildcard in the matching name means we've matched */ ++ if (c == '*') ++ return true; ++ ++ /* Case sensitive match */ ++ if (!c && *match == len) ++ return true; ++ ++ if (c != u) ++ return false; ++ ++ if (!c) ++ return true; ++ } ++ return true; ++} ++ + bool +-efivar_validate(struct efi_variable *var, u8 *data, unsigned long len) ++efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, ++ unsigned long data_size) + { + int i; +- u16 *unicode_name = var->VariableName; ++ unsigned long utf8_size; ++ u8 *utf8_name; + +- for (i = 0; variable_validate[i].validate != NULL; i++) { +- const char *name = variable_validate[i].name; +- int match; ++ utf8_size = ucs2_utf8size(var_name); ++ utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL); ++ if (!utf8_name) ++ return false; + +- for (match = 0; ; match++) { +- char c = name[match]; +- u16 u = unicode_name[match]; ++ ucs2_as_utf8(utf8_name, var_name, utf8_size); ++ utf8_name[utf8_size] = '\0'; + +- /* All special variables are plain ascii */ +- if (u > 127) +- return true; ++ for (i = 0; variable_validate[i].name[0] != '\0'; i++) { ++ const char *name = variable_validate[i].name; ++ int match = 0; + +- /* Wildcard in the matching name means we've matched */ +- if (c == '*') +- return variable_validate[i].validate(var, +- match, data, len); ++ if (efi_guidcmp(vendor, variable_validate[i].vendor)) ++ continue; + +- /* Case sensitive match */ +- if (c != u) ++ if (variable_matches(utf8_name, utf8_size+1, name, &match)) { ++ if (variable_validate[i].validate == NULL) + break; +- +- /* Reached the end of the string while matching */ +- if (!c) +- return 
variable_validate[i].validate(var, +- match, data, len); ++ kfree(utf8_name); ++ return variable_validate[i].validate(var_name, match, ++ data, data_size); + } + } +- ++ kfree(utf8_name); + return true; + } + EXPORT_SYMBOL_GPL(efivar_validate); + ++bool ++efivar_variable_is_removable(efi_guid_t vendor, const char *var_name, ++ size_t len) ++{ ++ int i; ++ bool found = false; ++ int match = 0; ++ ++ /* ++ * Check if our variable is in the validated variables list ++ */ ++ for (i = 0; variable_validate[i].name[0] != '\0'; i++) { ++ if (efi_guidcmp(variable_validate[i].vendor, vendor)) ++ continue; ++ ++ if (variable_matches(var_name, len, ++ variable_validate[i].name, &match)) { ++ found = true; ++ break; ++ } ++ } ++ ++ /* ++ * If it's in our list, it is removable. ++ */ ++ return found; ++} ++EXPORT_SYMBOL_GPL(efivar_variable_is_removable); ++ + static efi_status_t + check_var_size(u32 attributes, unsigned long size) + { +@@ -805,7 +871,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, + + *set = false; + +- if (efivar_validate(&entry->var, data, *size) == false) ++ if (efivar_validate(*vendor, name, data, *size) == false) + return -EINVAL; + + /* +diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c +index 8dd524f32284..08f105a06fbf 100644 +--- a/fs/efivarfs/file.c ++++ b/fs/efivarfs/file.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + + #include "internal.h" + +@@ -108,9 +109,79 @@ out_free: + return size; + } + ++static int ++efivarfs_ioc_getxflags(struct file *file, void __user *arg) ++{ ++ struct inode *inode = file->f_mapping->host; ++ unsigned int i_flags; ++ unsigned int flags = 0; ++ ++ i_flags = inode->i_flags; ++ if (i_flags & S_IMMUTABLE) ++ flags |= FS_IMMUTABLE_FL; ++ ++ if (copy_to_user(arg, &flags, sizeof(flags))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ++efivarfs_ioc_setxflags(struct file *file, void __user *arg) ++{ ++ struct inode *inode = file->f_mapping->host; ++ unsigned int flags; ++ unsigned int i_flags = 0; ++ int error; ++ ++ if (!inode_owner_or_capable(inode)) ++ return -EACCES; ++ ++ if (copy_from_user(&flags, arg, sizeof(flags))) ++ return -EFAULT; ++ ++ if (flags & ~FS_IMMUTABLE_FL) ++ return -EOPNOTSUPP; ++ ++ if (!capable(CAP_LINUX_IMMUTABLE)) ++ return -EPERM; ++ ++ if (flags & FS_IMMUTABLE_FL) ++ i_flags |= S_IMMUTABLE; ++ ++ ++ error = mnt_want_write_file(file); ++ if (error) ++ return error; ++ ++ mutex_lock(&inode->i_mutex); ++ inode->i_flags &= ~S_IMMUTABLE; ++ inode->i_flags |= i_flags; ++ mutex_unlock(&inode->i_mutex); ++ ++ mnt_drop_write_file(file); ++ ++ return 0; ++} ++ ++long ++efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p) ++{ ++ void __user *arg = (void __user *)p; ++ ++ switch (cmd) { ++ case FS_IOC_GETFLAGS: ++ return efivarfs_ioc_getxflags(file, arg); ++ case FS_IOC_SETFLAGS: ++ return efivarfs_ioc_setxflags(file, arg); ++ } ++ ++ return -ENOTTY; ++} ++ + const struct file_operations efivarfs_file_operations = { + .open = simple_open, + .read = efivarfs_file_read, + .write = efivarfs_file_write, + .llseek = no_llseek, ++ .unlocked_ioctl = efivarfs_file_ioctl, + }; +diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c +index 07ab49745e31..7e7318f10575 100644 +--- a/fs/efivarfs/inode.c ++++ b/fs/efivarfs/inode.c +@@ -15,7 +15,8 @@ + #include "internal.h" + + struct inode *efivarfs_get_inode(struct super_block *sb, +- const struct inode *dir, int mode, dev_t dev) ++ const struct inode *dir, int mode, ++ dev_t dev, bool is_removable) + { + struct inode 
*inode = new_inode(sb); + +@@ -23,6 +24,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb, + inode->i_ino = get_next_ino(); + inode->i_mode = mode; + inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; ++ inode->i_flags = is_removable ? 0 : S_IMMUTABLE; + switch (mode & S_IFMT) { + case S_IFREG: + inode->i_fop = &efivarfs_file_operations; +@@ -102,22 +104,17 @@ static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid) + static int efivarfs_create(struct inode *dir, struct dentry *dentry, + umode_t mode, bool excl) + { +- struct inode *inode; ++ struct inode *inode = NULL; + struct efivar_entry *var; + int namelen, i = 0, err = 0; ++ bool is_removable = false; + + if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len)) + return -EINVAL; + +- inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0); +- if (!inode) +- return -ENOMEM; +- + var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL); +- if (!var) { +- err = -ENOMEM; +- goto out; +- } ++ if (!var) ++ return -ENOMEM; + + /* length of the variable name itself: remove GUID and separator */ + namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1; +@@ -125,6 +122,16 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry, + efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1, + &var->var.VendorGuid); + ++ if (efivar_variable_is_removable(var->var.VendorGuid, ++ dentry->d_name.name, namelen)) ++ is_removable = true; ++ ++ inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable); ++ if (!inode) { ++ err = -ENOMEM; ++ goto out; ++ } ++ + for (i = 0; i < namelen; i++) + var->var.VariableName[i] = dentry->d_name.name[i]; + +@@ -138,7 +145,8 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry, + out: + if (err) { + kfree(var); +- iput(inode); ++ if (inode) ++ iput(inode); + } + return err; + } +diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h +index b5ff16addb7c..b4505188e799 100644 +--- a/fs/efivarfs/internal.h ++++ b/fs/efivarfs/internal.h +@@ -15,7 +15,8 @@ extern const struct file_operations efivarfs_file_operations; + extern const struct inode_operations efivarfs_dir_inode_operations; + extern bool efivarfs_valid_name(const char *str, int len); + extern struct inode *efivarfs_get_inode(struct super_block *sb, +- const struct inode *dir, int mode, dev_t dev); ++ const struct inode *dir, int mode, dev_t dev, ++ bool is_removable); + + extern struct list_head efivarfs_list; + +diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c +index becc725a1953..a3a30f84f917 100644 +--- a/fs/efivarfs/super.c ++++ b/fs/efivarfs/super.c +@@ -118,8 +118,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, + struct dentry *dentry, *root = sb->s_root; + unsigned long size = 0; + char *name; +- int len, i; ++ int len; + int err = -ENOMEM; ++ bool is_removable = false; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) +@@ -128,15 +129,17 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, + memcpy(entry->var.VariableName, name16, name_size); + memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); + +- len = ucs2_strlen(entry->var.VariableName); ++ len = ucs2_utf8size(entry->var.VariableName); + + /* name, plus '-', plus GUID, plus NUL*/ + name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL); + if (!name) + goto fail; + +- for (i = 0; i < len; i++) +- name[i] = entry->var.VariableName[i] & 0xFF; ++ ucs2_as_utf8(name, entry->var.VariableName, len); ++ ++ if 
(efivar_variable_is_removable(entry->var.VendorGuid, name, len)) ++ is_removable = true; + + name[len] = '-'; + +@@ -144,7 +147,8 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, + + name[len + EFI_VARIABLE_GUID_LEN+1] = '\0'; + +- inode = efivarfs_get_inode(sb, root->d_inode, S_IFREG | 0644, 0); ++ inode = efivarfs_get_inode(sb, root->d_inode, S_IFREG | 0644, 0, ++ is_removable); + if (!inode) + goto fail_name; + +@@ -200,7 +204,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent) + sb->s_d_op = &efivarfs_d_ops; + sb->s_time_gran = 1; + +- inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0); ++ inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true); + if (!inode) + return -ENOMEM; + inode->i_op = &efivarfs_dir_inode_operations; +diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h +index 2f38daaab3d7..d61c11170213 100644 +--- a/include/crypto/if_alg.h ++++ b/include/crypto/if_alg.h +@@ -30,8 +30,6 @@ struct alg_sock { + + struct sock *parent; + +- unsigned int refcnt; +- + const struct af_alg_type *type; + void *private; + }; +@@ -66,7 +64,6 @@ int af_alg_register_type(const struct af_alg_type *type); + int af_alg_unregister_type(const struct af_alg_type *type); + + int af_alg_release(struct socket *sock); +-void af_alg_release_parent(struct sock *sk); + int af_alg_accept(struct sock *sk, struct socket *newsock); + + int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len, +@@ -83,6 +80,11 @@ static inline struct alg_sock *alg_sk(struct sock *sk) + return (struct alg_sock *)sk; + } + ++static inline void af_alg_release_parent(struct sock *sk) ++{ ++ sock_put(alg_sk(sk)->parent); ++} ++ + static inline void af_alg_init_completion(struct af_alg_completion *completion) + { + init_completion(&completion->completion); +diff --git a/include/linux/efi.h b/include/linux/efi.h +index 0a819e7a60c9..b39a4651360a 100644 +--- a/include/linux/efi.h ++++ b/include/linux/efi.h +@@ -792,8 +792,10 @@ struct efivars { + * and we use a page for reading/writing. + */ + ++#define EFI_VAR_NAME_LEN 1024 ++ + struct efi_variable { +- efi_char16_t VariableName[1024/sizeof(efi_char16_t)]; ++ efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)]; + efi_guid_t VendorGuid; + unsigned long DataSize; + __u8 Data[1024]; +@@ -864,7 +866,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *), + struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, + struct list_head *head, bool remove); + +-bool efivar_validate(struct efi_variable *var, u8 *data, unsigned long len); ++bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, ++ unsigned long data_size); ++bool efivar_variable_is_removable(efi_guid_t vendor, const char *name, ++ size_t len); + + extern struct work_struct efivar_work; + void efivar_run_worker(void); +diff --git a/include/linux/module.h b/include/linux/module.h +index eaf60ff9ba94..adea1d663a06 100644 +--- a/include/linux/module.h ++++ b/include/linux/module.h +@@ -227,6 +227,12 @@ struct module_ref { + unsigned long decs; + } __attribute((aligned(2 * sizeof(unsigned long)))); + ++struct mod_kallsyms { ++ Elf_Sym *symtab; ++ unsigned int num_symtab; ++ char *strtab; ++}; ++ + struct module { + enum module_state state; + +@@ -314,14 +320,9 @@ struct module { + #endif + + #ifdef CONFIG_KALLSYMS +- /* +- * We keep the symbol and string tables for kallsyms. 
+- * The core_* fields below are temporary, loader-only (they +- * could really be discarded after module init). +- */ +- Elf_Sym *symtab, *core_symtab; +- unsigned int num_symtab, core_num_syms; +- char *strtab, *core_strtab; ++ /* Protected by RCU and/or module_mutex: use rcu_dereference() */ ++ struct mod_kallsyms *kallsyms; ++ struct mod_kallsyms core_kallsyms; + + /* Section attributes */ + struct module_sect_attrs *sect_attrs; +diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h +index 97c8689c7e51..326f8b238c31 100644 +--- a/include/linux/tracepoint.h ++++ b/include/linux/tracepoint.h +@@ -129,9 +129,6 @@ static inline void tracepoint_synchronize_unregister(void) + void *it_func; \ + void *__data; \ + \ +- if (!cpu_online(raw_smp_processor_id())) \ +- return; \ +- \ + if (!(cond)) \ + return; \ + prercu; \ +@@ -265,15 +262,19 @@ static inline void tracepoint_synchronize_unregister(void) + * "void *__data, proto" as the callback prototype. + */ + #define DECLARE_TRACE_NOARGS(name) \ +- __DECLARE_TRACE(name, void, , 1, void *__data, __data) ++ __DECLARE_TRACE(name, void, , \ ++ cpu_online(raw_smp_processor_id()), \ ++ void *__data, __data) + + #define DECLARE_TRACE(name, proto, args) \ +- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1, \ +- PARAMS(void *__data, proto), \ +- PARAMS(__data, args)) ++ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ ++ cpu_online(raw_smp_processor_id()), \ ++ PARAMS(void *__data, proto), \ ++ PARAMS(__data, args)) + + #define DECLARE_TRACE_CONDITION(name, proto, args, cond) \ +- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \ ++ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ ++ cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \ + PARAMS(void *__data, proto), \ + PARAMS(__data, args)) + +diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h +index cbb20afdbc01..bb679b48f408 100644 +--- a/include/linux/ucs2_string.h ++++ b/include/linux/ucs2_string.h +@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s); + unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength); + int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len); + ++unsigned long ucs2_utf8size(const ucs2_char_t *src); ++unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, ++ unsigned long maxlength); ++ + #endif /* _LINUX_UCS2_STRING_H_ */ +diff --git a/kernel/module.c b/kernel/module.c +index e40617fac8ad..3a311a1d26d7 100644 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -180,6 +180,9 @@ struct load_info { + struct _ddebug *debug; + unsigned int num_debug; + bool sig_ok; ++#ifdef CONFIG_KALLSYMS ++ unsigned long mod_kallsyms_init_off; ++#endif + struct { + unsigned int sym, str, mod, vers, info, pcpu; + } index; +@@ -2320,8 +2323,20 @@ static void layout_symtab(struct module *mod, struct load_info *info) + strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, + info->index.str) | INIT_OFFSET_MASK; + pr_debug("\t%s\n", info->secstrings + strsect->sh_name); ++ ++ /* We'll tack temporary mod_kallsyms on the end. */ ++ mod->init_size = ALIGN(mod->init_size, ++ __alignof__(struct mod_kallsyms)); ++ info->mod_kallsyms_init_off = mod->init_size; ++ mod->init_size += sizeof(struct mod_kallsyms); ++ mod->init_size = debug_align(mod->init_size); + } + ++/* ++ * We use the full symtab and strtab which layout_symtab arranged to ++ * be appended to the init section. Later we switch to the cut-down ++ * core-only ones. 
++ */ + static void add_kallsyms(struct module *mod, const struct load_info *info) + { + unsigned int i, ndst; +@@ -2330,28 +2345,33 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) + char *s; + Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; + +- mod->symtab = (void *)symsec->sh_addr; +- mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym); ++ /* Set up to point into init section. */ ++ mod->kallsyms = mod->module_init + info->mod_kallsyms_init_off; ++ ++ mod->kallsyms->symtab = (void *)symsec->sh_addr; ++ mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym); + /* Make sure we get permanent strtab: don't use info->strtab. */ +- mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr; ++ mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr; + + /* Set types up while we still have access to sections. */ +- for (i = 0; i < mod->num_symtab; i++) +- mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); +- +- mod->core_symtab = dst = mod->module_core + info->symoffs; +- mod->core_strtab = s = mod->module_core + info->stroffs; +- src = mod->symtab; +- for (ndst = i = 0; i < mod->num_symtab; i++) { ++ for (i = 0; i < mod->kallsyms->num_symtab; i++) ++ mod->kallsyms->symtab[i].st_info ++ = elf_type(&mod->kallsyms->symtab[i], info); ++ ++ /* Now populate the cut down core kallsyms for after init. */ ++ mod->core_kallsyms.symtab = dst = mod->module_core + info->symoffs; ++ mod->core_kallsyms.strtab = s = mod->module_core + info->stroffs; ++ src = mod->kallsyms->symtab; ++ for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) { + if (i == 0 || + is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { + dst[ndst] = src[i]; +- dst[ndst++].st_name = s - mod->core_strtab; +- s += strlcpy(s, &mod->strtab[src[i].st_name], ++ dst[ndst++].st_name = s - mod->core_kallsyms.strtab; ++ s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name], + KSYM_NAME_LEN) + 1; + } + } +- mod->core_num_syms = ndst; ++ mod->core_kallsyms.num_symtab = ndst; + } + #else + static inline void layout_symtab(struct module *mod, struct load_info *info) +@@ -3089,9 +3109,8 @@ static int do_init_module(struct module *mod) + module_put(mod); + trim_init_extable(mod); + #ifdef CONFIG_KALLSYMS +- mod->num_symtab = mod->core_num_syms; +- mod->symtab = mod->core_symtab; +- mod->strtab = mod->core_strtab; ++ /* Switch to core kallsyms now init is done: kallsyms may be walking! */ ++ rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms); + #endif + unset_module_init_ro_nx(mod); + module_free(mod, mod->module_init); +@@ -3381,9 +3400,9 @@ static inline int is_arm_mapping_symbol(const char *str) + && (str[2] == '\0' || str[2] == '.'); + } + +-static const char *symname(struct module *mod, unsigned int symnum) ++static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum) + { +- return mod->strtab + mod->symtab[symnum].st_name; ++ return kallsyms->strtab + kallsyms->symtab[symnum].st_name; + } + + static const char *get_ksymbol(struct module *mod, +@@ -3393,6 +3412,7 @@ static const char *get_ksymbol(struct module *mod, + { + unsigned int i, best = 0; + unsigned long nextval; ++ struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + + /* At worse, next value is at end of module */ + if (within_module_init(addr, mod)) +@@ -3402,32 +3422,32 @@ static const char *get_ksymbol(struct module *mod, + + /* Scan for closest preceding symbol, and next symbol. (ELF + starts real symbols at 1). 
*/ +- for (i = 1; i < mod->num_symtab; i++) { +- if (mod->symtab[i].st_shndx == SHN_UNDEF) ++ for (i = 1; i < kallsyms->num_symtab; i++) { ++ if (kallsyms->symtab[i].st_shndx == SHN_UNDEF) + continue; + + /* We ignore unnamed symbols: they're uninformative + * and inserted at a whim. */ +- if (*symname(mod, i) == '\0' +- || is_arm_mapping_symbol(symname(mod, i))) ++ if (*symname(kallsyms, i) == '\0' ++ || is_arm_mapping_symbol(symname(kallsyms, i))) + continue; + +- if (mod->symtab[i].st_value <= addr +- && mod->symtab[i].st_value > mod->symtab[best].st_value) ++ if (kallsyms->symtab[i].st_value <= addr ++ && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value) + best = i; +- if (mod->symtab[i].st_value > addr +- && mod->symtab[i].st_value < nextval) +- nextval = mod->symtab[i].st_value; ++ if (kallsyms->symtab[i].st_value > addr ++ && kallsyms->symtab[i].st_value < nextval) ++ nextval = kallsyms->symtab[i].st_value; + } + + if (!best) + return NULL; + + if (size) +- *size = nextval - mod->symtab[best].st_value; ++ *size = nextval - kallsyms->symtab[best].st_value; + if (offset) +- *offset = addr - mod->symtab[best].st_value; +- return symname(mod, best); ++ *offset = addr - kallsyms->symtab[best].st_value; ++ return symname(kallsyms, best); + } + + /* For kallsyms to ask for address resolution. NULL means not found. Careful +@@ -3523,18 +3543,21 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + + preempt_disable(); + list_for_each_entry_rcu(mod, &modules, list) { ++ struct mod_kallsyms *kallsyms; ++ + if (mod->state == MODULE_STATE_UNFORMED) + continue; +- if (symnum < mod->num_symtab) { +- *value = mod->symtab[symnum].st_value; +- *type = mod->symtab[symnum].st_info; +- strlcpy(name, symname(mod, symnum), KSYM_NAME_LEN); ++ kallsyms = rcu_dereference_sched(mod->kallsyms); ++ if (symnum < kallsyms->num_symtab) { ++ *value = kallsyms->symtab[symnum].st_value; ++ *type = kallsyms->symtab[symnum].st_info; ++ strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN); + strlcpy(module_name, mod->name, MODULE_NAME_LEN); + *exported = is_exported(name, *value, mod); + preempt_enable(); + return 0; + } +- symnum -= mod->num_symtab; ++ symnum -= kallsyms->num_symtab; + } + preempt_enable(); + return -ERANGE; +@@ -3543,11 +3566,12 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + static unsigned long mod_find_symname(struct module *mod, const char *name) + { + unsigned int i; ++ struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + +- for (i = 0; i < mod->num_symtab; i++) +- if (strcmp(name, symname(mod, i)) == 0 && +- mod->symtab[i].st_info != 'U') +- return mod->symtab[i].st_value; ++ for (i = 0; i < kallsyms->num_symtab; i++) ++ if (strcmp(name, symname(kallsyms, i)) == 0 && ++ kallsyms->symtab[i].st_info != 'U') ++ return kallsyms->symtab[i].st_value; + return 0; + } + +@@ -3584,11 +3608,14 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + int ret; + + list_for_each_entry(mod, &modules, list) { ++ /* We hold module_mutex: no need for rcu_dereference_sched */ ++ struct mod_kallsyms *kallsyms = mod->kallsyms; ++ + if (mod->state == MODULE_STATE_UNFORMED) + continue; +- for (i = 0; i < mod->num_symtab; i++) { +- ret = fn(data, symname(mod, i), +- mod, mod->symtab[i].st_value); ++ for (i = 0; i < kallsyms->num_symtab; i++) { ++ ret = fn(data, symname(kallsyms, i), ++ mod, kallsyms->symtab[i].st_value); + if (ret != 0) + return ret; + } +diff --git a/lib/ucs2_string.c 
b/lib/ucs2_string.c +index 6f500ef2301d..f0b323abb4c6 100644 +--- a/lib/ucs2_string.c ++++ b/lib/ucs2_string.c +@@ -49,3 +49,65 @@ ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len) + } + } + EXPORT_SYMBOL(ucs2_strncmp); ++ ++unsigned long ++ucs2_utf8size(const ucs2_char_t *src) ++{ ++ unsigned long i; ++ unsigned long j = 0; ++ ++ for (i = 0; i < ucs2_strlen(src); i++) { ++ u16 c = src[i]; ++ ++ if (c >= 0x800) ++ j += 3; ++ else if (c >= 0x80) ++ j += 2; ++ else ++ j += 1; ++ } ++ ++ return j; ++} ++EXPORT_SYMBOL(ucs2_utf8size); ++ ++/* ++ * copy at most maxlength bytes of whole utf8 characters to dest from the ++ * ucs2 string src. ++ * ++ * The return value is the number of characters copied, not including the ++ * final NUL character. ++ */ ++unsigned long ++ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength) ++{ ++ unsigned int i; ++ unsigned long j = 0; ++ unsigned long limit = ucs2_strnlen(src, maxlength); ++ ++ for (i = 0; maxlength && i < limit; i++) { ++ u16 c = src[i]; ++ ++ if (c >= 0x800) { ++ if (maxlength < 3) ++ break; ++ maxlength -= 3; ++ dest[j++] = 0xe0 | (c & 0xf000) >> 12; ++ dest[j++] = 0x80 | (c & 0x0fc0) >> 6; ++ dest[j++] = 0x80 | (c & 0x003f); ++ } else if (c >= 0x80) { ++ if (maxlength < 2) ++ break; ++ maxlength -= 2; ++ dest[j++] = 0xc0 | (c & 0x7c0) >> 6; ++ dest[j++] = 0x80 | (c & 0x03f); ++ } else { ++ maxlength -= 1; ++ dest[j++] = c & 0x7f; ++ } ++ } ++ if (maxlength) ++ dest[j] = '\0'; ++ return j; ++} ++EXPORT_SYMBOL(ucs2_as_utf8); +diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c +index 31bf2586fb84..864408026202 100644 +--- a/net/mac80211/agg-rx.c ++++ b/net/mac80211/agg-rx.c +@@ -290,7 +290,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, + } + + /* prepare A-MPDU MLME for Rx aggregation */ +- tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL); ++ tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL); + if (!tid_agg_rx) + goto end; + +diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c +index c1b5b73c5b91..c3111e2b6fa0 100644 +--- a/net/mac80211/rc80211_minstrel_ht.c ++++ b/net/mac80211/rc80211_minstrel_ht.c +@@ -463,7 +463,7 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb) + if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO) + return; + +- ieee80211_start_tx_ba_session(pubsta, tid, 5000); ++ ieee80211_start_tx_ba_session(pubsta, tid, 0); + } + + static void +diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c +index c8717c1d082e..87dd619fb2e9 100644 +--- a/net/wireless/wext-core.c ++++ b/net/wireless/wext-core.c +@@ -342,6 +342,39 @@ static const int compat_event_type_size[] = { + + /* IW event code */ + ++static void wireless_nlevent_flush(void) ++{ ++ struct sk_buff *skb; ++ struct net *net; ++ ++ ASSERT_RTNL(); ++ ++ for_each_net(net) { ++ while ((skb = skb_dequeue(&net->wext_nlevents))) ++ rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, ++ GFP_KERNEL); ++ } ++} ++ ++static int wext_netdev_notifier_call(struct notifier_block *nb, ++ unsigned long state, void *ptr) ++{ ++ /* ++ * When a netdev changes state in any way, flush all pending messages ++ * to avoid them going out in a strange order, e.g. RTM_NEWLINK after ++ * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close() ++ * or similar - all of which could otherwise happen due to delays from ++ * schedule_work(). 
++ */ ++ wireless_nlevent_flush(); ++ ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block wext_netdev_notifier = { ++ .notifier_call = wext_netdev_notifier_call, ++}; ++ + static int __net_init wext_pernet_init(struct net *net) + { + skb_queue_head_init(&net->wext_nlevents); +@@ -360,7 +393,12 @@ static struct pernet_operations wext_pernet_ops = { + + static int __init wireless_nlevent_init(void) + { +- return register_pernet_subsys(&wext_pernet_ops); ++ int err = register_pernet_subsys(&wext_pernet_ops); ++ ++ if (err) ++ return err; ++ ++ return register_netdevice_notifier(&wext_netdev_notifier); + } + + subsys_initcall(wireless_nlevent_init); +@@ -368,17 +406,8 @@ subsys_initcall(wireless_nlevent_init); + /* Process events generated by the wireless layer or the driver. */ + static void wireless_nlevent_process(struct work_struct *work) + { +- struct sk_buff *skb; +- struct net *net; +- + rtnl_lock(); +- +- for_each_net(net) { +- while ((skb = skb_dequeue(&net->wext_nlevents))) +- rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, +- GFP_KERNEL); +- } +- ++ wireless_nlevent_flush(); + rtnl_unlock(); + } + +diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c +index d4248e00160e..4f736197f6ee 100644 +--- a/sound/soc/codecs/wm8958-dsp2.c ++++ b/sound/soc/codecs/wm8958-dsp2.c +@@ -459,7 +459,7 @@ static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +@@ -549,7 +549,7 @@ static int wm8958_put_vss_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +@@ -582,7 +582,7 @@ static int wm8958_put_vss_hpf_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +@@ -749,7 +749,7 @@ static int wm8958_put_enh_eq_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c +index 66aec0c3b7bb..355b07e4fe2a 100644 +--- a/sound/soc/codecs/wm8994.c ++++ b/sound/soc/codecs/wm8994.c +@@ -360,7 +360,7 @@ static int wm8994_put_drc_enum(struct snd_kcontrol *kcontrol, + struct wm8994 *control = wm8994->wm8994; + struct wm8994_pdata *pdata = &control->pdata; + int drc = wm8994_get_drc(kcontrol->id.name); +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + + if (drc < 0) + return drc; +@@ -467,7 +467,7 @@ static int 
wm8994_put_retune_mobile_enum(struct snd_kcontrol *kcontrol, + struct wm8994 *control = wm8994->wm8994; + struct wm8994_pdata *pdata = &control->pdata; + int block = wm8994_get_retune_mobile_block(kcontrol->id.name); +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + + if (block < 0) + return block; +diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh +index 77edcdcc016b..057278448515 100644 +--- a/tools/testing/selftests/efivarfs/efivarfs.sh ++++ b/tools/testing/selftests/efivarfs/efivarfs.sh +@@ -88,7 +88,11 @@ test_delete() + exit 1 + fi + +- rm $file ++ rm $file 2>/dev/null ++ if [ $? -ne 0 ]; then ++ chattr -i $file ++ rm $file ++ fi + + if [ -e $file ]; then + echo "$file couldn't be deleted" >&2 +@@ -111,6 +115,7 @@ test_zero_size_delete() + exit 1 + fi + ++ chattr -i $file + printf "$attrs" > $file + + if [ -e $file ]; then +@@ -141,7 +146,11 @@ test_valid_filenames() + echo "$file could not be created" >&2 + ret=1 + else +- rm $file ++ rm $file 2>/dev/null ++ if [ $? -ne 0 ]; then ++ chattr -i $file ++ rm $file ++ fi + fi + done + +@@ -174,7 +183,11 @@ test_invalid_filenames() + + if [ -e $file ]; then + echo "Creating $file should have failed" >&2 +- rm $file ++ rm $file 2>/dev/null ++ if [ $? -ne 0 ]; then ++ chattr -i $file ++ rm $file ++ fi + ret=1 + fi + done +diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c +index 8c0764407b3c..4af74f733036 100644 +--- a/tools/testing/selftests/efivarfs/open-unlink.c ++++ b/tools/testing/selftests/efivarfs/open-unlink.c +@@ -1,10 +1,68 @@ ++#include + #include + #include + #include + #include ++#include + #include + #include + #include ++#include ++ ++static int set_immutable(const char *path, int immutable) ++{ ++ unsigned int flags; ++ int fd; ++ int rc; ++ int error; ++ ++ fd = open(path, O_RDONLY); ++ if (fd < 0) ++ return fd; ++ ++ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags); ++ if (rc < 0) { ++ error = errno; ++ close(fd); ++ errno = error; ++ return rc; ++ } ++ ++ if (immutable) ++ flags |= FS_IMMUTABLE_FL; ++ else ++ flags &= ~FS_IMMUTABLE_FL; ++ ++ rc = ioctl(fd, FS_IOC_SETFLAGS, &flags); ++ error = errno; ++ close(fd); ++ errno = error; ++ return rc; ++} ++ ++static int get_immutable(const char *path) ++{ ++ unsigned int flags; ++ int fd; ++ int rc; ++ int error; ++ ++ fd = open(path, O_RDONLY); ++ if (fd < 0) ++ return fd; ++ ++ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags); ++ if (rc < 0) { ++ error = errno; ++ close(fd); ++ errno = error; ++ return rc; ++ } ++ close(fd); ++ if (flags & FS_IMMUTABLE_FL) ++ return 1; ++ return 0; ++} + + int main(int argc, char **argv) + { +@@ -27,7 +85,7 @@ int main(int argc, char **argv) + buf[4] = 0; + + /* create a test variable */ +- fd = open(path, O_WRONLY | O_CREAT); ++ fd = open(path, O_WRONLY | O_CREAT, 0600); + if (fd < 0) { + perror("open(O_WRONLY)"); + return EXIT_FAILURE; +@@ -41,6 +99,18 @@ int main(int argc, char **argv) + + close(fd); + ++ rc = get_immutable(path); ++ if (rc < 0) { ++ perror("ioctl(FS_IOC_GETFLAGS)"); ++ return EXIT_FAILURE; ++ } else if (rc) { ++ rc = set_immutable(path, 0); ++ if (rc < 0) { ++ perror("ioctl(FS_IOC_SETFLAGS)"); ++ return EXIT_FAILURE; ++ } ++ } ++ + fd = open(path, O_RDONLY); + if (fd < 0) { + perror("open"); diff --git a/patch/kernel/odroidxu4-default/patch-3.10.98-99.patch b/patch/kernel/odroidxu4-default/patch-3.10.98-99.patch new file mode 100644 index 000000000..e405e5ac5 --- /dev/null +++ 
b/patch/kernel/odroidxu4-default/patch-3.10.98-99.patch @@ -0,0 +1,2573 @@ +diff --git a/Makefile b/Makefile +index dadd1edc6f84..f1e6491fd7d8 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 10 +-SUBLEVEL = 98 ++SUBLEVEL = 99 + EXTRAVERSION = + NAME = TOSSUG Baby Fish + +diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c +index a8d02223da44..14558a9fa3b3 100644 +--- a/arch/arc/kernel/unwind.c ++++ b/arch/arc/kernel/unwind.c +@@ -984,42 +984,13 @@ int arc_unwind(struct unwind_frame_info *frame) + (const u8 *)(fde + + 1) + + *fde, ptrType); +- if (pc >= endLoc) ++ if (pc >= endLoc) { + fde = NULL; +- } else +- fde = NULL; +- } +- if (fde == NULL) { +- for (fde = table->address, tableSize = table->size; +- cie = NULL, tableSize > sizeof(*fde) +- && tableSize - sizeof(*fde) >= *fde; +- tableSize -= sizeof(*fde) + *fde, +- fde += 1 + *fde / sizeof(*fde)) { +- cie = cie_for_fde(fde, table); +- if (cie == &bad_cie) { + cie = NULL; +- break; + } +- if (cie == NULL +- || cie == ¬_fde +- || (ptrType = fde_pointer_type(cie)) < 0) +- continue; +- ptr = (const u8 *)(fde + 2); +- startLoc = read_pointer(&ptr, +- (const u8 *)(fde + 1) + +- *fde, ptrType); +- if (!startLoc) +- continue; +- if (!(ptrType & DW_EH_PE_indirect)) +- ptrType &= +- DW_EH_PE_FORM | DW_EH_PE_signed; +- endLoc = +- startLoc + read_pointer(&ptr, +- (const u8 *)(fde + +- 1) + +- *fde, ptrType); +- if (pc >= startLoc && pc < endLoc) +- break; ++ } else { ++ fde = NULL; ++ cie = NULL; + } + } + } +diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S +index 920b63210806..34c35f0e3290 100644 +--- a/arch/mips/kvm/kvm_locore.S ++++ b/arch/mips/kvm/kvm_locore.S +@@ -156,9 +156,11 @@ FEXPORT(__kvm_mips_vcpu_run) + + FEXPORT(__kvm_mips_load_asid) + /* Set the ASID for the Guest Kernel */ +- sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ +- /* addresses shift to 0x80000000 */ +- bltz t0, 1f /* If kernel */ ++ PTR_L t0, VCPU_COP0(k1) ++ LONG_L t0, COP0_STATUS(t0) ++ andi t0, KSU_USER | ST0_ERL | ST0_EXL ++ xori t0, KSU_USER ++ bnez t0, 1f /* If kernel */ + addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ + addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */ + 1: +@@ -442,9 +444,11 @@ __kvm_mips_return_to_guest: + mtc0 t0, CP0_EPC + + /* Set the ASID for the Guest Kernel */ +- sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ +- /* addresses shift to 0x80000000 */ +- bltz t0, 1f /* If kernel */ ++ PTR_L t0, VCPU_COP0(k1) ++ LONG_L t0, COP0_STATUS(t0) ++ andi t0, KSU_USER | ST0_ERL | ST0_EXL ++ xori t0, KSU_USER ++ bnez t0, 1f /* If kernel */ + addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ + addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */ + 1: +diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c +index 843ec38fec7b..8aa5f30d8579 100644 +--- a/arch/mips/kvm/kvm_mips.c ++++ b/arch/mips/kvm/kvm_mips.c +@@ -308,7 +308,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) + + if (!gebase) { + err = -ENOMEM; +- goto out_free_cpu; ++ goto out_uninit_cpu; + } + kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n", + ALIGN(size, PAGE_SIZE), gebase); +@@ -368,6 +368,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) + out_free_gebase: + kfree(gebase); + ++out_uninit_cpu: ++ kvm_vcpu_uninit(vcpu); ++ + out_free_cpu: + kfree(vcpu); + +diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c +index c76f297b7149..33085819cd89 100644 +--- a/arch/mips/kvm/kvm_mips_emul.c ++++ 
b/arch/mips/kvm/kvm_mips_emul.c +@@ -935,7 +935,7 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, + + base = (inst >> 21) & 0x1f; + op_inst = (inst >> 16) & 0x1f; +- offset = inst & 0xffff; ++ offset = (int16_t)inst; + cache = (inst >> 16) & 0x3; + op = (inst >> 18) & 0x7; + +diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c +index 4d1ee88864e8..18c8b819b0aa 100644 +--- a/arch/s390/mm/extable.c ++++ b/arch/s390/mm/extable.c +@@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start, + int i; + + /* Normalize entries to being relative to the start of the section */ +- for (p = start, i = 0; p < finish; p++, i += 8) ++ for (p = start, i = 0; p < finish; p++, i += 8) { + p->insn += i; ++ p->fixup += i + 4; ++ } + sort(start, finish - start, sizeof(*start), cmp_ex, NULL); + /* Denormalize all entries */ +- for (p = start, i = 0; p < finish; p++, i += 8) ++ for (p = start, i = 0; p < finish; p++, i += 8) { + p->insn -= i; ++ p->fixup -= i + 4; ++ } + } + + #ifdef CONFIG_MODULES +diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c +index be8db9bb7878..666510b39870 100644 +--- a/arch/sparc/kernel/sys_sparc_64.c ++++ b/arch/sparc/kernel/sys_sparc_64.c +@@ -416,7 +416,7 @@ out: + + SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality) + { +- int ret; ++ long ret; + + if (personality(current->personality) == PER_LINUX32 && + personality(personality) == PER_LINUX) +diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c +index 337518c5042a..b412c62486f0 100644 +--- a/arch/um/os-Linux/start_up.c ++++ b/arch/um/os-Linux/start_up.c +@@ -95,6 +95,8 @@ static int start_ptraced_child(void) + { + int pid, n, status; + ++ fflush(stdout); ++ + pid = fork(); + if (pid == 0) + ptrace_child(); +diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c +index 6033be9ff81a..3c8bffdc71c8 100644 +--- a/arch/x86/platform/efi/efi.c ++++ b/arch/x86/platform/efi/efi.c +@@ -250,12 +250,19 @@ static efi_status_t __init phys_efi_set_virtual_address_map( + efi_memory_desc_t *virtual_map) + { + efi_status_t status; ++ unsigned long flags; + + efi_call_phys_prelog(); ++ ++ /* Disable interrupts around EFI calls: */ ++ local_irq_save(flags); + status = efi_call_phys4(efi_phys.set_virtual_address_map, + memory_map_size, descriptor_size, + descriptor_version, virtual_map); ++ local_irq_restore(flags); ++ + efi_call_phys_epilog(); ++ + return status; + } + +diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c +index 40e446941dd7..bebbee05e331 100644 +--- a/arch/x86/platform/efi/efi_32.c ++++ b/arch/x86/platform/efi/efi_32.c +@@ -33,19 +33,16 @@ + + /* + * To make EFI call EFI runtime service in physical addressing mode we need +- * prelog/epilog before/after the invocation to disable interrupt, to +- * claim EFI runtime service handler exclusively and to duplicate a memory in +- * low memory space say 0 - 3G. ++ * prolog/epilog before/after the invocation to claim the EFI runtime service ++ * handler exclusively and to duplicate a memory mapping in low memory space, ++ * say 0 - 3G. 
+ */ + +-static unsigned long efi_rt_eflags; + + void efi_call_phys_prelog(void) + { + struct desc_ptr gdt_descr; + +- local_irq_save(efi_rt_eflags); +- + load_cr3(initial_page_table); + __flush_tlb_all(); + +@@ -64,6 +61,4 @@ void efi_call_phys_epilog(void) + + load_cr3(swapper_pg_dir); + __flush_tlb_all(); +- +- local_irq_restore(efi_rt_eflags); + } +diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c +index 39a0e7f1f0a3..2f6c1a9734c8 100644 +--- a/arch/x86/platform/efi/efi_64.c ++++ b/arch/x86/platform/efi/efi_64.c +@@ -40,7 +40,6 @@ + #include + + static pgd_t *save_pgd __initdata; +-static unsigned long efi_flags __initdata; + + static void __init early_code_mapping_set_exec(int executable) + { +@@ -66,7 +65,6 @@ void __init efi_call_phys_prelog(void) + int n_pgds; + + early_code_mapping_set_exec(1); +- local_irq_save(efi_flags); + + n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE); + save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL); +@@ -90,7 +88,6 @@ void __init efi_call_phys_epilog(void) + set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]); + kfree(save_pgd); + __flush_tlb_all(); +- local_irq_restore(efi_flags); + early_code_mapping_set_exec(0); + } + +diff --git a/block/partitions/mac.c b/block/partitions/mac.c +index 76d8ba6379a9..bd5b91465230 100644 +--- a/block/partitions/mac.c ++++ b/block/partitions/mac.c +@@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state) + Sector sect; + unsigned char *data; + int slot, blocks_in_map; +- unsigned secsize; ++ unsigned secsize, datasize, partoffset; + #ifdef CONFIG_PPC_PMAC + int found_root = 0; + int found_root_goodness = 0; +@@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state) + } + secsize = be16_to_cpu(md->block_size); + put_dev_sector(sect); +- data = read_part_sector(state, secsize/512, §); ++ datasize = round_down(secsize, 512); ++ data = read_part_sector(state, datasize / 512, §); + if (!data) + return -1; +- part = (struct mac_partition *) (data + secsize%512); ++ partoffset = secsize % 512; ++ if (partoffset + sizeof(*part) > datasize) ++ return -1; ++ part = (struct mac_partition *) (data + partoffset); + if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) { + put_dev_sector(sect); + return 0; /* not a MacOS disk */ +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c +index 136803c47cdb..96e5ed188636 100644 +--- a/drivers/ata/libata-sff.c ++++ b/drivers/ata/libata-sff.c +@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap, + static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) + { + struct ata_port *ap = qc->ap; +- unsigned long flags; + + if (ap->ops->error_handler) { + if (in_wq) { +- spin_lock_irqsave(ap->lock, flags); +- + /* EH might have kicked in while host lock is + * released. 
+ */ +@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) + } else + ata_port_freeze(ap); + } +- +- spin_unlock_irqrestore(ap->lock, flags); + } else { + if (likely(!(qc->err_mask & AC_ERR_HSM))) + ata_qc_complete(qc); +@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) + } + } else { + if (in_wq) { +- spin_lock_irqsave(ap->lock, flags); + ata_sff_irq_on(ap); + ata_qc_complete(qc); +- spin_unlock_irqrestore(ap->lock, flags); + } else + ata_qc_complete(qc); + } +@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, + { + struct ata_link *link = qc->dev->link; + struct ata_eh_info *ehi = &link->eh_info; +- unsigned long flags = 0; + int poll_next; + ++ lockdep_assert_held(ap->lock); ++ + WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); + + /* Make sure ata_sff_qc_issue() does not throw things +@@ -1112,14 +1106,6 @@ fsm_start: + } + } + +- /* Send the CDB (atapi) or the first data block (ata pio out). +- * During the state transition, interrupt handler shouldn't +- * be invoked before the data transfer is complete and +- * hsm_task_state is changed. Hence, the following locking. +- */ +- if (in_wq) +- spin_lock_irqsave(ap->lock, flags); +- + if (qc->tf.protocol == ATA_PROT_PIO) { + /* PIO data out protocol. + * send first data block. +@@ -1135,9 +1121,6 @@ fsm_start: + /* send CDB */ + atapi_send_cdb(ap, qc); + +- if (in_wq) +- spin_unlock_irqrestore(ap->lock, flags); +- + /* if polling, ata_sff_pio_task() handles the rest. + * otherwise, interrupt handler takes over from here. + */ +@@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work) + u8 status; + int poll_next; + ++ spin_lock_irq(ap->lock); ++ + BUG_ON(ap->sff_pio_task_link == NULL); + /* qc can be NULL if timeout occurred */ + qc = ata_qc_from_tag(ap, link->active_tag); + if (!qc) { + ap->sff_pio_task_link = NULL; +- return; ++ goto out_unlock; + } + + fsm_start: +@@ -1381,11 +1366,14 @@ fsm_start: + */ + status = ata_sff_busy_wait(ap, ATA_BUSY, 5); + if (status & ATA_BUSY) { ++ spin_unlock_irq(ap->lock); + ata_msleep(ap, 2); ++ spin_lock_irq(ap->lock); ++ + status = ata_sff_busy_wait(ap, ATA_BUSY, 10); + if (status & ATA_BUSY) { + ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); +- return; ++ goto out_unlock; + } + } + +@@ -1402,6 +1390,8 @@ fsm_start: + */ + if (poll_next) + goto fsm_start; ++out_unlock: ++ spin_unlock_irq(ap->lock); + } + + /** +diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c +index dd1faa564eb2..cdfb98e70cfd 100644 +--- a/drivers/ata/sata_sil.c ++++ b/drivers/ata/sata_sil.c +@@ -631,6 +631,9 @@ static void sil_dev_config(struct ata_device *dev) + unsigned int n, quirks = 0; + unsigned char model_num[ATA_ID_PROD_LEN + 1]; + ++ /* This controller doesn't support trim */ ++ dev->horkage |= ATA_HORKAGE_NOTRIM; ++ + ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); + + for (n = 0; sil_blacklist[n].product; n++) +diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c +index 64f553f04fa4..5874ebf9dced 100644 +--- a/drivers/clocksource/vt8500_timer.c ++++ b/drivers/clocksource/vt8500_timer.c +@@ -50,6 +50,8 @@ + + #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) + ++#define MIN_OSCR_DELTA 16 ++ + static void __iomem *regbase; + + static cycle_t vt8500_timer_read(struct clocksource *cs) +@@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles, + cpu_relax(); + writel((unsigned 
long)alarm, regbase + TIMER_MATCH_VAL); + +- if ((signed)(alarm - clocksource.read(&clocksource)) <= 16) ++ if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA) + return -ETIME; + + writel(1, regbase + TIMER_IER_VAL); +@@ -162,7 +164,7 @@ static void __init vt8500_timer_init(struct device_node *np) + pr_err("%s: setup_irq failed for %s\n", __func__, + clockevent.name); + clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ, +- 4, 0xf0000000); ++ MIN_OSCR_DELTA * 2, 0xf0000000); + } + + CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init); +diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h +index b6b7d70f2832..5cfc1765af74 100644 +--- a/drivers/gpu/drm/ast/ast_drv.h ++++ b/drivers/gpu/drm/ast/ast_drv.h +@@ -296,6 +296,7 @@ int ast_framebuffer_init(struct drm_device *dev, + int ast_fbdev_init(struct drm_device *dev); + void ast_fbdev_fini(struct drm_device *dev); + void ast_fbdev_set_suspend(struct drm_device *dev, int state); ++void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr); + + struct ast_bo { + struct ttm_buffer_object bo; +diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c +index fbc0823cfa18..a298d8f72225 100644 +--- a/drivers/gpu/drm/ast/ast_fb.c ++++ b/drivers/gpu/drm/ast/ast_fb.c +@@ -366,3 +366,10 @@ void ast_fbdev_set_suspend(struct drm_device *dev, int state) + + fb_set_suspend(ast->fbdev->helper.fbdev, state); + } ++ ++void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr) ++{ ++ ast->fbdev->helper.fbdev->fix.smem_start = ++ ast->fbdev->helper.fbdev->apertures->ranges[0].base + gpu_addr; ++ ast->fbdev->helper.fbdev->fix.smem_len = ast->vram_size - gpu_addr; ++} +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c +index 96f874a508e2..313ccaf25f49 100644 +--- a/drivers/gpu/drm/ast/ast_main.c ++++ b/drivers/gpu/drm/ast/ast_main.c +@@ -359,6 +359,7 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags) + dev->mode_config.min_height = 0; + dev->mode_config.preferred_depth = 24; + dev->mode_config.prefer_shadow = 1; ++ dev->mode_config.fb_base = pci_resource_start(ast->dev->pdev, 0); + + if (ast->chip == AST2100 || + ast->chip == AST2200 || +diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c +index e8f6418b6dec..f3a54ad77e3f 100644 +--- a/drivers/gpu/drm/ast/ast_mode.c ++++ b/drivers/gpu/drm/ast/ast_mode.c +@@ -509,6 +509,8 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc, + ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); + if (ret) + DRM_ERROR("failed to kmap fbcon\n"); ++ else ++ ast_fbdev_set_base(ast, gpu_addr); + } + ast_bo_unreserve(bo); + +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c +index ba2ab9a9b988..f3cce23f4a62 100644 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c +@@ -452,7 +452,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, + } + + /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */ +- if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) && ++ if (((dev->pdev->device == 0x9802) || ++ (dev->pdev->device == 0x9805) || ++ (dev->pdev->device == 0x9806)) && + (dev->pdev->subsystem_vendor == 0x1734) && + (dev->pdev->subsystem_device == 0x11bd)) { + if (*connector_type == DRM_MODE_CONNECTOR_VGA) { +@@ -463,14 +465,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, + } + } + +- /* Fujitsu D3003-S2 board lists 
DVI-I as DVI-I and VGA */ +- if ((dev->pdev->device == 0x9805) && +- (dev->pdev->subsystem_vendor == 0x1734) && +- (dev->pdev->subsystem_device == 0x11bd)) { +- if (*connector_type == DRM_MODE_CONNECTOR_VGA) +- return false; +- } +- + return true; + } + +diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c +index db83d075606e..6acd3646ac08 100644 +--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c ++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c +@@ -73,6 +73,11 @@ static void radeon_hotplug_work_func(struct work_struct *work) + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *connector; + ++ /* we can race here at startup, some boards seem to trigger ++ * hotplug irqs when they shouldn't. */ ++ if (!rdev->mode_info.mode_config_initialized) ++ return; ++ + mutex_lock(&mode_config->mutex); + if (mode_config->num_connector) { + list_for_each_entry(connector, &mode_config->connector_list, head) +diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c +index f0bac68254b7..bb166849aa6e 100644 +--- a/drivers/gpu/drm/radeon/radeon_sa.c ++++ b/drivers/gpu/drm/radeon/radeon_sa.c +@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev, + /* see if we can skip over some allocations */ + } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); + ++ for (i = 0; i < RADEON_NUM_RINGS; ++i) ++ radeon_fence_ref(fences[i]); ++ + spin_unlock(&sa_manager->wq.lock); + r = radeon_fence_wait_any(rdev, fences, false); ++ for (i = 0; i < RADEON_NUM_RINGS; ++i) ++ radeon_fence_unref(&fences[i]); + spin_lock(&sa_manager->wq.lock); + /* if we have nothing to wait for block */ + if (r == -ENOENT && block) { +diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c +index 4a14e113369d..f7015592544f 100644 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c +@@ -619,7 +619,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm) + 0, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) { +- while (--i) { ++ while (i--) { + pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + gtt->ttm.dma_address[i] = 0; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +index 6c44c69a5ba4..94a0baac93dd 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +@@ -25,6 +25,7 @@ + * + **************************************************************************/ + #include ++#include + + #include + #include "vmwgfx_drv.h" +@@ -1192,6 +1193,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + static int __init vmwgfx_init(void) + { + int ret; ++ ++#ifdef CONFIG_VGA_CONSOLE ++ if (vgacon_text_force()) ++ return -EINVAL; ++#endif ++ + ret = drm_pci_init(&driver, &vmw_pci_driver); + if (ret) + DRM_ERROR("Failed initializing DRM.\n"); +diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c +index e893f6e1937d..3c84e96a485a 100644 +--- a/drivers/gpu/vga/vgaarb.c ++++ b/drivers/gpu/vga/vgaarb.c +@@ -392,8 +392,10 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) + set_current_state(interruptible ? 
+ TASK_INTERRUPTIBLE : + TASK_UNINTERRUPTIBLE); +- if (signal_pending(current)) { +- rc = -EINTR; ++ if (interruptible && signal_pending(current)) { ++ __set_current_state(TASK_RUNNING); ++ remove_wait_queue(&vga_wait_queue, &wait); ++ rc = -ERESTARTSYS; + break; + } + schedule(); +diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c +index 3e094cd6a0e3..a9194ef626cd 100644 +--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c ++++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c +@@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en + error = l2t_send(tdev, skb, l2e); + if (error < 0) + kfree_skb(skb); +- return error; ++ return error < 0 ? error : 0; + } + + int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb) +@@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb) + error = cxgb3_ofld_send(tdev, skb); + if (error < 0) + kfree_skb(skb); +- return error; ++ return error < 0 ? error : 0; + } + + static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb) +diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c +index dabb697b1c2a..48ba1c3e945a 100644 +--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c ++++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c +@@ -286,15 +286,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) + struct qib_ibdev *dev = to_idev(ibqp->device); + struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num); + struct qib_mcast *mcast = NULL; +- struct qib_mcast_qp *p, *tmp; ++ struct qib_mcast_qp *p, *tmp, *delp = NULL; + struct rb_node *n; + int last = 0; + int ret; + +- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) { +- ret = -EINVAL; +- goto bail; +- } ++ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) ++ return -EINVAL; + + spin_lock_irq(&ibp->lock); + +@@ -303,8 +301,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) + while (1) { + if (n == NULL) { + spin_unlock_irq(&ibp->lock); +- ret = -EINVAL; +- goto bail; ++ return -EINVAL; + } + + mcast = rb_entry(n, struct qib_mcast, rb_node); +@@ -328,6 +325,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) + */ + list_del_rcu(&p->list); + mcast->n_attached--; ++ delp = p; + + /* If this was the last attached QP, remove the GID too. */ + if (list_empty(&mcast->qp_list)) { +@@ -338,15 +336,16 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) + } + + spin_unlock_irq(&ibp->lock); ++ /* QP not attached */ ++ if (!delp) ++ return -EINVAL; ++ /* ++ * Wait for any list walkers to finish before freeing the ++ * list element. ++ */ ++ wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1); ++ qib_mcast_qp_free(delp); + +- if (p) { +- /* +- * Wait for any list walkers to finish before freeing the +- * list element. 
+- */ +- wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1); +- qib_mcast_qp_free(p); +- } + if (last) { + atomic_dec(&mcast->refcount); + wait_event(mcast->wait, !atomic_read(&mcast->refcount)); +@@ -355,11 +354,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) + dev->n_mcast_grps_allocated--; + spin_unlock_irq(&dev->n_mcast_grps_lock); + } +- +- ret = 0; +- +-bail: +- return ret; ++ return 0; + } + + int qib_mcast_tree_empty(struct qib_ibport *ibp) +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c +index b4713cea1913..2d2915fdbf02 100644 +--- a/drivers/md/bcache/super.c ++++ b/drivers/md/bcache/super.c +@@ -1959,8 +1959,10 @@ static int __init bcache_init(void) + closure_debug_init(); + + bcache_major = register_blkdev(0, "bcache"); +- if (bcache_major < 0) ++ if (bcache_major < 0) { ++ unregister_reboot_notifier(&reboot); + return bcache_major; ++ } + + if (!(bcache_wq = create_workqueue("bcache")) || + !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || +diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h +index 0b2536247cf5..84e27708ad97 100644 +--- a/drivers/md/dm-exception-store.h ++++ b/drivers/md/dm-exception-store.h +@@ -70,7 +70,7 @@ struct dm_exception_store_type { + * Update the metadata with this exception. + */ + void (*commit_exception) (struct dm_exception_store *store, +- struct dm_exception *e, ++ struct dm_exception *e, int valid, + void (*callback) (void *, int success), + void *callback_context); + +diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c +index 2d2b1b7588d7..8f6d3ea55401 100644 +--- a/drivers/md/dm-snap-persistent.c ++++ b/drivers/md/dm-snap-persistent.c +@@ -646,7 +646,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store, + } + + static void persistent_commit_exception(struct dm_exception_store *store, +- struct dm_exception *e, ++ struct dm_exception *e, int valid, + void (*callback) (void *, int success), + void *callback_context) + { +@@ -655,6 +655,9 @@ static void persistent_commit_exception(struct dm_exception_store *store, + struct core_exception ce; + struct commit_callback *cb; + ++ if (!valid) ++ ps->valid = 0; ++ + ce.old_chunk = e->old_chunk; + ce.new_chunk = e->new_chunk; + write_exception(ps, ps->current_committed++, &ce); +diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c +index 1ce9a2586e41..31439d53cf7e 100644 +--- a/drivers/md/dm-snap-transient.c ++++ b/drivers/md/dm-snap-transient.c +@@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store, + } + + static void transient_commit_exception(struct dm_exception_store *store, +- struct dm_exception *e, ++ struct dm_exception *e, int valid, + void (*callback) (void *, int success), + void *callback_context) + { + /* Just succeed */ +- callback(callback_context, 1); ++ callback(callback_context, valid); + } + + static void transient_usage(struct dm_exception_store *store, +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c +index d892a05c84f4..dbd0f00f7395 100644 +--- a/drivers/md/dm-snap.c ++++ b/drivers/md/dm-snap.c +@@ -1388,8 +1388,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err) + dm_table_event(s->ti->table); + } + +-static void pending_complete(struct dm_snap_pending_exception *pe, int success) ++static void pending_complete(void *context, int success) + { ++ struct dm_snap_pending_exception *pe = context; + struct dm_exception *e; + struct dm_snapshot *s = 
pe->snap; + struct bio *origin_bios = NULL; +@@ -1459,24 +1460,13 @@ out: + free_pending_exception(pe); + } + +-static void commit_callback(void *context, int success) +-{ +- struct dm_snap_pending_exception *pe = context; +- +- pending_complete(pe, success); +-} +- + static void complete_exception(struct dm_snap_pending_exception *pe) + { + struct dm_snapshot *s = pe->snap; + +- if (unlikely(pe->copy_error)) +- pending_complete(pe, 0); +- +- else +- /* Update the metadata if we are persistent */ +- s->store->type->commit_exception(s->store, &pe->e, +- commit_callback, pe); ++ /* Update the metadata if we are persistent */ ++ s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error, ++ pending_complete, pe); + } + + /* +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c +index 43f6250baadd..4bf9211b2740 100644 +--- a/drivers/md/dm-thin-metadata.c ++++ b/drivers/md/dm-thin-metadata.c +@@ -1191,6 +1191,12 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd) + dm_block_t held_root; + + /* ++ * We commit to ensure the btree roots which we increment in a ++ * moment are up to date. ++ */ ++ __commit_transaction(pmd); ++ ++ /* + * Copy the superblock. + */ + dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION); +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c +index ec56072c6326..295f74d4f0ab 100644 +--- a/drivers/md/dm-thin.c ++++ b/drivers/md/dm-thin.c +@@ -2281,7 +2281,7 @@ static void pool_postsuspend(struct dm_target *ti) + struct pool_c *pt = ti->private; + struct pool *pool = pt->pool; + +- cancel_delayed_work(&pool->waker); ++ cancel_delayed_work_sync(&pool->waker); + flush_workqueue(pool->wq); + (void) commit_or_fallback(pool); + } +diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c +index 6d7f4d950b8f..b07fcda9ca71 100644 +--- a/drivers/md/persistent-data/dm-btree.c ++++ b/drivers/md/persistent-data/dm-btree.c +@@ -235,6 +235,16 @@ static bool is_internal_level(struct dm_btree_info *info, struct frame *f) + return f->level < (info->levels - 1); + } + ++static void unlock_all_frames(struct del_stack *s) ++{ ++ struct frame *f; ++ ++ while (unprocessed_frames(s)) { ++ f = s->spine + s->top--; ++ dm_tm_unlock(s->tm, f->b); ++ } ++} ++ + int dm_btree_del(struct dm_btree_info *info, dm_block_t root) + { + int r; +@@ -290,9 +300,13 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root) + f->current_child = f->nr_children; + } + } +- + out: ++ if (r) { ++ /* cleanup all frames of del_stack */ ++ unlock_all_frames(s); ++ } + kfree(s); ++ + return r; + } + EXPORT_SYMBOL_GPL(dm_btree_del); +diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c +index 1f925e856974..46a984291b7d 100644 +--- a/drivers/media/dvb-core/dvb_frontend.c ++++ b/drivers/media/dvb-core/dvb_frontend.c +@@ -2195,9 +2195,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file, + dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n", + __func__, c->delivery_system, fe->ops.info.type); + +- /* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't +- * do it, it is done for it. 
*/ +- info->caps |= FE_CAN_INVERSION_AUTO; ++ /* Set CAN_INVERSION_AUTO bit on in other than oneshot mode */ ++ if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT)) ++ info->caps |= FE_CAN_INVERSION_AUTO; + err = 0; + break; + } +diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c +index a2631be7ffac..08e0f0dd8728 100644 +--- a/drivers/media/dvb-frontends/tda1004x.c ++++ b/drivers/media/dvb-frontends/tda1004x.c +@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe) + { + struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache; + struct tda1004x_state* state = fe->demodulator_priv; ++ int status; + + dprintk("%s\n", __func__); + ++ status = tda1004x_read_byte(state, TDA1004X_STATUS_CD); ++ if (status == -1) ++ return -EIO; ++ ++ /* Only update the properties cache if device is locked */ ++ if (!(status & 8)) ++ return 0; ++ + // inversion status + fe_params->inversion = INVERSION_OFF; + if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20) +diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c +index 2e28c81a03ab..a5bee0d0d686 100644 +--- a/drivers/media/usb/gspca/ov534.c ++++ b/drivers/media/usb/gspca/ov534.c +@@ -1490,8 +1490,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev, + struct v4l2_fract *tpf = &cp->timeperframe; + struct sd *sd = (struct sd *) gspca_dev; + +- /* Set requested framerate */ +- sd->frame_rate = tpf->denominator / tpf->numerator; ++ if (tpf->numerator == 0 || tpf->denominator == 0) ++ /* Set default framerate */ ++ sd->frame_rate = 30; ++ else ++ /* Set requested framerate */ ++ sd->frame_rate = tpf->denominator / tpf->numerator; ++ + if (gspca_dev->streaming) + set_frame_rate(gspca_dev); + +diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c +index 4cb511ccc5f6..22ea6aefd22f 100644 +--- a/drivers/media/usb/gspca/topro.c ++++ b/drivers/media/usb/gspca/topro.c +@@ -4791,7 +4791,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev, + struct v4l2_fract *tpf = &cp->timeperframe; + int fr, i; + +- sd->framerate = tpf->denominator / tpf->numerator; ++ if (tpf->numerator == 0 || tpf->denominator == 0) ++ sd->framerate = 30; ++ else ++ sd->framerate = tpf->denominator / tpf->numerator; ++ + if (gspca_dev->streaming) + setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure)); + +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c +index 885ba4a19a6c..ebb40a292d67 100644 +--- a/drivers/mmc/card/block.c ++++ b/drivers/mmc/card/block.c +@@ -59,8 +59,7 @@ MODULE_ALIAS("mmc:block"); + #define INAND_CMD38_ARG_SECTRIM2 0x88 + #define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ + +-#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \ +- (req->cmd_flags & REQ_META)) && \ ++#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \ + (rq_data_dir(req) == WRITE)) + #define PACKED_CMD_VER 0x01 + #define PACKED_CMD_WR 0x02 +@@ -1300,13 +1299,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, + + /* + * Reliable writes are used to implement Forced Unit Access and +- * REQ_META accesses, and are supported only on MMCs. +- * +- * XXX: this really needs a good explanation of why REQ_META +- * is treated special. ++ * are supported only on MMCs. 
+ */ +- bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || +- (req->cmd_flags & REQ_META)) && ++ bool do_rel_wr = (req->cmd_flags & REQ_FUA) && + (rq_data_dir(req) == WRITE) && + (md->flags & MMC_BLK_REL_WR); + +diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c +index f4f3038c1df0..faeda85e78fa 100644 +--- a/drivers/mmc/host/mmci.c ++++ b/drivers/mmc/host/mmci.c +@@ -1740,7 +1740,7 @@ static struct amba_id mmci_ids[] = { + { + .id = 0x00280180, + .mask = 0x00ffffff, +- .data = &variant_u300, ++ .data = &variant_nomadik, + }, + { + .id = 0x00480180, +diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c +index c2d0559115d3..732a8ed571c2 100644 +--- a/drivers/net/can/sja1000/sja1000.c ++++ b/drivers/net/can/sja1000/sja1000.c +@@ -187,6 +187,9 @@ static void sja1000_start(struct net_device *dev) + /* clear interrupt flags */ + priv->read_reg(priv, SJA1000_IR); + ++ /* clear interrupt flags */ ++ priv->read_reg(priv, SJA1000_IR); ++ + /* leave reset mode */ + set_normal_mode(dev); + } +diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c +index 5f9a7ad9b964..d921416295ce 100644 +--- a/drivers/net/can/usb/ems_usb.c ++++ b/drivers/net/can/usb/ems_usb.c +@@ -118,6 +118,9 @@ MODULE_LICENSE("GPL v2"); + */ + #define EMS_USB_ARM7_CLOCK 8000000 + ++#define CPC_TX_QUEUE_TRIGGER_LOW 25 ++#define CPC_TX_QUEUE_TRIGGER_HIGH 35 ++ + /* + * CAN-Message representation in a CPC_MSG. Message object type is + * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or +@@ -279,6 +282,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb) + switch (urb->status) { + case 0: + dev->free_slots = dev->intr_in_buffer[1]; ++ if(dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH){ ++ if (netif_queue_stopped(netdev)){ ++ netif_wake_queue(netdev); ++ } ++ } + break; + + case -ECONNRESET: /* unlink */ +@@ -530,8 +538,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb) + /* Release context */ + context->echo_index = MAX_TX_URBS; + +- if (netif_queue_stopped(netdev)) +- netif_wake_queue(netdev); + } + + /* +@@ -591,7 +597,7 @@ static int ems_usb_start(struct ems_usb *dev) + int err, i; + + dev->intr_in_buffer[0] = 0; +- dev->free_slots = 15; /* initial size */ ++ dev->free_slots = 50; /* initial size */ + + for (i = 0; i < MAX_RX_URBS; i++) { + struct urb *urb = NULL; +@@ -841,7 +847,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne + + /* Slow down tx path */ + if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS || +- dev->free_slots < 5) { ++ dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) { + netif_stop_queue(netdev); + } + } +diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c +index 76ef634caf6f..b84e713445d0 100644 +--- a/drivers/pci/pcie/aer/aerdrv.c ++++ b/drivers/pci/pcie/aer/aerdrv.c +@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev) + rpc->rpd = dev; + INIT_WORK(&rpc->dpc_handler, aer_isr); + mutex_init(&rpc->rpc_mutex); +- init_waitqueue_head(&rpc->wait_release); + + /* Use PCIe bus function to store rpc into PCIe device */ + set_service_data(dev, rpc); +@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev) + if (rpc->isr) + free_irq(dev->irq, dev); + +- wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx); +- ++ flush_work(&rpc->dpc_handler); + aer_disable_rootport(rpc); + kfree(rpc); + set_service_data(dev, NULL); +diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h +index d12c77cd6991..3b8766e1e51b 100644 
+--- a/drivers/pci/pcie/aer/aerdrv.h ++++ b/drivers/pci/pcie/aer/aerdrv.h +@@ -76,7 +76,6 @@ struct aer_rpc { + * recovery on the same + * root port hierarchy + */ +- wait_queue_head_t wait_release; + }; + + struct aer_broadcast_data { +diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c +index 0f4554e48cc5..a017aac0d7ed 100644 +--- a/drivers/pci/pcie/aer/aerdrv_core.c ++++ b/drivers/pci/pcie/aer/aerdrv_core.c +@@ -817,8 +817,6 @@ void aer_isr(struct work_struct *work) + while (get_e_source(rpc, &e_src)) + aer_isr_one_error(p_device, &e_src); + mutex_unlock(&rpc->rpc_mutex); +- +- wake_up(&rpc->wait_release); + } + + /** +diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c +index f7197a790341..eb402f4f0e2f 100644 +--- a/drivers/pci/xen-pcifront.c ++++ b/drivers/pci/xen-pcifront.c +@@ -51,7 +51,7 @@ struct pcifront_device { + }; + + struct pcifront_sd { +- int domain; ++ struct pci_sysdata sd; + struct pcifront_device *pdev; + }; + +@@ -65,7 +65,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd, + unsigned int domain, unsigned int bus, + struct pcifront_device *pdev) + { +- sd->domain = domain; ++ /* Because we do not expose that information via XenBus. */ ++ sd->sd.node = first_online_node; ++ sd->sd.domain = domain; + sd->pdev = pdev; + } + +@@ -463,8 +465,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev, + dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n", + domain, bus); + +- bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL); +- sd = kmalloc(sizeof(*sd), GFP_KERNEL); ++ bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL); ++ sd = kzalloc(sizeof(*sd), GFP_KERNEL); + if (!bus_entry || !sd) { + err = -ENOMEM; + goto err_out; +diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c +index 3bed2f55cf7d..3ccadf631d45 100644 +--- a/drivers/power/wm831x_power.c ++++ b/drivers/power/wm831x_power.c +@@ -567,7 +567,7 @@ static int wm831x_power_probe(struct platform_device *pdev) + + irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO")); + ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq, +- IRQF_TRIGGER_RISING, "System power low", ++ IRQF_TRIGGER_RISING | IRQF_ONESHOT, "System power low", + power); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n", +@@ -577,7 +577,7 @@ static int wm831x_power_probe(struct platform_device *pdev) + + irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC")); + ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq, +- IRQF_TRIGGER_RISING, "Power source", ++ IRQF_TRIGGER_RISING | IRQF_ONESHOT, "Power source", + power); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n", +@@ -590,7 +590,7 @@ static int wm831x_power_probe(struct platform_device *pdev) + platform_get_irq_byname(pdev, + wm831x_bat_irqs[i])); + ret = request_threaded_irq(irq, NULL, wm831x_bat_irq, +- IRQF_TRIGGER_RISING, ++ IRQF_TRIGGER_RISING | IRQF_ONESHOT, + wm831x_bat_irqs[i], + power); + if (ret != 0) { +diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c +index a2597e683e79..6a64e86e8ccd 100644 +--- a/drivers/s390/block/dasd_alias.c ++++ b/drivers/s390/block/dasd_alias.c +@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) + spin_unlock_irqrestore(&lcu->lock, flags); + cancel_work_sync(&lcu->suc_data.worker); + spin_lock_irqsave(&lcu->lock, flags); +- if (device == lcu->suc_data.device) ++ if (device == lcu->suc_data.device) { ++ 
dasd_put_device(device); + lcu->suc_data.device = NULL; ++ } + } + was_pending = 0; + if (device == lcu->ruac_data.device) { +@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) + was_pending = 1; + cancel_delayed_work_sync(&lcu->ruac_data.dwork); + spin_lock_irqsave(&lcu->lock, flags); +- if (device == lcu->ruac_data.device) ++ if (device == lcu->ruac_data.device) { ++ dasd_put_device(device); + lcu->ruac_data.device = NULL; ++ } + } + private->lcu = NULL; + spin_unlock_irqrestore(&lcu->lock, flags); +@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work) + if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) { + DBF_DEV_EVENT(DBF_WARNING, device, "could not update" + " alias data in lcu (rc = %d), retry later", rc); +- schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ); ++ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ)) ++ dasd_put_device(device); + } else { ++ dasd_put_device(device); + lcu->ruac_data.device = NULL; + lcu->flags &= ~UPDATE_PENDING; + } +@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu, + */ + if (!usedev) + return -EINVAL; ++ dasd_get_device(usedev); + lcu->ruac_data.device = usedev; +- schedule_delayed_work(&lcu->ruac_data.dwork, 0); ++ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0)) ++ dasd_put_device(usedev); + return 0; + } + +@@ -722,7 +730,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu, + ASCEBC((char *) &cqr->magic, 4); + ccw = cqr->cpaddr; + ccw->cmd_code = DASD_ECKD_CCW_RSCK; +- ccw->flags = 0 ; ++ ccw->flags = CCW_FLAG_SLI; + ccw->count = 16; + ccw->cda = (__u32)(addr_t) cqr->data; + ((char *)cqr->data)[0] = reason; +@@ -926,6 +934,7 @@ static void summary_unit_check_handling_work(struct work_struct *work) + /* 3. 
read new alias configuration */ + _schedule_lcu_update(lcu, device); + lcu->suc_data.device = NULL; ++ dasd_put_device(device); + spin_unlock_irqrestore(&lcu->lock, flags); + } + +@@ -985,6 +994,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device, + } + lcu->suc_data.reason = reason; + lcu->suc_data.device = device; ++ dasd_get_device(device); + spin_unlock(&lcu->lock); +- schedule_work(&lcu->suc_data.worker); ++ if (!schedule_work(&lcu->suc_data.worker)) ++ dasd_put_device(device); + }; +diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h +index b52121358385..280e769a1686 100644 +--- a/drivers/scsi/megaraid/megaraid_sas.h ++++ b/drivers/scsi/megaraid/megaraid_sas.h +@@ -300,6 +300,8 @@ enum MR_EVT_ARGS { + MR_EVT_ARGS_GENERIC, + }; + ++ ++#define SGE_BUFFER_SIZE 4096 + /* + * define constants for device list query options + */ +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c +index 78b4fe845245..e6dfa8108301 100644 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c +@@ -3602,7 +3602,7 @@ static int megasas_init_fw(struct megasas_instance *instance) + } + + instance->max_sectors_per_req = instance->max_num_sge * +- PAGE_SIZE / 512; ++ SGE_BUFFER_SIZE / 512; + if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) + instance->max_sectors_per_req = tmp_sectors; + +@@ -5051,6 +5051,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) + int i; + int error = 0; + compat_uptr_t ptr; ++ unsigned long local_raw_ptr; ++ u32 local_sense_off; ++ u32 local_sense_len; + + if (clear_user(ioc, sizeof(*ioc))) + return -EFAULT; +@@ -5068,9 +5071,15 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) + * sense_len is not null, so prepare the 64bit value under + * the same condition. + */ +- if (ioc->sense_len) { ++ if (get_user(local_raw_ptr, ioc->frame.raw) || ++ get_user(local_sense_off, &ioc->sense_off) || ++ get_user(local_sense_len, &ioc->sense_len)) ++ return -EFAULT; ++ ++ ++ if (local_sense_len) { + void __user **sense_ioc_ptr = +- (void __user **)(ioc->frame.raw + ioc->sense_off); ++ (void __user **)((u8*)local_raw_ptr + local_sense_off); + compat_uptr_t *sense_cioc_ptr = + (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off); + if (get_user(ptr, sense_cioc_ptr) || +diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c +index eba183c428cf..3643bbf5456d 100644 +--- a/drivers/scsi/ses.c ++++ b/drivers/scsi/ses.c +@@ -70,6 +70,7 @@ static int ses_probe(struct device *dev) + static int ses_recv_diag(struct scsi_device *sdev, int page_code, + void *buf, int bufflen) + { ++ int ret; + unsigned char cmd[] = { + RECEIVE_DIAGNOSTIC, + 1, /* Set PCV bit */ +@@ -78,9 +79,26 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code, + bufflen & 0xff, + 0 + }; ++ unsigned char recv_page_code; + +- return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, ++ ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, + NULL, SES_TIMEOUT, SES_RETRIES, NULL); ++ if (unlikely(!ret)) ++ return ret; ++ ++ recv_page_code = ((unsigned char *)buf)[0]; ++ ++ if (likely(recv_page_code == page_code)) ++ return ret; ++ ++ /* successful diagnostic but wrong page code. 
This happens to some ++ * USB devices, just print a message and pretend there was an error */ ++ ++ sdev_printk(KERN_ERR, sdev, ++ "Wrong diagnostic page; asked for %d got %u\n", ++ page_code, recv_page_code); ++ ++ return -EINVAL; + } + + static int ses_send_diag(struct scsi_device *sdev, int page_code, +@@ -436,7 +454,15 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, + if (desc_ptr) + desc_ptr += len; + +- if (addl_desc_ptr) ++ if (addl_desc_ptr && ++ /* only find additional descriptions for specific devices */ ++ (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || ++ type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE || ++ type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER || ++ /* these elements are optional */ ++ type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT || ++ type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT || ++ type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS)) + addl_desc_ptr += addl_desc_ptr[1] + 2; + + } +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index d2ea64de92df..d6dab8adf60e 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -1726,6 +1726,11 @@ static const struct usb_device_id acm_ids[] = { + }, + #endif + ++ /*Samsung phone in firmware update mode */ ++ { USB_DEVICE(0x04e8, 0x685d), ++ .driver_info = IGNORE_DEVICE, ++ }, ++ + /* Exclude Infineon Flash Loader utility */ + { USB_DEVICE(0x058b, 0x0041), + .driver_info = IGNORE_DEVICE, +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 89ba7cfba5bc..303f3b3fb65f 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ + { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ + { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ ++ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ ++ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ + { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 81f6a572f016..9bab34cf01d4 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -315,6 +315,7 @@ static void option_instat_callback(struct urb *urb); + #define TOSHIBA_PRODUCT_G450 0x0d45 + + #define ALINK_VENDOR_ID 0x1e0e ++#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */ + #define ALINK_PRODUCT_PH300 0x9100 + #define ALINK_PRODUCT_3GU 0x9200 + +@@ -615,6 +616,10 @@ static const struct option_blacklist_info zte_1255_blacklist = { + .reserved = BIT(3) | BIT(4), + }; + ++static const struct option_blacklist_info simcom_sim7100e_blacklist = { ++ .reserved = BIT(5) | BIT(6), ++}; ++ + static const struct option_blacklist_info telit_le910_blacklist = { + .sendsetup = BIT(0), + .reserved = BIT(1) | BIT(2), +@@ -1130,6 +1135,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ ++ { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */ ++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { 
USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ +@@ -1645,6 +1652,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, + { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) }, + { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E), ++ .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist }, + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), + .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist + }, +diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c +index ee59b74768d9..beaa7cc4e857 100644 +--- a/drivers/virtio/virtio.c ++++ b/drivers/virtio/virtio.c +@@ -238,6 +238,7 @@ static int virtio_init(void) + static void __exit virtio_exit(void) + { + bus_unregister(&virtio_bus); ++ ida_destroy(&virtio_index_ida); + } + core_initcall(virtio_init); + module_exit(virtio_exit); +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 7360f03ddbe1..9612a01198df 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -2437,6 +2437,7 @@ int open_ctree(struct super_block *sb, + "unsupported option features (%Lx).\n", + (unsigned long long)features); + err = -EINVAL; ++ brelse(bh); + goto fail_alloc; + } + +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index ae29b403a7e2..b5d13c4eea00 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -7477,15 +7477,28 @@ int btrfs_readpage(struct file *file, struct page *page) + static int btrfs_writepage(struct page *page, struct writeback_control *wbc) + { + struct extent_io_tree *tree; +- ++ struct inode *inode = page->mapping->host; ++ int ret; + + if (current->flags & PF_MEMALLOC) { + redirty_page_for_writepage(wbc, page); + unlock_page(page); + return 0; + } ++ ++ /* ++ * If we are under memory pressure we will call this directly from the ++ * VM, we need to make sure we have the inode referenced for the ordered ++ * extent. If not just return like we didn't do anything. ++ */ ++ if (!igrab(inode)) { ++ redirty_page_for_writepage(wbc, page); ++ return AOP_WRITEPAGE_ACTIVATE; ++ } + tree = &BTRFS_I(page->mapping->host)->io_tree; +- return extent_write_full_page(tree, page, btrfs_get_extent, wbc); ++ ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc); ++ btrfs_add_delayed_iput(inode); ++ return ret; + } + + static int btrfs_writepages(struct address_space *mapping, +@@ -8474,9 +8487,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, + /* + * 2 items for inode item and ref + * 2 items for dir items ++ * 1 item for updating parent inode item ++ * 1 item for the inline extent item + * 1 item for xattr if selinux is on + */ +- trans = btrfs_start_transaction(root, 5); ++ trans = btrfs_start_transaction(root, 7); + if (IS_ERR(trans)) + return PTR_ERR(trans); + +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c +index 414c1b9eb896..3104e0eec816 100644 +--- a/fs/btrfs/send.c ++++ b/fs/btrfs/send.c +@@ -1338,7 +1338,21 @@ static int read_symlink(struct send_ctx *sctx, + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; +- BUG_ON(ret); ++ if (ret) { ++ /* ++ * An empty symlink inode. 
Can happen in rare error paths when ++ * creating a symlink (transaction committed before the inode ++ * eviction handler removed the symlink inode items and a crash ++ * happened in between or the subvol was snapshoted in between). ++ * Print an informative message to dmesg/syslog so that the user ++ * can delete the symlink. ++ */ ++ btrfs_err(root->fs_info, ++ "Found empty symlink inode %llu at root %llu", ++ ino, root->root_key.objectid); ++ ret = -EIO; ++ goto out; ++ } + + ei = btrfs_item_ptr(path->nodes[0], path->slots[0], + struct btrfs_file_extent_item); +diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c +index 32f35f187989..b58a9cbb9695 100644 +--- a/fs/hostfs/hostfs_kern.c ++++ b/fs/hostfs/hostfs_kern.c +@@ -720,15 +720,13 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, + + init_special_inode(inode, mode, dev); + err = do_mknod(name, mode, MAJOR(dev), MINOR(dev)); +- if (!err) ++ if (err) + goto out_free; + + err = read_name(inode, name); + __putname(name); + if (err) + goto out_put; +- if (err) +- goto out_put; + + d_instantiate(dentry, inode); + return 0; +diff --git a/fs/lockd/host.c b/fs/lockd/host.c +index 969d589c848d..b5f3c3ab0d5f 100644 +--- a/fs/lockd/host.c ++++ b/fs/lockd/host.c +@@ -116,7 +116,7 @@ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni, + atomic_inc(&nsm->sm_count); + else { + host = NULL; +- nsm = nsm_get_handle(ni->sap, ni->salen, ++ nsm = nsm_get_handle(ni->net, ni->sap, ni->salen, + ni->hostname, ni->hostname_len); + if (unlikely(nsm == NULL)) { + dprintk("lockd: %s failed; no nsm handle\n", +@@ -534,17 +534,18 @@ static struct nlm_host *next_host_state(struct hlist_head *cache, + + /** + * nlm_host_rebooted - Release all resources held by rebooted host ++ * @net: network namespace + * @info: pointer to decoded results of NLM_SM_NOTIFY call + * + * We were notified that the specified host has rebooted. Release + * all resources held by that peer. 
+ */ +-void nlm_host_rebooted(const struct nlm_reboot *info) ++void nlm_host_rebooted(const struct net *net, const struct nlm_reboot *info) + { + struct nsm_handle *nsm; + struct nlm_host *host; + +- nsm = nsm_reboot_lookup(info); ++ nsm = nsm_reboot_lookup(net, info); + if (unlikely(nsm == NULL)) + return; + +diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c +index 6ae664b489af..13fac49aff7f 100644 +--- a/fs/lockd/mon.c ++++ b/fs/lockd/mon.c +@@ -51,7 +51,6 @@ struct nsm_res { + }; + + static const struct rpc_program nsm_program; +-static LIST_HEAD(nsm_handles); + static DEFINE_SPINLOCK(nsm_lock); + + /* +@@ -259,33 +258,35 @@ void nsm_unmonitor(const struct nlm_host *host) + } + } + +-static struct nsm_handle *nsm_lookup_hostname(const char *hostname, +- const size_t len) ++static struct nsm_handle *nsm_lookup_hostname(const struct list_head *nsm_handles, ++ const char *hostname, const size_t len) + { + struct nsm_handle *nsm; + +- list_for_each_entry(nsm, &nsm_handles, sm_link) ++ list_for_each_entry(nsm, nsm_handles, sm_link) + if (strlen(nsm->sm_name) == len && + memcmp(nsm->sm_name, hostname, len) == 0) + return nsm; + return NULL; + } + +-static struct nsm_handle *nsm_lookup_addr(const struct sockaddr *sap) ++static struct nsm_handle *nsm_lookup_addr(const struct list_head *nsm_handles, ++ const struct sockaddr *sap) + { + struct nsm_handle *nsm; + +- list_for_each_entry(nsm, &nsm_handles, sm_link) ++ list_for_each_entry(nsm, nsm_handles, sm_link) + if (rpc_cmp_addr(nsm_addr(nsm), sap)) + return nsm; + return NULL; + } + +-static struct nsm_handle *nsm_lookup_priv(const struct nsm_private *priv) ++static struct nsm_handle *nsm_lookup_priv(const struct list_head *nsm_handles, ++ const struct nsm_private *priv) + { + struct nsm_handle *nsm; + +- list_for_each_entry(nsm, &nsm_handles, sm_link) ++ list_for_each_entry(nsm, nsm_handles, sm_link) + if (memcmp(nsm->sm_priv.data, priv->data, + sizeof(priv->data)) == 0) + return nsm; +@@ -350,6 +351,7 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap, + + /** + * nsm_get_handle - Find or create a cached nsm_handle ++ * @net: network namespace + * @sap: pointer to socket address of handle to find + * @salen: length of socket address + * @hostname: pointer to C string containing hostname to find +@@ -362,11 +364,13 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap, + * @hostname cannot be found in the handle cache. Returns NULL if + * an error occurs. 
+ */ +-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap, ++struct nsm_handle *nsm_get_handle(const struct net *net, ++ const struct sockaddr *sap, + const size_t salen, const char *hostname, + const size_t hostname_len) + { + struct nsm_handle *cached, *new = NULL; ++ struct lockd_net *ln = net_generic(net, lockd_net_id); + + if (hostname && memchr(hostname, '/', hostname_len) != NULL) { + if (printk_ratelimit()) { +@@ -381,9 +385,10 @@ retry: + spin_lock(&nsm_lock); + + if (nsm_use_hostnames && hostname != NULL) +- cached = nsm_lookup_hostname(hostname, hostname_len); ++ cached = nsm_lookup_hostname(&ln->nsm_handles, ++ hostname, hostname_len); + else +- cached = nsm_lookup_addr(sap); ++ cached = nsm_lookup_addr(&ln->nsm_handles, sap); + + if (cached != NULL) { + atomic_inc(&cached->sm_count); +@@ -397,7 +402,7 @@ retry: + } + + if (new != NULL) { +- list_add(&new->sm_link, &nsm_handles); ++ list_add(&new->sm_link, &ln->nsm_handles); + spin_unlock(&nsm_lock); + dprintk("lockd: created nsm_handle for %s (%s)\n", + new->sm_name, new->sm_addrbuf); +@@ -414,19 +419,22 @@ retry: + + /** + * nsm_reboot_lookup - match NLMPROC_SM_NOTIFY arguments to an nsm_handle ++ * @net: network namespace + * @info: pointer to NLMPROC_SM_NOTIFY arguments + * + * Returns a matching nsm_handle if found in the nsm cache. The returned + * nsm_handle's reference count is bumped. Otherwise returns NULL if some + * error occurred. + */ +-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info) ++struct nsm_handle *nsm_reboot_lookup(const struct net *net, ++ const struct nlm_reboot *info) + { + struct nsm_handle *cached; ++ struct lockd_net *ln = net_generic(net, lockd_net_id); + + spin_lock(&nsm_lock); + +- cached = nsm_lookup_priv(&info->priv); ++ cached = nsm_lookup_priv(&ln->nsm_handles, &info->priv); + if (unlikely(cached == NULL)) { + spin_unlock(&nsm_lock); + dprintk("lockd: never saw rebooted peer '%.*s' before\n", +diff --git a/fs/lockd/netns.h b/fs/lockd/netns.h +index 5010b55628b4..414da99744e9 100644 +--- a/fs/lockd/netns.h ++++ b/fs/lockd/netns.h +@@ -16,6 +16,7 @@ struct lockd_net { + spinlock_t nsm_clnt_lock; + unsigned int nsm_users; + struct rpc_clnt *nsm_clnt; ++ struct list_head nsm_handles; + }; + + extern int lockd_net_id; +diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c +index 7a318480ab7a..ce05c60ff06d 100644 +--- a/fs/lockd/svc.c ++++ b/fs/lockd/svc.c +@@ -583,6 +583,7 @@ static int lockd_init_net(struct net *net) + INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender); + INIT_LIST_HEAD(&ln->grace_list); + spin_lock_init(&ln->nsm_clnt_lock); ++ INIT_LIST_HEAD(&ln->nsm_handles); + return 0; + } + +diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c +index b147d1ae71fd..09c576f26c7b 100644 +--- a/fs/lockd/svc4proc.c ++++ b/fs/lockd/svc4proc.c +@@ -421,7 +421,7 @@ nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp, + return rpc_system_err; + } + +- nlm_host_rebooted(argp); ++ nlm_host_rebooted(SVC_NET(rqstp), argp); + return rpc_success; + } + +diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c +index 21171f0c6477..fb26b9f522e7 100644 +--- a/fs/lockd/svcproc.c ++++ b/fs/lockd/svcproc.c +@@ -464,7 +464,7 @@ nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp, + return rpc_system_err; + } + +- nlm_host_rebooted(argp); ++ nlm_host_rebooted(SVC_NET(rqstp), argp); + return rpc_success; + } + +diff --git a/fs/namei.c b/fs/namei.c +index 157c3dbacf6c..c87e15ee9255 100644 +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -2917,6 +2917,10 @@ opened: + 
goto exit_fput; + } + out: ++ if (unlikely(error > 0)) { ++ WARN_ON(1); ++ error = -EINVAL; ++ } + if (got_write) + mnt_drop_write(nd->path.mnt); + path_put(&save_parent); +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c +index cfa9163b3bb7..2bdaf57c82d0 100644 +--- a/fs/nfs/nfs4state.c ++++ b/fs/nfs/nfs4state.c +@@ -1452,7 +1452,7 @@ restart: + } + spin_unlock(&state->state_lock); + nfs4_put_open_state(state); +- clear_bit(NFS4CLNT_RECLAIM_NOGRACE, ++ clear_bit(NFS_STATE_RECLAIM_NOGRACE, + &state->flags); + spin_lock(&sp->so_lock); + goto restart; +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index 9f285fb9bab3..b86db1236c7c 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -170,7 +170,7 @@ static void *m_start(struct seq_file *m, loff_t *pos) + if (!priv->task) + return ERR_PTR(-ESRCH); + +- mm = mm_access(priv->task, PTRACE_MODE_READ); ++ mm = mm_access(priv->task, PTRACE_MODE_READ_FSCREDS); + if (!mm || IS_ERR(mm)) + return mm; + down_read(&mm->mmap_sem); +@@ -1044,7 +1044,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, + if (!pm.buffer) + goto out_task; + +- mm = mm_access(task, PTRACE_MODE_READ); ++ mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); + ret = PTR_ERR(mm); + if (!mm || IS_ERR(mm)) + goto out_free; +diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c +index 56123a6f462e..123c19890b14 100644 +--- a/fs/proc/task_nommu.c ++++ b/fs/proc/task_nommu.c +@@ -223,7 +223,7 @@ static void *m_start(struct seq_file *m, loff_t *pos) + if (!priv->task) + return ERR_PTR(-ESRCH); + +- mm = mm_access(priv->task, PTRACE_MODE_READ); ++ mm = mm_access(priv->task, PTRACE_MODE_READ_FSCREDS); + if (!mm || IS_ERR(mm)) { + put_task_struct(priv->task); + priv->task = NULL; +diff --git a/fs/splice.c b/fs/splice.c +index f183f1342c01..3b94a6bba29f 100644 +--- a/fs/splice.c ++++ b/fs/splice.c +@@ -949,6 +949,7 @@ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd, + + splice_from_pipe_begin(sd); + do { ++ cond_resched(); + ret = splice_from_pipe_next(pipe, sd); + if (ret > 0) + ret = splice_from_pipe_feed(pipe, sd, actor); +@@ -1189,7 +1190,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, + long ret, bytes; + umode_t i_mode; + size_t len; +- int i, flags; ++ int i, flags, more; + + /* + * We require the input being a regular file, as we don't want to +@@ -1232,6 +1233,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, + * Don't block on output, we have to drain the direct pipe. + */ + sd->flags &= ~SPLICE_F_NONBLOCK; ++ more = sd->flags & SPLICE_F_MORE; + + while (len) { + size_t read_len; +@@ -1245,6 +1247,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, + sd->total_len = read_len; + + /* ++ * If more data is pending, set SPLICE_F_MORE ++ * If this is the last data and SPLICE_F_MORE was not set ++ * initially, clears it. ++ */ ++ if (read_len < len) ++ sd->flags |= SPLICE_F_MORE; ++ else if (!more) ++ sd->flags &= ~SPLICE_F_MORE; ++ /* + * NOTE: nonblocking mode only applies to the input. We + * must not do the output in nonblocking mode as then we + * could get stuck data in the internal pipe: +diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h +index 9a33c5f7e126..f6c229e2bffa 100644 +--- a/include/linux/enclosure.h ++++ b/include/linux/enclosure.h +@@ -29,7 +29,11 @@ + /* A few generic types ... 
taken from ses-2 */ + enum enclosure_component_type { + ENCLOSURE_COMPONENT_DEVICE = 0x01, ++ ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07, ++ ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14, ++ ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15, + ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17, ++ ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18, + }; + + /* ses-2 common element status */ +diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h +index dcaad79f54ed..0adf073f13b3 100644 +--- a/include/linux/lockd/lockd.h ++++ b/include/linux/lockd/lockd.h +@@ -236,7 +236,8 @@ void nlm_rebind_host(struct nlm_host *); + struct nlm_host * nlm_get_host(struct nlm_host *); + void nlm_shutdown_hosts(void); + void nlm_shutdown_hosts_net(struct net *net); +-void nlm_host_rebooted(const struct nlm_reboot *); ++void nlm_host_rebooted(const struct net *net, ++ const struct nlm_reboot *); + + /* + * Host monitoring +@@ -244,11 +245,13 @@ void nlm_host_rebooted(const struct nlm_reboot *); + int nsm_monitor(const struct nlm_host *host); + void nsm_unmonitor(const struct nlm_host *host); + +-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap, ++struct nsm_handle *nsm_get_handle(const struct net *net, ++ const struct sockaddr *sap, + const size_t salen, + const char *hostname, + const size_t hostname_len); +-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info); ++struct nsm_handle *nsm_reboot_lookup(const struct net *net, ++ const struct nlm_reboot *info); + void nsm_release(struct nsm_handle *nsm); + + /* +diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h +index fc01d5cb4cf1..7d2021d3ee08 100644 +--- a/include/linux/nfs_fs.h ++++ b/include/linux/nfs_fs.h +@@ -578,9 +578,7 @@ static inline void nfs3_forget_cached_acls(struct inode *inode) + + static inline loff_t nfs_size_to_loff_t(__u64 size) + { +- if (size > (__u64) OFFSET_MAX - 1) +- return OFFSET_MAX - 1; +- return (loff_t) size; ++ return min_t(u64, size, OFFSET_MAX); + } + + static inline ino_t +diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h +index ba605015c4d8..0b2d0cbe0bab 100644 +--- a/include/linux/tracepoint.h ++++ b/include/linux/tracepoint.h +@@ -14,8 +14,11 @@ + * See the file COPYING for more details. 
+ */ + ++#include + #include + #include ++#include ++#include + #include + #include + +@@ -126,6 +129,9 @@ static inline void tracepoint_synchronize_unregister(void) + void *it_func; \ + void *__data; \ + \ ++ if (!cpu_online(raw_smp_processor_id())) \ ++ return; \ ++ \ + if (!(cond)) \ + return; \ + prercu; \ +diff --git a/include/net/af_unix.h b/include/net/af_unix.h +index 686760024572..6278e4d32612 100644 +--- a/include/net/af_unix.h ++++ b/include/net/af_unix.h +@@ -6,8 +6,8 @@ + #include + #include + +-extern void unix_inflight(struct file *fp); +-extern void unix_notinflight(struct file *fp); ++extern void unix_inflight(struct user_struct *user, struct file *fp); ++extern void unix_notinflight(struct user_struct *user, struct file *fp); + extern void unix_gc(void); + extern void wait_for_unix_gc(void); + extern struct sock *unix_get_socket(struct file *filp); +diff --git a/include/net/scm.h b/include/net/scm.h +index 8de2d37d2077..d00cd43a990c 100644 +--- a/include/net/scm.h ++++ b/include/net/scm.h +@@ -21,6 +21,7 @@ struct scm_creds { + struct scm_fp_list { + short count; + short max; ++ struct user_struct *user; + struct file *fp[SCM_MAX_FD]; + }; + +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c +index a79d267b64ec..7b0d31b67f6a 100644 +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -1229,6 +1229,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) + if (!desc) + return NULL; + ++ chip_bus_lock(desc); + raw_spin_lock_irqsave(&desc->lock, flags); + + /* +@@ -1242,7 +1243,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) + if (!action) { + WARN(1, "Trying to free already-free IRQ %d\n", irq); + raw_spin_unlock_irqrestore(&desc->lock, flags); +- ++ chip_bus_sync_unlock(desc); + return NULL; + } + +@@ -1265,6 +1266,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) + #endif + + raw_spin_unlock_irqrestore(&desc->lock, flags); ++ chip_bus_sync_unlock(desc); + + unregister_handler_proc(irq, action); + +@@ -1338,9 +1340,7 @@ void free_irq(unsigned int irq, void *dev_id) + desc->affinity_notify = NULL; + #endif + +- chip_bus_lock(desc); + kfree(__free_irq(irq, dev_id)); +- chip_bus_sync_unlock(desc); + } + EXPORT_SYMBOL(free_irq); + +diff --git a/kernel/resource.c b/kernel/resource.c +index d7386986e10e..b8422b135b68 100644 +--- a/kernel/resource.c ++++ b/kernel/resource.c +@@ -961,9 +961,10 @@ struct resource * __request_region(struct resource *parent, + if (!conflict) + break; + if (conflict != parent) { +- parent = conflict; +- if (!(conflict->flags & IORESOURCE_BUSY)) ++ if (!(conflict->flags & IORESOURCE_BUSY)) { ++ parent = conflict; + continue; ++ } + } + if (conflict->flags & flags & IORESOURCE_MUXED) { + add_wait_queue(&muxed_resource_wait, &wait); +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index c771f2547bef..f234c84d36c8 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -1587,7 +1587,6 @@ out: + */ + int wake_up_process(struct task_struct *p) + { +- WARN_ON(task_is_stopped_or_traced(p)); + return try_to_wake_up(p, TASK_NORMAL, 0); + } + EXPORT_SYMBOL(wake_up_process); +diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c +index ce033c7aa2e8..9cff0ab82b63 100644 +--- a/kernel/time/posix-clock.c ++++ b/kernel/time/posix-clock.c +@@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf, + static unsigned int posix_clock_poll(struct file *fp, poll_table *wait) + { + struct posix_clock *clk = get_posix_clock(fp); +- int result = 
0; ++ unsigned int result = 0; + + if (!clk) +- return -ENODEV; ++ return POLLERR; + + if (clk->ops.poll) + result = clk->ops.poll(clk, fp, wait); +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index ab21b8c66535..cb73c4e0741e 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -1948,12 +1948,6 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) + goto again; + } + +-static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) +-{ +- cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; +- cpu_buffer->reader_page->read = 0; +-} +- + static void rb_inc_iter(struct ring_buffer_iter *iter) + { + struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; +@@ -3591,7 +3585,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) + + /* Finally update the reader page to the new head */ + cpu_buffer->reader_page = reader; +- rb_reset_reader_page(cpu_buffer); ++ cpu_buffer->reader_page->read = 0; + + if (overwrite != cpu_buffer->last_overrun) { + cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; +@@ -3601,6 +3595,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) + goto again; + + out: ++ /* Update the read_stamp on the first event */ ++ if (reader && reader->read == 0) ++ cpu_buffer->read_stamp = reader->page->time_stamp; ++ + arch_spin_unlock(&cpu_buffer->lock); + local_irq_restore(flags); + +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c +index 5a898f15bfc6..7d054b7671ec 100644 +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -602,7 +602,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos) + * The ftrace subsystem is for showing formats only. + * They can not be enabled or disabled via the event files. 
+ */ +- if (call->class && call->class->reg) ++ if (call->class && call->class->reg && ++ !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) + return file; + } + +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index fa927fd5778d..fe7c4b91d2e7 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -1450,13 +1450,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, + timer_stats_timer_set_start_info(&dwork->timer); + + dwork->wq = wq; +- /* timer isn't guaranteed to run in this cpu, record earlier */ +- if (cpu == WORK_CPU_UNBOUND) +- cpu = raw_smp_processor_id(); + dwork->cpu = cpu; + timer->expires = jiffies + delay; + +- add_timer_on(timer, cpu); ++ if (unlikely(cpu != WORK_CPU_UNBOUND)) ++ add_timer_on(timer, cpu); ++ else ++ add_timer(timer); + } + + /** +diff --git a/lib/devres.c b/lib/devres.c +index 823533138fa0..20afaf181b27 100644 +--- a/lib/devres.c ++++ b/lib/devres.c +@@ -423,7 +423,7 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask) + if (!iomap) + return; + +- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { ++ for (i = 0; i < PCIM_IOMAP_MAX; i++) { + if (!(mask & (1 << i))) + continue; + +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c +index e3bea2e0821a..025ced8fbb57 100644 +--- a/net/ceph/messenger.c ++++ b/net/ceph/messenger.c +@@ -2277,7 +2277,7 @@ static int read_partial_message(struct ceph_connection *con) + con->in_base_pos = -front_len - middle_len - data_len - + sizeof(m->footer); + con->in_tag = CEPH_MSGR_TAG_READY; +- return 0; ++ return 1; + } else if ((s64)seq - (s64)con->in_seq > 1) { + pr_err("read_partial_message bad seq %lld expected %lld\n", + seq, con->in_seq + 1); +@@ -2310,7 +2310,7 @@ static int read_partial_message(struct ceph_connection *con) + sizeof(m->footer); + con->in_tag = CEPH_MSGR_TAG_READY; + con->in_seq++; +- return 0; ++ return 1; + } + + BUG_ON(!con->in_msg); +diff --git a/net/core/scm.c b/net/core/scm.c +index dbc6bfcdf446..7a6cf8351cde 100644 +--- a/net/core/scm.c ++++ b/net/core/scm.c +@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp) + *fplp = fpl; + fpl->count = 0; + fpl->max = SCM_MAX_FD; ++ fpl->user = NULL; + } + fpp = &fpl->fp[fpl->count]; + +@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp) + *fpp++ = file; + fpl->count++; + } ++ ++ if (!fpl->user) ++ fpl->user = get_uid(current_user()); ++ + return num; + } + +@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm) + scm->fp = NULL; + for (i=fpl->count-1; i>=0; i--) + fput(fpl->fp[i]); ++ free_uid(fpl->user); + kfree(fpl); + } + } +@@ -337,6 +343,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl) + for (i = 0; i < fpl->count; i++) + get_file(fpl->fp[i]); + new_fpl->max = new_fpl->count; ++ new_fpl->user = get_uid(fpl->user); + } + return new_fpl; + } +diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c +index 89aacfd2756d..9ba6d8c7c793 100644 +--- a/net/mac80211/mesh_pathtbl.c ++++ b/net/mac80211/mesh_pathtbl.c +@@ -747,10 +747,8 @@ void mesh_plink_broken(struct sta_info *sta) + static void mesh_path_node_reclaim(struct rcu_head *rp) + { + struct mpath_node *node = container_of(rp, struct mpath_node, rcu); +- struct ieee80211_sub_if_data *sdata = node->mpath->sdata; + + del_timer_sync(&node->mpath->timer); +- atomic_dec(&sdata->u.mesh.mpaths); + kfree(node->mpath); + kfree(node); + } +@@ -758,8 +756,9 @@ static void mesh_path_node_reclaim(struct rcu_head *rp) + /* needs to be called with the corresponding 
hashwlock taken */ + static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) + { +- struct mesh_path *mpath; +- mpath = node->mpath; ++ struct mesh_path *mpath = node->mpath; ++ struct ieee80211_sub_if_data *sdata = node->mpath->sdata; ++ + spin_lock(&mpath->state_lock); + mpath->flags |= MESH_PATH_RESOLVING; + if (mpath->is_gate) +@@ -767,6 +766,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) + hlist_del_rcu(&node->list); + call_rcu(&node->rcu, mesh_path_node_reclaim); + spin_unlock(&mpath->state_lock); ++ atomic_dec(&sdata->u.mesh.mpaths); + atomic_dec(&tbl->entries); + } + +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c +index 0283baedcdfb..9dc979abb461 100644 +--- a/net/netfilter/nf_conntrack_core.c ++++ b/net/netfilter/nf_conntrack_core.c +@@ -311,6 +311,21 @@ static void death_by_timeout(unsigned long ul_conntrack) + nf_ct_put(ct); + } + ++static inline bool ++nf_ct_key_equal(struct nf_conntrack_tuple_hash *h, ++ const struct nf_conntrack_tuple *tuple, ++ u16 zone) ++{ ++ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); ++ ++ /* A conntrack can be recreated with the equal tuple, ++ * so we need to check that the conntrack is confirmed ++ */ ++ return nf_ct_tuple_equal(tuple, &h->tuple) && ++ nf_ct_zone(ct) == zone && ++ nf_ct_is_confirmed(ct); ++} ++ + /* + * Warning : + * - Caller must take a reference on returned object +@@ -332,8 +347,7 @@ ____nf_conntrack_find(struct net *net, u16 zone, + local_bh_disable(); + begin: + hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) { +- if (nf_ct_tuple_equal(tuple, &h->tuple) && +- nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) { ++ if (nf_ct_key_equal(h, tuple, zone)) { + NF_CT_STAT_INC(net, found); + local_bh_enable(); + return h; +@@ -380,8 +394,7 @@ begin: + !atomic_inc_not_zero(&ct->ct_general.use))) + h = NULL; + else { +- if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) || +- nf_ct_zone(ct) != zone)) { ++ if (unlikely(!nf_ct_key_equal(h, tuple, zone))) { + nf_ct_put(ct); + goto begin; + } +diff --git a/net/rds/connection.c b/net/rds/connection.c +index e88bf3976e54..642ad42c416b 100644 +--- a/net/rds/connection.c ++++ b/net/rds/connection.c +@@ -177,12 +177,6 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr, + } + } + +- if (trans == NULL) { +- kmem_cache_free(rds_conn_slab, conn); +- conn = ERR_PTR(-ENODEV); +- goto out; +- } +- + conn->c_trans = trans; + + ret = trans->conn_alloc(conn, gfp); +diff --git a/net/rds/send.c b/net/rds/send.c +index 88eace57dd6b..31c9fa464b11 100644 +--- a/net/rds/send.c ++++ b/net/rds/send.c +@@ -955,11 +955,13 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, + release_sock(sk); + } + +- /* racing with another thread binding seems ok here */ ++ lock_sock(sk); + if (daddr == 0 || rs->rs_bound_addr == 0) { ++ release_sock(sk); + ret = -ENOTCONN; /* XXX not a great errno */ + goto out; + } ++ release_sock(sk); + + /* size of rm including all sgs */ + ret = rds_rm_size(msg, payload_len); +diff --git a/net/rfkill/core.c b/net/rfkill/core.c +index 1cec5e4f3a5e..6563cc04c578 100644 +--- a/net/rfkill/core.c ++++ b/net/rfkill/core.c +@@ -51,7 +51,6 @@ + struct rfkill { + spinlock_t lock; + +- const char *name; + enum rfkill_type type; + + unsigned long state; +@@ -75,6 +74,7 @@ struct rfkill { + struct delayed_work poll_work; + struct work_struct uevent_work; + struct work_struct sync_work; ++ char name[]; + }; + #define to_rfkill(d) 
container_of(d, struct rfkill, dev) + +@@ -871,14 +871,14 @@ struct rfkill * __must_check rfkill_alloc(const char *name, + if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES)) + return NULL; + +- rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); ++ rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL); + if (!rfkill) + return NULL; + + spin_lock_init(&rfkill->lock); + INIT_LIST_HEAD(&rfkill->node); + rfkill->type = type; +- rfkill->name = name; ++ strcpy(rfkill->name, name); + rfkill->ops = ops; + rfkill->data = ops_data; + +@@ -1088,17 +1088,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait) + return res; + } + +-static bool rfkill_readable(struct rfkill_data *data) +-{ +- bool r; +- +- mutex_lock(&data->mtx); +- r = !list_empty(&data->events); +- mutex_unlock(&data->mtx); +- +- return r; +-} +- + static ssize_t rfkill_fop_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) + { +@@ -1115,8 +1104,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf, + goto out; + } + mutex_unlock(&data->mtx); ++ /* since we re-check and it just compares pointers, ++ * using !list_empty() without locking isn't a problem ++ */ + ret = wait_event_interruptible(data->read_wait, +- rfkill_readable(data)); ++ !list_empty(&data->events)); + mutex_lock(&data->mtx); + + if (ret) +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c +index 231b71944c52..a4266b9b2429 100644 +--- a/net/sunrpc/cache.c ++++ b/net/sunrpc/cache.c +@@ -1221,7 +1221,7 @@ int qword_get(char **bpp, char *dest, int bufsize) + if (bp[0] == '\\' && bp[1] == 'x') { + /* HEX STRING */ + bp += 2; +- while (len < bufsize) { ++ while (len < bufsize - 1) { + int h, l; + + h = hex_to_bin(bp[0]); +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index a673c1f4f638..8f118c7c19e1 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -1466,7 +1466,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) + UNIXCB(skb).fp = NULL; + + for (i = scm->fp->count-1; i >= 0; i--) +- unix_notinflight(scm->fp->fp[i]); ++ unix_notinflight(scm->fp->user, scm->fp->fp[i]); + } + + static void unix_destruct_scm(struct sk_buff *skb) +@@ -1531,7 +1531,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) + return -ENOMEM; + + for (i = scm->fp->count - 1; i >= 0; i--) +- unix_inflight(scm->fp->fp[i]); ++ unix_inflight(scm->fp->user, scm->fp->fp[i]); + return max_level; + } + +diff --git a/net/unix/garbage.c b/net/unix/garbage.c +index 06730fe6ad9d..a72182d6750f 100644 +--- a/net/unix/garbage.c ++++ b/net/unix/garbage.c +@@ -122,7 +122,7 @@ struct sock *unix_get_socket(struct file *filp) + * descriptor if it is for an AF_UNIX socket. 
+ */ + +-void unix_inflight(struct file *fp) ++void unix_inflight(struct user_struct *user, struct file *fp) + { + struct sock *s = unix_get_socket(fp); + +@@ -139,11 +139,11 @@ void unix_inflight(struct file *fp) + } + unix_tot_inflight++; + } +- fp->f_cred->user->unix_inflight++; ++ user->unix_inflight++; + spin_unlock(&unix_gc_lock); + } + +-void unix_notinflight(struct file *fp) ++void unix_notinflight(struct user_struct *user, struct file *fp) + { + struct sock *s = unix_get_socket(fp); + +@@ -157,7 +157,7 @@ void unix_notinflight(struct file *fp) + list_del_init(&u->link); + unix_tot_inflight--; + } +- fp->f_cred->user->unix_inflight--; ++ user->unix_inflight--; + spin_unlock(&unix_gc_lock); + } + +diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c +index 9c22317778eb..ee625e3a56ba 100644 +--- a/scripts/recordmcount.c ++++ b/scripts/recordmcount.c +@@ -189,6 +189,20 @@ static void *mmap_file(char const *fname) + addr = umalloc(sb.st_size); + uread(fd_map, addr, sb.st_size); + } ++ if (sb.st_nlink != 1) { ++ /* file is hard-linked, break the hard link */ ++ close(fd_map); ++ if (unlink(fname) < 0) { ++ perror(fname); ++ fail_file(); ++ } ++ fd_map = open(fname, O_RDWR | O_CREAT, sb.st_mode); ++ if (fd_map < 0) { ++ perror(fname); ++ fail_file(); ++ } ++ uwrite(fd_map, addr, sb.st_size); ++ } + return addr; + } + +diff --git a/tools/Makefile b/tools/Makefile +index 41067f304215..b82a15b92b1c 100644 +--- a/tools/Makefile ++++ b/tools/Makefile +@@ -22,6 +22,10 @@ help: + @echo ' from the kernel command line to build and install one of' + @echo ' the tools above' + @echo '' ++ @echo ' $$ make tools/all' ++ @echo '' ++ @echo ' builds all tools.' ++ @echo '' + @echo ' $$ make tools/install' + @echo '' + @echo ' installs all tools.' +@@ -50,6 +54,10 @@ selftests: FORCE + turbostat x86_energy_perf_policy: FORCE + $(call descend,power/x86/$@) + ++all: cgroup cpupower firewire lguest \ ++ perf selftests turbostat usb \ ++ virtio vm net x86_energy_perf_policy ++ + cpupower_install: + $(call descend,power/$(@:_install=),install) + +diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c +index ea475cd03511..ca2d05a07b57 100644 +--- a/virt/kvm/async_pf.c ++++ b/virt/kvm/async_pf.c +@@ -158,7 +158,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, + * do alloc nowait since if we are going to sleep anyway we + * may as well sleep faulting in page + */ +- work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT); ++ work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN); + if (!work) + return 0; + diff --git a/patch/kernel/odroidxu4-default/patch-3.10.99-100.patch b/patch/kernel/odroidxu4-default/patch-3.10.99-100.patch new file mode 100644 index 000000000..da39546c5 --- /dev/null +++ b/patch/kernel/odroidxu4-default/patch-3.10.99-100.patch @@ -0,0 +1,1156 @@ +diff --git a/Makefile b/Makefile +index f1e6491fd7d8..40d4d3bf52c3 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 10 +-SUBLEVEL = 99 ++SUBLEVEL = 100 + EXTRAVERSION = + NAME = TOSSUG Baby Fish + +diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S +index c9305ef1d411..e73982b93537 100644 +--- a/arch/x86/ia32/ia32entry.S ++++ b/arch/x86/ia32/ia32entry.S +@@ -422,6 +422,7 @@ ENTRY(ia32_syscall) + /*CFI_REL_OFFSET cs,CS-RIP*/ + CFI_REL_OFFSET rip,RIP-RIP + PARAVIRT_ADJUST_EXCEPTION_FRAME ++ ASM_CLAC /* Do this early to minimize exposure */ + SWAPGS + /* + * No need to follow this irqs on/off section: the syscall +diff --git a/arch/x86/kernel/acpi/sleep.c 
b/arch/x86/kernel/acpi/sleep.c +index ec94e11807dc..ca0805633f26 100644 +--- a/arch/x86/kernel/acpi/sleep.c ++++ b/arch/x86/kernel/acpi/sleep.c +@@ -16,6 +16,7 @@ + #include + #include + ++#include + #include "../../realmode/rm/wakeup.h" + #include "sleep.h" + +@@ -96,7 +97,13 @@ int acpi_suspend_lowlevel(void) + saved_magic = 0x123456789abcdef0L; + #endif /* CONFIG_64BIT */ + ++ /* ++ * Pause/unpause graph tracing around do_suspend_lowlevel as it has ++ * inconsistent call/return info after it jumps to the wakeup vector. ++ */ ++ pause_graph_tracing(); + do_suspend_lowlevel(); ++ unpause_graph_tracing(); + return 0; + } + +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index 04e7db668362..f3f0801a0e81 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -673,19 +673,18 @@ static int ata_ioc32(struct ata_port *ap) + int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, + int cmd, void __user *arg) + { +- int val = -EINVAL, rc = -EINVAL; ++ unsigned long val; ++ int rc = -EINVAL; + unsigned long flags; + + switch (cmd) { +- case ATA_IOC_GET_IO32: ++ case HDIO_GET_32BIT: + spin_lock_irqsave(ap->lock, flags); + val = ata_ioc32(ap); + spin_unlock_irqrestore(ap->lock, flags); +- if (copy_to_user(arg, &val, 1)) +- return -EFAULT; +- return 0; ++ return put_user(val, (unsigned long __user *)arg); + +- case ATA_IOC_SET_IO32: ++ case HDIO_SET_32BIT: + val = (unsigned long) arg; + rc = 0; + spin_lock_irqsave(ap->lock, flags); +diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c +index 88cd940ece63..453c816b4537 100644 +--- a/drivers/edac/edac_mc_sysfs.c ++++ b/drivers/edac/edac_mc_sysfs.c +@@ -973,21 +973,26 @@ nomem: + */ + int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) + { ++ char *name; + int i, err; + + /* + * The memory controller needs its own bus, in order to avoid + * namespace conflicts at /sys/bus/edac. 
+ */ +- mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); +- if (!mci->bus->name) ++ name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); ++ if (!name) + return -ENOMEM; + ++ mci->bus->name = name; ++ + edac_dbg(0, "creating bus %s\n", mci->bus->name); + + err = bus_register(mci->bus); +- if (err < 0) ++ if (err < 0) { ++ kfree(name); + return err; ++ } + + /* get the /sys/devices/system/edac subsys reference */ + mci->dev.type = &mci_attr_type; +@@ -1071,7 +1076,8 @@ fail: + fail2: + device_unregister(&mci->dev); + bus_unregister(mci->bus); +- kfree(mci->bus->name); ++ kfree(name); ++ + return err; + } + +@@ -1102,10 +1108,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) + + void edac_unregister_sysfs(struct mem_ctl_info *mci) + { ++ const char *name = mci->bus->name; ++ + edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev)); + device_unregister(&mci->dev); + bus_unregister(mci->bus); +- kfree(mci->bus->name); ++ kfree(name); + } + + static void mc_attr_release(struct device *dev) +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c +index 313ccaf25f49..62834322b337 100644 +--- a/drivers/gpu/drm/ast/ast_main.c ++++ b/drivers/gpu/drm/ast/ast_main.c +@@ -124,7 +124,7 @@ static int ast_get_dram_info(struct drm_device *dev) + } while (ast_read32(ast, 0x10000) != 0x01); + data = ast_read32(ast, 0x10004); + +- if (data & 0x400) ++ if (data & 0x40) + ast->dram_bus_width = 16; + else + ast->dram_bus_width = 32; +diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c +index bb166849aa6e..f0bac68254b7 100644 +--- a/drivers/gpu/drm/radeon/radeon_sa.c ++++ b/drivers/gpu/drm/radeon/radeon_sa.c +@@ -349,13 +349,8 @@ int radeon_sa_bo_new(struct radeon_device *rdev, + /* see if we can skip over some allocations */ + } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); + +- for (i = 0; i < RADEON_NUM_RINGS; ++i) +- radeon_fence_ref(fences[i]); +- + spin_unlock(&sa_manager->wq.lock); + r = radeon_fence_wait_any(rdev, fences, false); +- for (i = 0; i < RADEON_NUM_RINGS; ++i) +- radeon_fence_unref(&fences[i]); + spin_lock(&sa_manager->wq.lock); + /* if we have nothing to wait for block */ + if (r == -ENOENT && block) { +diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c +index 2a1b6e037e1a..0134ba32a057 100644 +--- a/drivers/mtd/ubi/upd.c ++++ b/drivers/mtd/ubi/upd.c +@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, + vol->changing_leb = 1; + vol->ch_lnum = req->lnum; + +- vol->upd_buf = vmalloc(req->bytes); ++ vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size)); + if (!vol->upd_buf) + return -ENOMEM; + +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 303f3b3fb65f..84b770461655 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -164,6 +164,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ + { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ + { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ ++ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ + { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 9bab34cf01d4..24366a2afea6 100644 +--- 
a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -271,6 +271,7 @@ static void option_instat_callback(struct urb *urb); + #define TELIT_PRODUCT_UE910_V2 0x1012 + #define TELIT_PRODUCT_LE922_USBCFG0 0x1042 + #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 ++#define TELIT_PRODUCT_LE922_USBCFG5 0x1045 + #define TELIT_PRODUCT_LE920 0x1200 + #define TELIT_PRODUCT_LE910 0x1201 + +@@ -1140,6 +1141,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ ++ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */ ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), +@@ -1191,6 +1194,8 @@ static const struct usb_device_id option_ids[] = { + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3), + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), ++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), + .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c +index d17c5d72cd29..4c86850bd627 100644 +--- a/fs/cifs/cifssmb.c ++++ b/fs/cifs/cifssmb.c +@@ -1389,11 +1389,10 @@ openRetry: + * current bigbuf. + */ + static int +-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) ++discard_remaining_data(struct TCP_Server_Info *server) + { + unsigned int rfclen = get_rfc1002_length(server->smallbuf); + int remaining = rfclen + 4 - server->total_read; +- struct cifs_readdata *rdata = mid->callback_data; + + while (remaining > 0) { + int length; +@@ -1407,10 +1406,20 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) + remaining -= length; + } + +- dequeue_mid(mid, rdata->result); + return 0; + } + ++static int ++cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) ++{ ++ int length; ++ struct cifs_readdata *rdata = mid->callback_data; ++ ++ length = discard_remaining_data(server); ++ dequeue_mid(mid, rdata->result); ++ return length; ++} ++ + int + cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) + { +@@ -1439,6 +1448,12 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) + return length; + server->total_read += length; + ++ if (server->ops->is_status_pending && ++ server->ops->is_status_pending(buf, server, 0)) { ++ discard_remaining_data(server); ++ return -1; ++ } ++ + /* Was the SMB read successful? */ + rdata->result = server->ops->map_error(buf, false); + if (rdata->result != 0) { +diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking +index 3ea36554107f..8918ac905a3b 100644 +--- a/fs/jffs2/README.Locking ++++ b/fs/jffs2/README.Locking +@@ -2,10 +2,6 @@ + JFFS2 LOCKING DOCUMENTATION + --------------------------- + +-At least theoretically, JFFS2 does not require the Big Kernel Lock +-(BKL), which was always helpfully obtained for it by Linux 2.4 VFS +-code. It has its own locking, as described below. 
+- + This document attempts to describe the existing locking rules for + JFFS2. It is not expected to remain perfectly up to date, but ought to + be fairly close. +@@ -69,6 +65,7 @@ Ordering constraints: + any f->sem held. + 2. Never attempt to lock two file mutexes in one thread. + No ordering rules have been made for doing so. ++ 3. Never lock a page cache page with f->sem held. + + + erase_completion_lock spinlock +diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c +index a3750f902adc..c1f04947d7dc 100644 +--- a/fs/jffs2/build.c ++++ b/fs/jffs2/build.c +@@ -49,7 +49,8 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) + + + static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, +- struct jffs2_inode_cache *ic) ++ struct jffs2_inode_cache *ic, ++ int *dir_hardlinks) + { + struct jffs2_full_dirent *fd; + +@@ -68,19 +69,21 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, + dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", + fd->name, fd->ino, ic->ino); + jffs2_mark_node_obsolete(c, fd->raw); ++ /* Clear the ic/raw union so it doesn't cause problems later. */ ++ fd->ic = NULL; + continue; + } + ++ /* From this point, fd->raw is no longer used so we can set fd->ic */ ++ fd->ic = child_ic; ++ child_ic->pino_nlink++; ++ /* If we appear (at this stage) to have hard-linked directories, ++ * set a flag to trigger a scan later */ + if (fd->type == DT_DIR) { +- if (child_ic->pino_nlink) { +- JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", +- fd->name, fd->ino, ic->ino); +- /* TODO: What do we do about it? */ +- } else { +- child_ic->pino_nlink = ic->ino; +- } +- } else +- child_ic->pino_nlink++; ++ child_ic->flags |= INO_FLAGS_IS_DIR; ++ if (child_ic->pino_nlink > 1) ++ *dir_hardlinks = 1; ++ } + + dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino); + /* Can't free scan_dents so far. We might need them in pass 2 */ +@@ -94,8 +97,7 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, + */ + static int jffs2_build_filesystem(struct jffs2_sb_info *c) + { +- int ret; +- int i; ++ int ret, i, dir_hardlinks = 0; + struct jffs2_inode_cache *ic; + struct jffs2_full_dirent *fd; + struct jffs2_full_dirent *dead_fds = NULL; +@@ -119,7 +121,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + /* Now scan the directory tree, increasing nlink according to every dirent found. */ + for_each_inode(i, c, ic) { + if (ic->scan_dents) { +- jffs2_build_inode_pass1(c, ic); ++ jffs2_build_inode_pass1(c, ic, &dir_hardlinks); + cond_resched(); + } + } +@@ -155,6 +157,20 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + } + + dbg_fsbuild("pass 2a complete\n"); ++ ++ if (dir_hardlinks) { ++ /* If we detected directory hardlinks earlier, *hopefully* ++ * they are gone now because some of the links were from ++ * dead directories which still had some old dirents lying ++ * around and not yet garbage-collected, but which have ++ * been discarded above. So clear the pino_nlink field ++ * in each directory, so that the final scan below can ++ * print appropriate warnings. 
*/ ++ for_each_inode(i, c, ic) { ++ if (ic->flags & INO_FLAGS_IS_DIR) ++ ic->pino_nlink = 0; ++ } ++ } + dbg_fsbuild("freeing temporary data structures\n"); + + /* Finally, we can scan again and free the dirent structs */ +@@ -162,6 +178,33 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + while(ic->scan_dents) { + fd = ic->scan_dents; + ic->scan_dents = fd->next; ++ /* We do use the pino_nlink field to count nlink of ++ * directories during fs build, so set it to the ++ * parent ino# now. Now that there's hopefully only ++ * one. */ ++ if (fd->type == DT_DIR) { ++ if (!fd->ic) { ++ /* We'll have complained about it and marked the coresponding ++ raw node obsolete already. Just skip it. */ ++ continue; ++ } ++ ++ /* We *have* to have set this in jffs2_build_inode_pass1() */ ++ BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR)); ++ ++ /* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks ++ * is set. Otherwise, we know this should never trigger anyway, so ++ * we don't do the check. And ic->pino_nlink still contains the nlink ++ * value (which is 1). */ ++ if (dir_hardlinks && fd->ic->pino_nlink) { ++ JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n", ++ fd->name, fd->ino, ic->ino, fd->ic->pino_nlink); ++ /* Should we unlink it from its previous parent? */ ++ } ++ ++ /* For directories, ic->pino_nlink holds that parent inode # */ ++ fd->ic->pino_nlink = ic->ino; ++ } + jffs2_free_full_dirent(fd); + } + ic->scan_dents = NULL; +@@ -240,11 +283,7 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, + + /* Reduce nlink of the child. If it's now zero, stick it on the + dead_fds list to be cleaned up later. Else just free the fd */ +- +- if (fd->type == DT_DIR) +- child_ic->pino_nlink = 0; +- else +- child_ic->pino_nlink--; ++ child_ic->pino_nlink--; + + if (!child_ic->pino_nlink) { + dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n", +diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c +index 1506673c087e..60ef3fb707ff 100644 +--- a/fs/jffs2/file.c ++++ b/fs/jffs2/file.c +@@ -138,39 +138,33 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + struct page *pg; + struct inode *inode = mapping->host; + struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); +- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); +- struct jffs2_raw_inode ri; +- uint32_t alloc_len = 0; + pgoff_t index = pos >> PAGE_CACHE_SHIFT; + uint32_t pageofs = index << PAGE_CACHE_SHIFT; + int ret = 0; + +- jffs2_dbg(1, "%s()\n", __func__); +- +- if (pageofs > inode->i_size) { +- ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, +- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); +- if (ret) +- return ret; +- } +- +- mutex_lock(&f->sem); + pg = grab_cache_page_write_begin(mapping, index, flags); +- if (!pg) { +- if (alloc_len) +- jffs2_complete_reservation(c); +- mutex_unlock(&f->sem); ++ if (!pg) + return -ENOMEM; +- } + *pagep = pg; + +- if (alloc_len) { ++ jffs2_dbg(1, "%s()\n", __func__); ++ ++ if (pageofs > inode->i_size) { + /* Make new hole frag from old EOF to new page */ ++ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); ++ struct jffs2_raw_inode ri; + struct jffs2_full_dnode *fn; ++ uint32_t alloc_len; + + jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", + (unsigned int)inode->i_size, pageofs); + ++ ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, ++ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); ++ if (ret) ++ goto out_page; ++ ++ 
mutex_lock(&f->sem); + memset(&ri, 0, sizeof(ri)); + + ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); +@@ -197,6 +191,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + if (IS_ERR(fn)) { + ret = PTR_ERR(fn); + jffs2_complete_reservation(c); ++ mutex_unlock(&f->sem); + goto out_page; + } + ret = jffs2_add_full_dnode_to_inode(c, f, fn); +@@ -211,10 +206,12 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + jffs2_mark_node_obsolete(c, fn->raw); + jffs2_free_full_dnode(fn); + jffs2_complete_reservation(c); ++ mutex_unlock(&f->sem); + goto out_page; + } + jffs2_complete_reservation(c); + inode->i_size = pageofs; ++ mutex_unlock(&f->sem); + } + + /* +@@ -223,18 +220,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + * case of a short-copy. + */ + if (!PageUptodate(pg)) { ++ mutex_lock(&f->sem); + ret = jffs2_do_readpage_nolock(inode, pg); ++ mutex_unlock(&f->sem); + if (ret) + goto out_page; + } +- mutex_unlock(&f->sem); + jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags); + return ret; + + out_page: + unlock_page(pg); + page_cache_release(pg); +- mutex_unlock(&f->sem); + return ret; + } + +diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c +index 5a2dec2b064c..95d5880a63ee 100644 +--- a/fs/jffs2/gc.c ++++ b/fs/jffs2/gc.c +@@ -1296,14 +1296,17 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era + BUG_ON(start > orig_start); + } + +- /* First, use readpage() to read the appropriate page into the page cache */ +- /* Q: What happens if we actually try to GC the _same_ page for which commit_write() +- * triggered garbage collection in the first place? +- * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the +- * page OK. We'll actually write it out again in commit_write, which is a little +- * suboptimal, but at least we're correct. +- */ ++ /* The rules state that we must obtain the page lock *before* f->sem, so ++ * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's ++ * actually going to *change* so we're safe; we only allow reading. ++ * ++ * It is important to note that jffs2_write_begin() will ensure that its ++ * page is marked Uptodate before allocating space. That means that if we ++ * end up here trying to GC the *same* page that jffs2_write_begin() is ++ * trying to write out, read_cache_page() will not deadlock. 
*/ ++ mutex_unlock(&f->sem); + pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg); ++ mutex_lock(&f->sem); + + if (IS_ERR(pg_ptr)) { + pr_warn("read_cache_page() returned error: %ld\n", +diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h +index fa35ff79ab35..0637271f3770 100644 +--- a/fs/jffs2/nodelist.h ++++ b/fs/jffs2/nodelist.h +@@ -194,6 +194,7 @@ struct jffs2_inode_cache { + #define INO_STATE_CLEARING 6 /* In clear_inode() */ + + #define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */ ++#define INO_FLAGS_IS_DIR 0x02 /* is a directory */ + + #define RAWNODE_CLASS_INODE_CACHE 0 + #define RAWNODE_CLASS_XATTR_DATUM 1 +@@ -249,7 +250,10 @@ struct jffs2_readinode_info + + struct jffs2_full_dirent + { +- struct jffs2_raw_node_ref *raw; ++ union { ++ struct jffs2_raw_node_ref *raw; ++ struct jffs2_inode_cache *ic; /* Just during part of build */ ++ }; + struct jffs2_full_dirent *next; + uint32_t version; + uint32_t ino; /* == zero for unlink */ +diff --git a/fs/locks.c b/fs/locks.c +index 0274c953b07d..f7b1de7e6735 100644 +--- a/fs/locks.c ++++ b/fs/locks.c +@@ -1852,7 +1852,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, + goto out; + } + +-again: + error = flock_to_posix_lock(filp, file_lock, &flock); + if (error) + goto out; +@@ -1883,19 +1882,22 @@ again: + * Attempt to detect a close/fcntl race and recover by + * releasing the lock that was just acquired. + */ +- /* +- * we need that spin_lock here - it prevents reordering between +- * update of inode->i_flock and check for it done in close(). +- * rcu_read_lock() wouldn't do. +- */ +- spin_lock(¤t->files->file_lock); +- f = fcheck(fd); +- spin_unlock(¤t->files->file_lock); +- if (!error && f != filp && flock.l_type != F_UNLCK) { +- flock.l_type = F_UNLCK; +- goto again; ++ if (!error && file_lock->fl_type != F_UNLCK) { ++ /* ++ * We need that spin_lock here - it prevents reordering between ++ * update of inode->i_flock and check for it done in ++ * close(). rcu_read_lock() wouldn't do. ++ */ ++ spin_lock(¤t->files->file_lock); ++ f = fcheck(fd); ++ spin_unlock(¤t->files->file_lock); ++ if (f != filp) { ++ file_lock->fl_type = F_UNLCK; ++ error = do_lock_file_wait(filp, cmd, file_lock); ++ WARN_ON_ONCE(error); ++ error = -EBADF; ++ } + } +- + out: + locks_free_lock(file_lock); + return error; +@@ -1970,7 +1972,6 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, + goto out; + } + +-again: + error = flock64_to_posix_lock(filp, file_lock, &flock); + if (error) + goto out; +@@ -2001,14 +2002,22 @@ again: + * Attempt to detect a close/fcntl race and recover by + * releasing the lock that was just acquired. + */ +- spin_lock(¤t->files->file_lock); +- f = fcheck(fd); +- spin_unlock(¤t->files->file_lock); +- if (!error && f != filp && flock.l_type != F_UNLCK) { +- flock.l_type = F_UNLCK; +- goto again; ++ if (!error && file_lock->fl_type != F_UNLCK) { ++ /* ++ * We need that spin_lock here - it prevents reordering between ++ * update of inode->i_flock and check for it done in ++ * close(). rcu_read_lock() wouldn't do. 
++ */ ++ spin_lock(¤t->files->file_lock); ++ f = fcheck(fd); ++ spin_unlock(¤t->files->file_lock); ++ if (f != filp) { ++ file_lock->fl_type = F_UNLCK; ++ error = do_lock_file_wait(filp, cmd, file_lock); ++ WARN_ON_ONCE(error); ++ error = -EBADF; ++ } + } +- + out: + locks_free_lock(file_lock); + return error; +diff --git a/include/linux/ata.h b/include/linux/ata.h +index ee0bd9524055..f60ffe29b3a1 100644 +--- a/include/linux/ata.h ++++ b/include/linux/ata.h +@@ -477,8 +477,8 @@ enum ata_tf_protocols { + }; + + enum ata_ioctls { +- ATA_IOC_GET_IO32 = 0x309, +- ATA_IOC_SET_IO32 = 0x324, ++ ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */ ++ ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */ + }; + + /* core structures */ +diff --git a/include/linux/libata.h b/include/linux/libata.h +index 8ad0771b88ab..8017e5c459cf 100644 +--- a/include/linux/libata.h ++++ b/include/linux/libata.h +@@ -666,7 +666,7 @@ struct ata_device { + union { + u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ + u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ +- }; ++ } ____cacheline_aligned; + + /* DEVSLP Timing Variables from Identify Device Data Log */ + u8 devslp_timing[ATA_LOG_DEVSLP_SIZE]; +diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c +index 2bb95a7a8809..c14565bde887 100644 +--- a/sound/core/control_compat.c ++++ b/sound/core/control_compat.c +@@ -170,6 +170,19 @@ struct snd_ctl_elem_value32 { + unsigned char reserved[128]; + }; + ++#ifdef CONFIG_X86_X32 ++/* x32 has a different alignment for 64bit values from ia32 */ ++struct snd_ctl_elem_value_x32 { ++ struct snd_ctl_elem_id id; ++ unsigned int indirect; /* bit-field causes misalignment */ ++ union { ++ s32 integer[128]; ++ unsigned char data[512]; ++ s64 integer64[64]; ++ } value; ++ unsigned char reserved[128]; ++}; ++#endif /* CONFIG_X86_X32 */ + + /* get the value type and count of the control */ + static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id, +@@ -219,9 +232,11 @@ static int get_elem_size(int type, int count) + + static int copy_ctl_value_from_user(struct snd_card *card, + struct snd_ctl_elem_value *data, +- struct snd_ctl_elem_value32 __user *data32, ++ void __user *userdata, ++ void __user *valuep, + int *typep, int *countp) + { ++ struct snd_ctl_elem_value32 __user *data32 = userdata; + int i, type, size; + int uninitialized_var(count); + unsigned int indirect; +@@ -239,8 +254,9 @@ static int copy_ctl_value_from_user(struct snd_card *card, + if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || + type == SNDRV_CTL_ELEM_TYPE_INTEGER) { + for (i = 0; i < count; i++) { ++ s32 __user *intp = valuep; + int val; +- if (get_user(val, &data32->value.integer[i])) ++ if (get_user(val, &intp[i])) + return -EFAULT; + data->value.integer.value[i] = val; + } +@@ -250,8 +266,7 @@ static int copy_ctl_value_from_user(struct snd_card *card, + printk(KERN_ERR "snd_ioctl32_ctl_elem_value: unknown type %d\n", type); + return -EINVAL; + } +- if (copy_from_user(data->value.bytes.data, +- data32->value.data, size)) ++ if (copy_from_user(data->value.bytes.data, valuep, size)) + return -EFAULT; + } + +@@ -261,7 +276,8 @@ static int copy_ctl_value_from_user(struct snd_card *card, + } + + /* restore the value to 32bit */ +-static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32, ++static int copy_ctl_value_to_user(void __user *userdata, ++ void __user *valuep, + struct snd_ctl_elem_value *data, + int type, int count) + { +@@ -270,22 +286,22 @@ static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user 
*data32, + if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || + type == SNDRV_CTL_ELEM_TYPE_INTEGER) { + for (i = 0; i < count; i++) { ++ s32 __user *intp = valuep; + int val; + val = data->value.integer.value[i]; +- if (put_user(val, &data32->value.integer[i])) ++ if (put_user(val, &intp[i])) + return -EFAULT; + } + } else { + size = get_elem_size(type, count); +- if (copy_to_user(data32->value.data, +- data->value.bytes.data, size)) ++ if (copy_to_user(valuep, data->value.bytes.data, size)) + return -EFAULT; + } + return 0; + } + +-static int snd_ctl_elem_read_user_compat(struct snd_card *card, +- struct snd_ctl_elem_value32 __user *data32) ++static int ctl_elem_read_user(struct snd_card *card, ++ void __user *userdata, void __user *valuep) + { + struct snd_ctl_elem_value *data; + int err, type, count; +@@ -294,7 +310,9 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card, + if (data == NULL) + return -ENOMEM; + +- if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0) ++ err = copy_ctl_value_from_user(card, data, userdata, valuep, ++ &type, &count); ++ if (err < 0) + goto error; + + snd_power_lock(card); +@@ -303,14 +321,15 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card, + err = snd_ctl_elem_read(card, data); + snd_power_unlock(card); + if (err >= 0) +- err = copy_ctl_value_to_user(data32, data, type, count); ++ err = copy_ctl_value_to_user(userdata, valuep, data, ++ type, count); + error: + kfree(data); + return err; + } + +-static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, +- struct snd_ctl_elem_value32 __user *data32) ++static int ctl_elem_write_user(struct snd_ctl_file *file, ++ void __user *userdata, void __user *valuep) + { + struct snd_ctl_elem_value *data; + struct snd_card *card = file->card; +@@ -320,7 +339,9 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, + if (data == NULL) + return -ENOMEM; + +- if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0) ++ err = copy_ctl_value_from_user(card, data, userdata, valuep, ++ &type, &count); ++ if (err < 0) + goto error; + + snd_power_lock(card); +@@ -329,12 +350,39 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, + err = snd_ctl_elem_write(card, file, data); + snd_power_unlock(card); + if (err >= 0) +- err = copy_ctl_value_to_user(data32, data, type, count); ++ err = copy_ctl_value_to_user(userdata, valuep, data, ++ type, count); + error: + kfree(data); + return err; + } + ++static int snd_ctl_elem_read_user_compat(struct snd_card *card, ++ struct snd_ctl_elem_value32 __user *data32) ++{ ++ return ctl_elem_read_user(card, data32, &data32->value); ++} ++ ++static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, ++ struct snd_ctl_elem_value32 __user *data32) ++{ ++ return ctl_elem_write_user(file, data32, &data32->value); ++} ++ ++#ifdef CONFIG_X86_X32 ++static int snd_ctl_elem_read_user_x32(struct snd_card *card, ++ struct snd_ctl_elem_value_x32 __user *data32) ++{ ++ return ctl_elem_read_user(card, data32, &data32->value); ++} ++ ++static int snd_ctl_elem_write_user_x32(struct snd_ctl_file *file, ++ struct snd_ctl_elem_value_x32 __user *data32) ++{ ++ return ctl_elem_write_user(file, data32, &data32->value); ++} ++#endif /* CONFIG_X86_X32 */ ++ + /* add or replace a user control */ + static int snd_ctl_elem_add_compat(struct snd_ctl_file *file, + struct snd_ctl_elem_info32 __user *data32, +@@ -393,6 +441,10 @@ enum { + SNDRV_CTL_IOCTL_ELEM_WRITE32 = _IOWR('U', 0x13, struct 
snd_ctl_elem_value32), + SNDRV_CTL_IOCTL_ELEM_ADD32 = _IOWR('U', 0x17, struct snd_ctl_elem_info32), + SNDRV_CTL_IOCTL_ELEM_REPLACE32 = _IOWR('U', 0x18, struct snd_ctl_elem_info32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_CTL_IOCTL_ELEM_READ_X32 = _IOWR('U', 0x12, struct snd_ctl_elem_value_x32), ++ SNDRV_CTL_IOCTL_ELEM_WRITE_X32 = _IOWR('U', 0x13, struct snd_ctl_elem_value_x32), ++#endif /* CONFIG_X86_X32 */ + }; + + static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -431,6 +483,12 @@ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, uns + return snd_ctl_elem_add_compat(ctl, argp, 0); + case SNDRV_CTL_IOCTL_ELEM_REPLACE32: + return snd_ctl_elem_add_compat(ctl, argp, 1); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_CTL_IOCTL_ELEM_READ_X32: ++ return snd_ctl_elem_read_user_x32(ctl->card, argp); ++ case SNDRV_CTL_IOCTL_ELEM_WRITE_X32: ++ return snd_ctl_elem_write_user_x32(ctl, argp); ++#endif /* CONFIG_X86_X32 */ + } + + down_read(&snd_ioctl_rwsem); +diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c +index 5268c1f58c25..09a89094dcf7 100644 +--- a/sound/core/rawmidi_compat.c ++++ b/sound/core/rawmidi_compat.c +@@ -94,9 +94,58 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile, + return 0; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has 64bit timespec and 64bit alignment */ ++struct snd_rawmidi_status_x32 { ++ s32 stream; ++ u32 rsvd; /* alignment */ ++ struct timespec tstamp; ++ u32 avail; ++ u32 xruns; ++ unsigned char reserved[16]; ++} __attribute__((packed)); ++ ++#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst)) ++ ++static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile, ++ struct snd_rawmidi_status_x32 __user *src) ++{ ++ int err; ++ struct snd_rawmidi_status status; ++ ++ if (rfile->output == NULL) ++ return -EINVAL; ++ if (get_user(status.stream, &src->stream)) ++ return -EFAULT; ++ ++ switch (status.stream) { ++ case SNDRV_RAWMIDI_STREAM_OUTPUT: ++ err = snd_rawmidi_output_status(rfile->output, &status); ++ break; ++ case SNDRV_RAWMIDI_STREAM_INPUT: ++ err = snd_rawmidi_input_status(rfile->input, &status); ++ break; ++ default: ++ return -EINVAL; ++ } ++ if (err < 0) ++ return err; ++ ++ if (put_timespec(&status.tstamp, &src->tstamp) || ++ put_user(status.avail, &src->avail) || ++ put_user(status.xruns, &src->xruns)) ++ return -EFAULT; ++ ++ return 0; ++} ++#endif /* CONFIG_X86_X32 */ ++ + enum { + SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32), + SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_RAWMIDI_IOCTL_STATUS_X32 = _IOWR('W', 0x20, struct snd_rawmidi_status_x32), ++#endif /* CONFIG_X86_X32 */ + }; + + static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -115,6 +164,10 @@ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsign + return snd_rawmidi_ioctl_params_compat(rfile, argp); + case SNDRV_RAWMIDI_IOCTL_STATUS32: + return snd_rawmidi_ioctl_status_compat(rfile, argp); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_RAWMIDI_IOCTL_STATUS_X32: ++ return snd_rawmidi_ioctl_status_x32(rfile, argp); ++#endif /* CONFIG_X86_X32 */ + } + return -ENOIOCTLCMD; + } +diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c +index 8d4d5e853efe..ab774954c985 100644 +--- a/sound/core/seq/oss/seq_oss.c ++++ b/sound/core/seq/oss/seq_oss.c +@@ -150,8 +150,6 @@ odev_release(struct inode *inode, 
struct file *file) + if ((dp = file->private_data) == NULL) + return 0; + +- snd_seq_oss_drain_write(dp); +- + mutex_lock(®ister_mutex); + snd_seq_oss_release(dp); + mutex_unlock(®ister_mutex); +diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h +index c0154a959d55..2464112b08ad 100644 +--- a/sound/core/seq/oss/seq_oss_device.h ++++ b/sound/core/seq/oss/seq_oss_device.h +@@ -131,7 +131,6 @@ int snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int co + unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait); + + void snd_seq_oss_reset(struct seq_oss_devinfo *dp); +-void snd_seq_oss_drain_write(struct seq_oss_devinfo *dp); + + /* */ + void snd_seq_oss_process_queue(struct seq_oss_devinfo *dp, abstime_t time); +diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c +index b3f39b5ed742..f9e09e458227 100644 +--- a/sound/core/seq/oss/seq_oss_init.c ++++ b/sound/core/seq/oss/seq_oss_init.c +@@ -457,23 +457,6 @@ snd_seq_oss_release(struct seq_oss_devinfo *dp) + + + /* +- * Wait until the queue is empty (if we don't have nonblock) +- */ +-void +-snd_seq_oss_drain_write(struct seq_oss_devinfo *dp) +-{ +- if (! dp->timer->running) +- return; +- if (is_write_mode(dp->file_mode) && !is_nonblock_mode(dp->file_mode) && +- dp->writeq) { +- debug_printk(("syncing..\n")); +- while (snd_seq_oss_writeq_sync(dp->writeq)) +- ; +- } +-} +- +- +-/* + * reset sequencer devices + */ + void +diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c +index e05802ae6e1b..2e908225d754 100644 +--- a/sound/core/timer_compat.c ++++ b/sound/core/timer_compat.c +@@ -70,13 +70,14 @@ static int snd_timer_user_status_compat(struct file *file, + struct snd_timer_status32 __user *_status) + { + struct snd_timer_user *tu; +- struct snd_timer_status status; ++ struct snd_timer_status32 status; + + tu = file->private_data; + if (snd_BUG_ON(!tu->timeri)) + return -ENXIO; + memset(&status, 0, sizeof(status)); +- status.tstamp = tu->tstamp; ++ status.tstamp.tv_sec = tu->tstamp.tv_sec; ++ status.tstamp.tv_nsec = tu->tstamp.tv_nsec; + status.resolution = snd_timer_resolution(tu->timeri); + status.lost = tu->timeri->lost; + status.overrun = tu->overrun; +@@ -88,12 +89,21 @@ static int snd_timer_user_status_compat(struct file *file, + return 0; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has the same struct as x86-64 */ ++#define snd_timer_user_status_x32(file, s) \ ++ snd_timer_user_status(file, s) ++#endif /* CONFIG_X86_X32 */ ++ + /* + */ + + enum { + SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32), + SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_TIMER_IOCTL_STATUS_X32 = _IOW('T', 0x14, struct snd_timer_status), ++#endif /* CONFIG_X86_X32 */ + }; + + static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -122,6 +132,10 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns + return snd_timer_user_info_compat(file, argp); + case SNDRV_TIMER_IOCTL_STATUS32: + return snd_timer_user_status_compat(file, argp); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_TIMER_IOCTL_STATUS_X32: ++ return snd_timer_user_status_x32(file, argp); ++#endif /* CONFIG_X86_X32 */ + } + return -ENOIOCTLCMD; + } +diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c +index 94084cdb130c..9a281f45eb9c 100644 +--- a/sound/pci/rme9652/hdsp.c ++++ b/sound/pci/rme9652/hdsp.c +@@ -2923,7 
+2923,7 @@ static int snd_hdsp_get_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl + { + struct hdsp *hdsp = snd_kcontrol_chip(kcontrol); + +- ucontrol->value.enumerated.item[0] = hdsp_dds_offset(hdsp); ++ ucontrol->value.integer.value[0] = hdsp_dds_offset(hdsp); + return 0; + } + +@@ -2935,7 +2935,7 @@ static int snd_hdsp_put_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl + + if (!snd_hdsp_use_is_exclusive(hdsp)) + return -EBUSY; +- val = ucontrol->value.enumerated.item[0]; ++ val = ucontrol->value.integer.value[0]; + spin_lock_irq(&hdsp->lock); + if (val != hdsp_dds_offset(hdsp)) + change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 1 : 0; +diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c +index dd910d249987..8444098d2a8e 100644 +--- a/sound/pci/rme9652/hdspm.c ++++ b/sound/pci/rme9652/hdspm.c +@@ -1423,6 +1423,9 @@ static void hdspm_set_dds_value(struct hdspm *hdspm, int rate) + { + u64 n; + ++ if (snd_BUG_ON(rate <= 0)) ++ return; ++ + if (rate >= 112000) + rate /= 4; + else if (rate >= 56000) +@@ -2045,6 +2048,8 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm) + } else { + /* slave mode, return external sample rate */ + rate = hdspm_external_sample_rate(hdspm); ++ if (!rate) ++ rate = hdspm->system_sample_rate; + } + } + +@@ -2090,8 +2095,11 @@ static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol, + ucontrol) + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); ++ int rate = ucontrol->value.integer.value[0]; + +- hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]); ++ if (rate < 27000 || rate > 207000) ++ return -EINVAL; ++ hdspm_set_dds_value(hdspm, ucontrol->value.integer.value[0]); + return 0; + } + +@@ -4199,7 +4207,7 @@ static int snd_hdspm_get_tco_word_term(struct snd_kcontrol *kcontrol, + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); + +- ucontrol->value.enumerated.item[0] = hdspm->tco->term; ++ ucontrol->value.integer.value[0] = hdspm->tco->term; + + return 0; + } +@@ -4210,8 +4218,8 @@ static int snd_hdspm_put_tco_word_term(struct snd_kcontrol *kcontrol, + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); + +- if (hdspm->tco->term != ucontrol->value.enumerated.item[0]) { +- hdspm->tco->term = ucontrol->value.enumerated.item[0]; ++ if (hdspm->tco->term != ucontrol->value.integer.value[0]) { ++ hdspm->tco->term = ucontrol->value.integer.value[0]; + + hdspm_tco_write(hdspm); + diff --git a/patch/kernel/odroidxu4-default/rpatch-3.10.100-101.patch b/patch/kernel/odroidxu4-default/rpatch-3.10.100-101.patch new file mode 100644 index 000000000..d9647df43 --- /dev/null +++ b/patch/kernel/odroidxu4-default/rpatch-3.10.100-101.patch @@ -0,0 +1,1493 @@ +diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt +index c477af086e65..686a64bba775 100644 +--- a/Documentation/filesystems/efivarfs.txt ++++ b/Documentation/filesystems/efivarfs.txt +@@ -14,3 +14,10 @@ filesystem. + efivarfs is typically mounted like this, + + mount -t efivarfs none /sys/firmware/efi/efivars ++ ++Due to the presence of numerous firmware bugs where removing non-standard ++UEFI variables causes the system firmware to fail to POST, efivarfs ++files that are not well-known standardized variables are created ++as immutable files. This doesn't prevent removal - "chattr -i" will work - ++but it does prevent this kind of failure from being accomplished ++accidentally. 
+diff --git a/Makefile b/Makefile +index 40d4d3bf52c3..4be9e643cef0 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 10 +-SUBLEVEL = 100 ++SUBLEVEL = 101 + EXTRAVERSION = + NAME = TOSSUG Baby Fish + +diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c +index 6ee59a0eb268..48b4cf6b2a24 100644 +--- a/arch/powerpc/kernel/module_64.c ++++ b/arch/powerpc/kernel/module_64.c +@@ -192,7 +192,7 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) + if (syms[i].st_shndx == SHN_UNDEF) { + char *name = strtab + syms[i].st_name; + if (name[0] == '.') +- memmove(name, name+1, strlen(name)); ++ syms[i].st_name++; + } + } + } +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 04cc2fa7744f..335fe70967a8 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -1487,6 +1487,13 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, + return; + } + break; ++ case MSR_IA32_PEBS_ENABLE: ++ /* PEBS needs a quiescent period after being disabled (to write ++ * a record). Disabling PEBS through VMX MSR swapping doesn't ++ * provide that period, so a CPU could write host's record into ++ * guest's memory. ++ */ ++ wrmsrl(MSR_IA32_PEBS_ENABLE, 0); + } + + for (i = 0; i < m->nr; ++i) +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 41ba726c1ce2..7f2b6dec4b2b 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -1941,6 +1941,8 @@ static void accumulate_steal_time(struct kvm_vcpu *vcpu) + + static void record_steal_time(struct kvm_vcpu *vcpu) + { ++ accumulate_steal_time(vcpu); ++ + if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) + return; + +@@ -2074,12 +2076,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + if (!(data & KVM_MSR_ENABLED)) + break; + +- vcpu->arch.st.last_steal = current->sched_info.run_delay; +- +- preempt_disable(); +- accumulate_steal_time(vcpu); +- preempt_enable(); +- + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + + break; +@@ -2758,7 +2754,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) + vcpu->cpu = cpu; + } + +- accumulate_steal_time(vcpu); + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + } + +diff --git a/crypto/af_alg.c b/crypto/af_alg.c +index 0ca108f3c840..1aaa555fab56 100644 +--- a/crypto/af_alg.c ++++ b/crypto/af_alg.c +@@ -125,23 +125,6 @@ int af_alg_release(struct socket *sock) + } + EXPORT_SYMBOL_GPL(af_alg_release); + +-void af_alg_release_parent(struct sock *sk) +-{ +- struct alg_sock *ask = alg_sk(sk); +- bool last; +- +- sk = ask->parent; +- ask = alg_sk(sk); +- +- lock_sock(sk); +- last = !--ask->refcnt; +- release_sock(sk); +- +- if (last) +- sock_put(sk); +-} +-EXPORT_SYMBOL_GPL(af_alg_release_parent); +- + static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + { + struct sock *sk = sock->sk; +@@ -149,7 +132,6 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + struct sockaddr_alg *sa = (void *)uaddr; + const struct af_alg_type *type; + void *private; +- int err; + + if (sock->state == SS_CONNECTED) + return -EINVAL; +@@ -175,22 +157,16 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + return PTR_ERR(private); + } + +- err = -EBUSY; + lock_sock(sk); +- if (ask->refcnt) +- goto unlock; + + swap(ask->type, type); + swap(ask->private, private); + +- err = 0; +- +-unlock: + release_sock(sk); + + alg_do_release(type, private); + +- return err; ++ return 0; + } + + static int alg_setkey(struct sock *sk, char 
__user *ukey, +@@ -223,15 +199,11 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + const struct af_alg_type *type; +- int err = -EBUSY; ++ int err = -ENOPROTOOPT; + + lock_sock(sk); +- if (ask->refcnt) +- goto unlock; +- + type = ask->type; + +- err = -ENOPROTOOPT; + if (level != SOL_ALG || !type) + goto unlock; + +@@ -280,8 +252,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) + + sk2->sk_family = PF_ALG; + +- if (!ask->refcnt++) +- sock_hold(sk); ++ sock_hold(sk); + alg_sk(sk2)->parent = sk; + alg_sk(sk2)->type = type; + +diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c +index 8bd1bb6dbe47..24ae2a694e9b 100644 +--- a/drivers/firmware/efi/efivars.c ++++ b/drivers/firmware/efi/efivars.c +@@ -219,7 +219,8 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) + } + + if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 || +- efivar_validate(new_var, new_var->Data, new_var->DataSize) == false) { ++ efivar_validate(new_var->VendorGuid, new_var->VariableName, ++ new_var->Data, new_var->DataSize) == false) { + printk(KERN_ERR "efivars: Malformed variable content\n"); + return -EINVAL; + } +@@ -334,7 +335,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, + return -EACCES; + + if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 || +- efivar_validate(new_var, new_var->Data, new_var->DataSize) == false) { ++ efivar_validate(new_var->VendorGuid, new_var->VariableName, ++ new_var->Data, new_var->DataSize) == false) { + printk(KERN_ERR "efivars: Malformed variable content\n"); + return -EINVAL; + } +@@ -405,35 +407,27 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) + { + int i, short_name_size; + char *short_name; +- unsigned long variable_name_size; +- efi_char16_t *variable_name; +- +- variable_name = new_var->var.VariableName; +- variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t); ++ unsigned long utf8_name_size; ++ efi_char16_t *variable_name = new_var->var.VariableName; + + /* +- * Length of the variable bytes in ASCII, plus the '-' separator, ++ * Length of the variable bytes in UTF8, plus the '-' separator, + * plus the GUID, plus trailing NUL + */ +- short_name_size = variable_name_size / sizeof(efi_char16_t) +- + 1 + EFI_VARIABLE_GUID_LEN + 1; +- +- short_name = kzalloc(short_name_size, GFP_KERNEL); ++ utf8_name_size = ucs2_utf8size(variable_name); ++ short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1; + ++ short_name = kmalloc(short_name_size, GFP_KERNEL); + if (!short_name) + return 1; + +- /* Convert Unicode to normal chars (assume top bits are 0), +- ala UTF-8 */ +- for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) { +- short_name[i] = variable_name[i] & 0xFF; +- } ++ ucs2_as_utf8(short_name, variable_name, short_name_size); ++ + /* This is ugly, but necessary to separate one vendor's + private variables from another's. 
*/ +- +- *(short_name + strlen(short_name)) = '-'; ++ short_name[utf8_name_size] = '-'; + efi_guid_unparse(&new_var->var.VendorGuid, +- short_name + strlen(short_name)); ++ short_name + utf8_name_size + 1); + + new_var->kobj.kset = efivars_kset; + +diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c +index 7dbc319e1cf5..9f82b5545edd 100644 +--- a/drivers/firmware/efi/vars.c ++++ b/drivers/firmware/efi/vars.c +@@ -42,7 +42,7 @@ DECLARE_WORK(efivar_work, NULL); + EXPORT_SYMBOL_GPL(efivar_work); + + static bool +-validate_device_path(struct efi_variable *var, int match, u8 *buffer, ++validate_device_path(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + struct efi_generic_dev_path *node; +@@ -75,7 +75,7 @@ validate_device_path(struct efi_variable *var, int match, u8 *buffer, + } + + static bool +-validate_boot_order(struct efi_variable *var, int match, u8 *buffer, ++validate_boot_order(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + /* An array of 16-bit integers */ +@@ -86,18 +86,18 @@ validate_boot_order(struct efi_variable *var, int match, u8 *buffer, + } + + static bool +-validate_load_option(struct efi_variable *var, int match, u8 *buffer, ++validate_load_option(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + u16 filepathlength; + int i, desclength = 0, namelen; + +- namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName)); ++ namelen = ucs2_strnlen(var_name, EFI_VAR_NAME_LEN); + + /* Either "Boot" or "Driver" followed by four digits of hex */ + for (i = match; i < match+4; i++) { +- if (var->VariableName[i] > 127 || +- hex_to_bin(var->VariableName[i] & 0xff) < 0) ++ if (var_name[i] > 127 || ++ hex_to_bin(var_name[i] & 0xff) < 0) + return true; + } + +@@ -132,12 +132,12 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer, + /* + * And, finally, check the filepath + */ +- return validate_device_path(var, match, buffer + desclength + 6, ++ return validate_device_path(var_name, match, buffer + desclength + 6, + filepathlength); + } + + static bool +-validate_uint16(struct efi_variable *var, int match, u8 *buffer, ++validate_uint16(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + /* A single 16-bit integer */ +@@ -148,7 +148,7 @@ validate_uint16(struct efi_variable *var, int match, u8 *buffer, + } + + static bool +-validate_ascii_string(struct efi_variable *var, int match, u8 *buffer, ++validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + int i; +@@ -165,67 +165,133 @@ validate_ascii_string(struct efi_variable *var, int match, u8 *buffer, + } + + struct variable_validate { ++ efi_guid_t vendor; + char *name; +- bool (*validate)(struct efi_variable *var, int match, u8 *data, ++ bool (*validate)(efi_char16_t *var_name, int match, u8 *data, + unsigned long len); + }; + ++/* ++ * This is the list of variables we need to validate, as well as the ++ * whitelist for what we think is safe not to default to immutable. ++ * ++ * If it has a validate() method that's not NULL, it'll go into the ++ * validation routine. If not, it is assumed valid, but still used for ++ * whitelisting. ++ * ++ * Note that it's sorted by {vendor,name}, but globbed names must come after ++ * any other name with the same prefix. 
++ */ + static const struct variable_validate variable_validate[] = { +- { "BootNext", validate_uint16 }, +- { "BootOrder", validate_boot_order }, +- { "DriverOrder", validate_boot_order }, +- { "Boot*", validate_load_option }, +- { "Driver*", validate_load_option }, +- { "ConIn", validate_device_path }, +- { "ConInDev", validate_device_path }, +- { "ConOut", validate_device_path }, +- { "ConOutDev", validate_device_path }, +- { "ErrOut", validate_device_path }, +- { "ErrOutDev", validate_device_path }, +- { "Timeout", validate_uint16 }, +- { "Lang", validate_ascii_string }, +- { "PlatformLang", validate_ascii_string }, +- { "", NULL }, ++ { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 }, ++ { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option }, ++ { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string }, ++ { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL }, ++ { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 }, ++ { LINUX_EFI_CRASH_GUID, "*", NULL }, ++ { NULL_GUID, "", NULL }, + }; + ++static bool ++variable_matches(const char *var_name, size_t len, const char *match_name, ++ int *match) ++{ ++ for (*match = 0; ; (*match)++) { ++ char c = match_name[*match]; ++ char u = var_name[*match]; ++ ++ /* Wildcard in the matching name means we've matched */ ++ if (c == '*') ++ return true; ++ ++ /* Case sensitive match */ ++ if (!c && *match == len) ++ return true; ++ ++ if (c != u) ++ return false; ++ ++ if (!c) ++ return true; ++ } ++ return true; ++} ++ + bool +-efivar_validate(struct efi_variable *var, u8 *data, unsigned long len) ++efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, ++ unsigned long data_size) + { + int i; +- u16 *unicode_name = var->VariableName; ++ unsigned long utf8_size; ++ u8 *utf8_name; + +- for (i = 0; variable_validate[i].validate != NULL; i++) { +- const char *name = variable_validate[i].name; +- int match; ++ utf8_size = ucs2_utf8size(var_name); ++ utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL); ++ if (!utf8_name) ++ return false; + +- for (match = 0; ; match++) { +- char c = name[match]; +- u16 u = unicode_name[match]; ++ ucs2_as_utf8(utf8_name, var_name, utf8_size); ++ utf8_name[utf8_size] = '\0'; + +- /* All special variables are plain ascii */ +- if (u > 127) +- return true; ++ for (i = 0; variable_validate[i].name[0] != '\0'; i++) { ++ const char *name = variable_validate[i].name; ++ int match = 0; + +- /* Wildcard in the matching name means we've matched */ +- if (c == '*') +- return variable_validate[i].validate(var, +- match, data, len); ++ if (efi_guidcmp(vendor, variable_validate[i].vendor)) ++ continue; + +- /* Case sensitive match */ +- if (c != u) ++ if (variable_matches(utf8_name, utf8_size+1, name, &match)) { ++ if (variable_validate[i].validate == NULL) + break; +- +- /* Reached the end of the string while matching */ +- if (!c) +- return 
variable_validate[i].validate(var, +- match, data, len); ++ kfree(utf8_name); ++ return variable_validate[i].validate(var_name, match, ++ data, data_size); + } + } +- ++ kfree(utf8_name); + return true; + } + EXPORT_SYMBOL_GPL(efivar_validate); + ++bool ++efivar_variable_is_removable(efi_guid_t vendor, const char *var_name, ++ size_t len) ++{ ++ int i; ++ bool found = false; ++ int match = 0; ++ ++ /* ++ * Check if our variable is in the validated variables list ++ */ ++ for (i = 0; variable_validate[i].name[0] != '\0'; i++) { ++ if (efi_guidcmp(variable_validate[i].vendor, vendor)) ++ continue; ++ ++ if (variable_matches(var_name, len, ++ variable_validate[i].name, &match)) { ++ found = true; ++ break; ++ } ++ } ++ ++ /* ++ * If it's in our list, it is removable. ++ */ ++ return found; ++} ++EXPORT_SYMBOL_GPL(efivar_variable_is_removable); ++ + static efi_status_t + check_var_size(u32 attributes, unsigned long size) + { +@@ -797,7 +863,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, + + *set = false; + +- if (efivar_validate(&entry->var, data, *size) == false) ++ if (efivar_validate(*vendor, name, data, *size) == false) + return -EINVAL; + + /* +diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c +index 8dd524f32284..08f105a06fbf 100644 +--- a/fs/efivarfs/file.c ++++ b/fs/efivarfs/file.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + + #include "internal.h" + +@@ -108,9 +109,79 @@ out_free: + return size; + } + ++static int ++efivarfs_ioc_getxflags(struct file *file, void __user *arg) ++{ ++ struct inode *inode = file->f_mapping->host; ++ unsigned int i_flags; ++ unsigned int flags = 0; ++ ++ i_flags = inode->i_flags; ++ if (i_flags & S_IMMUTABLE) ++ flags |= FS_IMMUTABLE_FL; ++ ++ if (copy_to_user(arg, &flags, sizeof(flags))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ++efivarfs_ioc_setxflags(struct file *file, void __user *arg) ++{ ++ struct inode *inode = file->f_mapping->host; ++ unsigned int flags; ++ unsigned int i_flags = 0; ++ int error; ++ ++ if (!inode_owner_or_capable(inode)) ++ return -EACCES; ++ ++ if (copy_from_user(&flags, arg, sizeof(flags))) ++ return -EFAULT; ++ ++ if (flags & ~FS_IMMUTABLE_FL) ++ return -EOPNOTSUPP; ++ ++ if (!capable(CAP_LINUX_IMMUTABLE)) ++ return -EPERM; ++ ++ if (flags & FS_IMMUTABLE_FL) ++ i_flags |= S_IMMUTABLE; ++ ++ ++ error = mnt_want_write_file(file); ++ if (error) ++ return error; ++ ++ mutex_lock(&inode->i_mutex); ++ inode->i_flags &= ~S_IMMUTABLE; ++ inode->i_flags |= i_flags; ++ mutex_unlock(&inode->i_mutex); ++ ++ mnt_drop_write_file(file); ++ ++ return 0; ++} ++ ++long ++efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p) ++{ ++ void __user *arg = (void __user *)p; ++ ++ switch (cmd) { ++ case FS_IOC_GETFLAGS: ++ return efivarfs_ioc_getxflags(file, arg); ++ case FS_IOC_SETFLAGS: ++ return efivarfs_ioc_setxflags(file, arg); ++ } ++ ++ return -ENOTTY; ++} ++ + const struct file_operations efivarfs_file_operations = { + .open = simple_open, + .read = efivarfs_file_read, + .write = efivarfs_file_write, + .llseek = no_llseek, ++ .unlocked_ioctl = efivarfs_file_ioctl, + }; +diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c +index 7e787fb90293..d0351bc7b533 100644 +--- a/fs/efivarfs/inode.c ++++ b/fs/efivarfs/inode.c +@@ -15,7 +15,8 @@ + #include "internal.h" + + struct inode *efivarfs_get_inode(struct super_block *sb, +- const struct inode *dir, int mode, dev_t dev) ++ const struct inode *dir, int mode, ++ dev_t dev, bool is_removable) + { + struct inode 
*inode = new_inode(sb); + +@@ -23,6 +24,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb, + inode->i_ino = get_next_ino(); + inode->i_mode = mode; + inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; ++ inode->i_flags = is_removable ? 0 : S_IMMUTABLE; + switch (mode & S_IFMT) { + case S_IFREG: + inode->i_fop = &efivarfs_file_operations; +@@ -102,22 +104,17 @@ static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid) + static int efivarfs_create(struct inode *dir, struct dentry *dentry, + umode_t mode, bool excl) + { +- struct inode *inode; ++ struct inode *inode = NULL; + struct efivar_entry *var; + int namelen, i = 0, err = 0; ++ bool is_removable = false; + + if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len)) + return -EINVAL; + +- inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0); +- if (!inode) +- return -ENOMEM; +- + var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL); +- if (!var) { +- err = -ENOMEM; +- goto out; +- } ++ if (!var) ++ return -ENOMEM; + + /* length of the variable name itself: remove GUID and separator */ + namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1; +@@ -125,6 +122,16 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry, + efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1, + &var->var.VendorGuid); + ++ if (efivar_variable_is_removable(var->var.VendorGuid, ++ dentry->d_name.name, namelen)) ++ is_removable = true; ++ ++ inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable); ++ if (!inode) { ++ err = -ENOMEM; ++ goto out; ++ } ++ + for (i = 0; i < namelen; i++) + var->var.VariableName[i] = dentry->d_name.name[i]; + +@@ -138,7 +145,8 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry, + out: + if (err) { + kfree(var); +- iput(inode); ++ if (inode) ++ iput(inode); + } + return err; + } +diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h +index b5ff16addb7c..b4505188e799 100644 +--- a/fs/efivarfs/internal.h ++++ b/fs/efivarfs/internal.h +@@ -15,7 +15,8 @@ extern const struct file_operations efivarfs_file_operations; + extern const struct inode_operations efivarfs_dir_inode_operations; + extern bool efivarfs_valid_name(const char *str, int len); + extern struct inode *efivarfs_get_inode(struct super_block *sb, +- const struct inode *dir, int mode, dev_t dev); ++ const struct inode *dir, int mode, dev_t dev, ++ bool is_removable); + + extern struct list_head efivarfs_list; + +diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c +index 141aee31884f..5a3655f690d9 100644 +--- a/fs/efivarfs/super.c ++++ b/fs/efivarfs/super.c +@@ -128,8 +128,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, + struct dentry *dentry, *root = sb->s_root; + unsigned long size = 0; + char *name; +- int len, i; ++ int len; + int err = -ENOMEM; ++ bool is_removable = false; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) +@@ -138,15 +139,17 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, + memcpy(entry->var.VariableName, name16, name_size); + memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); + +- len = ucs2_strlen(entry->var.VariableName); ++ len = ucs2_utf8size(entry->var.VariableName); + + /* name, plus '-', plus GUID, plus NUL*/ + name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL); + if (!name) + goto fail; + +- for (i = 0; i < len; i++) +- name[i] = entry->var.VariableName[i] & 0xFF; ++ ucs2_as_utf8(name, entry->var.VariableName, len); ++ ++ if 
(efivar_variable_is_removable(entry->var.VendorGuid, name, len)) ++ is_removable = true; + + name[len] = '-'; + +@@ -154,7 +157,8 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, + + name[len + EFI_VARIABLE_GUID_LEN+1] = '\0'; + +- inode = efivarfs_get_inode(sb, root->d_inode, S_IFREG | 0644, 0); ++ inode = efivarfs_get_inode(sb, root->d_inode, S_IFREG | 0644, 0, ++ is_removable); + if (!inode) + goto fail_name; + +@@ -210,7 +214,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent) + sb->s_d_op = &efivarfs_d_ops; + sb->s_time_gran = 1; + +- inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0); ++ inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true); + if (!inode) + return -ENOMEM; + inode->i_op = &efivarfs_dir_inode_operations; +diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h +index 2f38daaab3d7..d61c11170213 100644 +--- a/include/crypto/if_alg.h ++++ b/include/crypto/if_alg.h +@@ -30,8 +30,6 @@ struct alg_sock { + + struct sock *parent; + +- unsigned int refcnt; +- + const struct af_alg_type *type; + void *private; + }; +@@ -66,7 +64,6 @@ int af_alg_register_type(const struct af_alg_type *type); + int af_alg_unregister_type(const struct af_alg_type *type); + + int af_alg_release(struct socket *sock); +-void af_alg_release_parent(struct sock *sk); + int af_alg_accept(struct sock *sk, struct socket *newsock); + + int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len, +@@ -83,6 +80,11 @@ static inline struct alg_sock *alg_sk(struct sock *sk) + return (struct alg_sock *)sk; + } + ++static inline void af_alg_release_parent(struct sock *sk) ++{ ++ sock_put(alg_sk(sk)->parent); ++} ++ + static inline void af_alg_init_completion(struct af_alg_completion *completion) + { + init_completion(&completion->completion); +diff --git a/include/linux/efi.h b/include/linux/efi.h +index 2bc0ad78d058..63fa51c864ec 100644 +--- a/include/linux/efi.h ++++ b/include/linux/efi.h +@@ -769,8 +769,10 @@ struct efivars { + * and we use a page for reading/writing. + */ + ++#define EFI_VAR_NAME_LEN 1024 ++ + struct efi_variable { +- efi_char16_t VariableName[1024/sizeof(efi_char16_t)]; ++ efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)]; + efi_guid_t VendorGuid; + unsigned long DataSize; + __u8 Data[1024]; +@@ -832,7 +834,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *), + struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, + struct list_head *head, bool remove); + +-bool efivar_validate(struct efi_variable *var, u8 *data, unsigned long len); ++bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, ++ unsigned long data_size); ++bool efivar_variable_is_removable(efi_guid_t vendor, const char *name, ++ size_t len); + + extern struct work_struct efivar_work; + void efivar_run_worker(void); +diff --git a/include/linux/module.h b/include/linux/module.h +index 46f1ea01e6f6..761dc2848ffa 100644 +--- a/include/linux/module.h ++++ b/include/linux/module.h +@@ -220,6 +220,12 @@ struct module_ref { + unsigned long decs; + } __attribute((aligned(2 * sizeof(unsigned long)))); + ++struct mod_kallsyms { ++ Elf_Sym *symtab; ++ unsigned int num_symtab; ++ char *strtab; ++}; ++ + struct module + { + enum module_state state; +@@ -308,14 +314,9 @@ struct module + #endif + + #ifdef CONFIG_KALLSYMS +- /* +- * We keep the symbol and string tables for kallsyms. 
+- * The core_* fields below are temporary, loader-only (they +- * could really be discarded after module init). +- */ +- Elf_Sym *symtab, *core_symtab; +- unsigned int num_symtab, core_num_syms; +- char *strtab, *core_strtab; ++ /* Protected by RCU and/or module_mutex: use rcu_dereference() */ ++ struct mod_kallsyms *kallsyms; ++ struct mod_kallsyms core_kallsyms; + + /* Section attributes */ + struct module_sect_attrs *sect_attrs; +diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h +index 0b2d0cbe0bab..36e5e9998865 100644 +--- a/include/linux/tracepoint.h ++++ b/include/linux/tracepoint.h +@@ -129,9 +129,6 @@ static inline void tracepoint_synchronize_unregister(void) + void *it_func; \ + void *__data; \ + \ +- if (!cpu_online(raw_smp_processor_id())) \ +- return; \ +- \ + if (!(cond)) \ + return; \ + prercu; \ +@@ -265,15 +262,19 @@ static inline void tracepoint_synchronize_unregister(void) + * "void *__data, proto" as the callback prototype. + */ + #define DECLARE_TRACE_NOARGS(name) \ +- __DECLARE_TRACE(name, void, , 1, void *__data, __data) ++ __DECLARE_TRACE(name, void, , \ ++ cpu_online(raw_smp_processor_id()), \ ++ void *__data, __data) + + #define DECLARE_TRACE(name, proto, args) \ +- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1, \ +- PARAMS(void *__data, proto), \ +- PARAMS(__data, args)) ++ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ ++ cpu_online(raw_smp_processor_id()), \ ++ PARAMS(void *__data, proto), \ ++ PARAMS(__data, args)) + + #define DECLARE_TRACE_CONDITION(name, proto, args, cond) \ +- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \ ++ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ ++ cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \ + PARAMS(void *__data, proto), \ + PARAMS(__data, args)) + +diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h +index cbb20afdbc01..bb679b48f408 100644 +--- a/include/linux/ucs2_string.h ++++ b/include/linux/ucs2_string.h +@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s); + unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength); + int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len); + ++unsigned long ucs2_utf8size(const ucs2_char_t *src); ++unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, ++ unsigned long maxlength); ++ + #endif /* _LINUX_UCS2_STRING_H_ */ +diff --git a/kernel/module.c b/kernel/module.c +index 70a4754c001f..f8a4f48b48a9 100644 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -179,6 +179,9 @@ struct load_info { + struct _ddebug *debug; + unsigned int num_debug; + bool sig_ok; ++#ifdef CONFIG_KALLSYMS ++ unsigned long mod_kallsyms_init_off; ++#endif + struct { + unsigned int sym, str, mod, vers, info, pcpu; + } index; +@@ -2346,8 +2349,20 @@ static void layout_symtab(struct module *mod, struct load_info *info) + strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, + info->index.str) | INIT_OFFSET_MASK; + pr_debug("\t%s\n", info->secstrings + strsect->sh_name); ++ ++ /* We'll tack temporary mod_kallsyms on the end. */ ++ mod->init_size = ALIGN(mod->init_size, ++ __alignof__(struct mod_kallsyms)); ++ info->mod_kallsyms_init_off = mod->init_size; ++ mod->init_size += sizeof(struct mod_kallsyms); ++ mod->init_size = debug_align(mod->init_size); + } + ++/* ++ * We use the full symtab and strtab which layout_symtab arranged to ++ * be appended to the init section. Later we switch to the cut-down ++ * core-only ones. 
++ */ + static void add_kallsyms(struct module *mod, const struct load_info *info) + { + unsigned int i, ndst; +@@ -2356,28 +2371,33 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) + char *s; + Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; + +- mod->symtab = (void *)symsec->sh_addr; +- mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym); ++ /* Set up to point into init section. */ ++ mod->kallsyms = mod->module_init + info->mod_kallsyms_init_off; ++ ++ mod->kallsyms->symtab = (void *)symsec->sh_addr; ++ mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym); + /* Make sure we get permanent strtab: don't use info->strtab. */ +- mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr; ++ mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr; + + /* Set types up while we still have access to sections. */ +- for (i = 0; i < mod->num_symtab; i++) +- mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); +- +- mod->core_symtab = dst = mod->module_core + info->symoffs; +- mod->core_strtab = s = mod->module_core + info->stroffs; +- src = mod->symtab; +- for (ndst = i = 0; i < mod->num_symtab; i++) { ++ for (i = 0; i < mod->kallsyms->num_symtab; i++) ++ mod->kallsyms->symtab[i].st_info ++ = elf_type(&mod->kallsyms->symtab[i], info); ++ ++ /* Now populate the cut down core kallsyms for after init. */ ++ mod->core_kallsyms.symtab = dst = mod->module_core + info->symoffs; ++ mod->core_kallsyms.strtab = s = mod->module_core + info->stroffs; ++ src = mod->kallsyms->symtab; ++ for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) { + if (i == 0 || + is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { + dst[ndst] = src[i]; +- dst[ndst++].st_name = s - mod->core_strtab; +- s += strlcpy(s, &mod->strtab[src[i].st_name], ++ dst[ndst++].st_name = s - mod->core_kallsyms.strtab; ++ s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name], + KSYM_NAME_LEN) + 1; + } + } +- mod->core_num_syms = ndst; ++ mod->core_kallsyms.num_symtab = ndst; + } + #else + static inline void layout_symtab(struct module *mod, struct load_info *info) +@@ -3117,9 +3137,8 @@ static int do_init_module(struct module *mod) + module_put(mod); + trim_init_extable(mod); + #ifdef CONFIG_KALLSYMS +- mod->num_symtab = mod->core_num_syms; +- mod->symtab = mod->core_symtab; +- mod->strtab = mod->core_strtab; ++ /* Switch to core kallsyms now init is done: kallsyms may be walking! */ ++ rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms); + #endif + unset_module_init_ro_nx(mod); + module_free(mod, mod->module_init); +@@ -3398,9 +3417,9 @@ static inline int is_arm_mapping_symbol(const char *str) + && (str[2] == '\0' || str[2] == '.'); + } + +-static const char *symname(struct module *mod, unsigned int symnum) ++static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum) + { +- return mod->strtab + mod->symtab[symnum].st_name; ++ return kallsyms->strtab + kallsyms->symtab[symnum].st_name; + } + + static const char *get_ksymbol(struct module *mod, +@@ -3410,6 +3429,7 @@ static const char *get_ksymbol(struct module *mod, + { + unsigned int i, best = 0; + unsigned long nextval; ++ struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + + /* At worse, next value is at end of module */ + if (within_module_init(addr, mod)) +@@ -3419,32 +3439,32 @@ static const char *get_ksymbol(struct module *mod, + + /* Scan for closest preceding symbol, and next symbol. (ELF + starts real symbols at 1). 
*/ +- for (i = 1; i < mod->num_symtab; i++) { +- if (mod->symtab[i].st_shndx == SHN_UNDEF) ++ for (i = 1; i < kallsyms->num_symtab; i++) { ++ if (kallsyms->symtab[i].st_shndx == SHN_UNDEF) + continue; + + /* We ignore unnamed symbols: they're uninformative + * and inserted at a whim. */ +- if (*symname(mod, i) == '\0' +- || is_arm_mapping_symbol(symname(mod, i))) ++ if (*symname(kallsyms, i) == '\0' ++ || is_arm_mapping_symbol(symname(kallsyms, i))) + continue; + +- if (mod->symtab[i].st_value <= addr +- && mod->symtab[i].st_value > mod->symtab[best].st_value) ++ if (kallsyms->symtab[i].st_value <= addr ++ && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value) + best = i; +- if (mod->symtab[i].st_value > addr +- && mod->symtab[i].st_value < nextval) +- nextval = mod->symtab[i].st_value; ++ if (kallsyms->symtab[i].st_value > addr ++ && kallsyms->symtab[i].st_value < nextval) ++ nextval = kallsyms->symtab[i].st_value; + } + + if (!best) + return NULL; + + if (size) +- *size = nextval - mod->symtab[best].st_value; ++ *size = nextval - kallsyms->symtab[best].st_value; + if (offset) +- *offset = addr - mod->symtab[best].st_value; +- return symname(mod, best); ++ *offset = addr - kallsyms->symtab[best].st_value; ++ return symname(kallsyms, best); + } + + /* For kallsyms to ask for address resolution. NULL means not found. Careful +@@ -3540,18 +3560,21 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + + preempt_disable(); + list_for_each_entry_rcu(mod, &modules, list) { ++ struct mod_kallsyms *kallsyms; ++ + if (mod->state == MODULE_STATE_UNFORMED) + continue; +- if (symnum < mod->num_symtab) { +- *value = mod->symtab[symnum].st_value; +- *type = mod->symtab[symnum].st_info; +- strlcpy(name, symname(mod, symnum), KSYM_NAME_LEN); ++ kallsyms = rcu_dereference_sched(mod->kallsyms); ++ if (symnum < kallsyms->num_symtab) { ++ *value = kallsyms->symtab[symnum].st_value; ++ *type = kallsyms->symtab[symnum].st_info; ++ strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN); + strlcpy(module_name, mod->name, MODULE_NAME_LEN); + *exported = is_exported(name, *value, mod); + preempt_enable(); + return 0; + } +- symnum -= mod->num_symtab; ++ symnum -= kallsyms->num_symtab; + } + preempt_enable(); + return -ERANGE; +@@ -3560,11 +3583,12 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + static unsigned long mod_find_symname(struct module *mod, const char *name) + { + unsigned int i; ++ struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + +- for (i = 0; i < mod->num_symtab; i++) +- if (strcmp(name, symname(mod, i)) == 0 && +- mod->symtab[i].st_info != 'U') +- return mod->symtab[i].st_value; ++ for (i = 0; i < kallsyms->num_symtab; i++) ++ if (strcmp(name, symname(kallsyms, i)) == 0 && ++ kallsyms->symtab[i].st_info != 'U') ++ return kallsyms->symtab[i].st_value; + return 0; + } + +@@ -3603,11 +3627,14 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + int ret; + + list_for_each_entry(mod, &modules, list) { ++ /* We hold module_mutex: no need for rcu_dereference_sched */ ++ struct mod_kallsyms *kallsyms = mod->kallsyms; ++ + if (mod->state == MODULE_STATE_UNFORMED) + continue; +- for (i = 0; i < mod->num_symtab; i++) { +- ret = fn(data, symname(mod, i), +- mod, mod->symtab[i].st_value); ++ for (i = 0; i < kallsyms->num_symtab; i++) { ++ ret = fn(data, symname(kallsyms, i), ++ mod, kallsyms->symtab[i].st_value); + if (ret != 0) + return ret; + } +diff --git a/lib/ucs2_string.c 
b/lib/ucs2_string.c +index 6f500ef2301d..f0b323abb4c6 100644 +--- a/lib/ucs2_string.c ++++ b/lib/ucs2_string.c +@@ -49,3 +49,65 @@ ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len) + } + } + EXPORT_SYMBOL(ucs2_strncmp); ++ ++unsigned long ++ucs2_utf8size(const ucs2_char_t *src) ++{ ++ unsigned long i; ++ unsigned long j = 0; ++ ++ for (i = 0; i < ucs2_strlen(src); i++) { ++ u16 c = src[i]; ++ ++ if (c >= 0x800) ++ j += 3; ++ else if (c >= 0x80) ++ j += 2; ++ else ++ j += 1; ++ } ++ ++ return j; ++} ++EXPORT_SYMBOL(ucs2_utf8size); ++ ++/* ++ * copy at most maxlength bytes of whole utf8 characters to dest from the ++ * ucs2 string src. ++ * ++ * The return value is the number of characters copied, not including the ++ * final NUL character. ++ */ ++unsigned long ++ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength) ++{ ++ unsigned int i; ++ unsigned long j = 0; ++ unsigned long limit = ucs2_strnlen(src, maxlength); ++ ++ for (i = 0; maxlength && i < limit; i++) { ++ u16 c = src[i]; ++ ++ if (c >= 0x800) { ++ if (maxlength < 3) ++ break; ++ maxlength -= 3; ++ dest[j++] = 0xe0 | (c & 0xf000) >> 12; ++ dest[j++] = 0x80 | (c & 0x0fc0) >> 6; ++ dest[j++] = 0x80 | (c & 0x003f); ++ } else if (c >= 0x80) { ++ if (maxlength < 2) ++ break; ++ maxlength -= 2; ++ dest[j++] = 0xc0 | (c & 0x7c0) >> 6; ++ dest[j++] = 0x80 | (c & 0x03f); ++ } else { ++ maxlength -= 1; ++ dest[j++] = c & 0x7f; ++ } ++ } ++ if (maxlength) ++ dest[j] = '\0'; ++ return j; ++} ++EXPORT_SYMBOL(ucs2_as_utf8); +diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c +index 31bf2586fb84..864408026202 100644 +--- a/net/mac80211/agg-rx.c ++++ b/net/mac80211/agg-rx.c +@@ -290,7 +290,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, + } + + /* prepare A-MPDU MLME for Rx aggregation */ +- tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL); ++ tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL); + if (!tid_agg_rx) + goto end; + +diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c +index f3bbea1eb9e7..13f10aab9213 100644 +--- a/net/mac80211/rc80211_minstrel_ht.c ++++ b/net/mac80211/rc80211_minstrel_ht.c +@@ -454,7 +454,7 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb) + if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO) + return; + +- ieee80211_start_tx_ba_session(pubsta, tid, 5000); ++ ieee80211_start_tx_ba_session(pubsta, tid, 0); + } + + static void +diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c +index c8717c1d082e..87dd619fb2e9 100644 +--- a/net/wireless/wext-core.c ++++ b/net/wireless/wext-core.c +@@ -342,6 +342,39 @@ static const int compat_event_type_size[] = { + + /* IW event code */ + ++static void wireless_nlevent_flush(void) ++{ ++ struct sk_buff *skb; ++ struct net *net; ++ ++ ASSERT_RTNL(); ++ ++ for_each_net(net) { ++ while ((skb = skb_dequeue(&net->wext_nlevents))) ++ rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, ++ GFP_KERNEL); ++ } ++} ++ ++static int wext_netdev_notifier_call(struct notifier_block *nb, ++ unsigned long state, void *ptr) ++{ ++ /* ++ * When a netdev changes state in any way, flush all pending messages ++ * to avoid them going out in a strange order, e.g. RTM_NEWLINK after ++ * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close() ++ * or similar - all of which could otherwise happen due to delays from ++ * schedule_work(). 
++ */ ++ wireless_nlevent_flush(); ++ ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block wext_netdev_notifier = { ++ .notifier_call = wext_netdev_notifier_call, ++}; ++ + static int __net_init wext_pernet_init(struct net *net) + { + skb_queue_head_init(&net->wext_nlevents); +@@ -360,7 +393,12 @@ static struct pernet_operations wext_pernet_ops = { + + static int __init wireless_nlevent_init(void) + { +- return register_pernet_subsys(&wext_pernet_ops); ++ int err = register_pernet_subsys(&wext_pernet_ops); ++ ++ if (err) ++ return err; ++ ++ return register_netdevice_notifier(&wext_netdev_notifier); + } + + subsys_initcall(wireless_nlevent_init); +@@ -368,17 +406,8 @@ subsys_initcall(wireless_nlevent_init); + /* Process events generated by the wireless layer or the driver. */ + static void wireless_nlevent_process(struct work_struct *work) + { +- struct sk_buff *skb; +- struct net *net; +- + rtnl_lock(); +- +- for_each_net(net) { +- while ((skb = skb_dequeue(&net->wext_nlevents))) +- rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, +- GFP_KERNEL); +- } +- ++ wireless_nlevent_flush(); + rtnl_unlock(); + } + +diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c +index 754f88e1fdab..4892966fc1b8 100644 +--- a/sound/soc/codecs/wm8958-dsp2.c ++++ b/sound/soc/codecs/wm8958-dsp2.c +@@ -459,7 +459,7 @@ static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +@@ -549,7 +549,7 @@ static int wm8958_put_vss_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +@@ -582,7 +582,7 @@ static int wm8958_put_vss_hpf_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +@@ -749,7 +749,7 @@ static int wm8958_put_enh_eq_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c +index 6e746c7474bf..cda3cf23474b 100644 +--- a/sound/soc/codecs/wm8994.c ++++ b/sound/soc/codecs/wm8994.c +@@ -361,7 +361,7 @@ static int wm8994_put_drc_enum(struct snd_kcontrol *kcontrol, + struct wm8994 *control = wm8994->wm8994; + struct wm8994_pdata *pdata = &control->pdata; + int drc = wm8994_get_drc(kcontrol->id.name); +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + + if (drc < 0) + return drc; +@@ -468,7 +468,7 @@ static int 
wm8994_put_retune_mobile_enum(struct snd_kcontrol *kcontrol, + struct wm8994 *control = wm8994->wm8994; + struct wm8994_pdata *pdata = &control->pdata; + int block = wm8994_get_retune_mobile_block(kcontrol->id.name); +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + + if (block < 0) + return block; +diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh +index 77edcdcc016b..057278448515 100644 +--- a/tools/testing/selftests/efivarfs/efivarfs.sh ++++ b/tools/testing/selftests/efivarfs/efivarfs.sh +@@ -88,7 +88,11 @@ test_delete() + exit 1 + fi + +- rm $file ++ rm $file 2>/dev/null ++ if [ $? -ne 0 ]; then ++ chattr -i $file ++ rm $file ++ fi + + if [ -e $file ]; then + echo "$file couldn't be deleted" >&2 +@@ -111,6 +115,7 @@ test_zero_size_delete() + exit 1 + fi + ++ chattr -i $file + printf "$attrs" > $file + + if [ -e $file ]; then +@@ -141,7 +146,11 @@ test_valid_filenames() + echo "$file could not be created" >&2 + ret=1 + else +- rm $file ++ rm $file 2>/dev/null ++ if [ $? -ne 0 ]; then ++ chattr -i $file ++ rm $file ++ fi + fi + done + +@@ -174,7 +183,11 @@ test_invalid_filenames() + + if [ -e $file ]; then + echo "Creating $file should have failed" >&2 +- rm $file ++ rm $file 2>/dev/null ++ if [ $? -ne 0 ]; then ++ chattr -i $file ++ rm $file ++ fi + ret=1 + fi + done +diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c +index 8c0764407b3c..4af74f733036 100644 +--- a/tools/testing/selftests/efivarfs/open-unlink.c ++++ b/tools/testing/selftests/efivarfs/open-unlink.c +@@ -1,10 +1,68 @@ ++#include + #include + #include + #include + #include ++#include + #include + #include + #include ++#include ++ ++static int set_immutable(const char *path, int immutable) ++{ ++ unsigned int flags; ++ int fd; ++ int rc; ++ int error; ++ ++ fd = open(path, O_RDONLY); ++ if (fd < 0) ++ return fd; ++ ++ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags); ++ if (rc < 0) { ++ error = errno; ++ close(fd); ++ errno = error; ++ return rc; ++ } ++ ++ if (immutable) ++ flags |= FS_IMMUTABLE_FL; ++ else ++ flags &= ~FS_IMMUTABLE_FL; ++ ++ rc = ioctl(fd, FS_IOC_SETFLAGS, &flags); ++ error = errno; ++ close(fd); ++ errno = error; ++ return rc; ++} ++ ++static int get_immutable(const char *path) ++{ ++ unsigned int flags; ++ int fd; ++ int rc; ++ int error; ++ ++ fd = open(path, O_RDONLY); ++ if (fd < 0) ++ return fd; ++ ++ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags); ++ if (rc < 0) { ++ error = errno; ++ close(fd); ++ errno = error; ++ return rc; ++ } ++ close(fd); ++ if (flags & FS_IMMUTABLE_FL) ++ return 1; ++ return 0; ++} + + int main(int argc, char **argv) + { +@@ -27,7 +85,7 @@ int main(int argc, char **argv) + buf[4] = 0; + + /* create a test variable */ +- fd = open(path, O_WRONLY | O_CREAT); ++ fd = open(path, O_WRONLY | O_CREAT, 0600); + if (fd < 0) { + perror("open(O_WRONLY)"); + return EXIT_FAILURE; +@@ -41,6 +99,18 @@ int main(int argc, char **argv) + + close(fd); + ++ rc = get_immutable(path); ++ if (rc < 0) { ++ perror("ioctl(FS_IOC_GETFLAGS)"); ++ return EXIT_FAILURE; ++ } else if (rc) { ++ rc = set_immutable(path, 0); ++ if (rc < 0) { ++ perror("ioctl(FS_IOC_SETFLAGS)"); ++ return EXIT_FAILURE; ++ } ++ } ++ + fd = open(path, O_RDONLY); + if (fd < 0) { + perror("open"); diff --git a/patch/kernel/udoo-default/patch-3.14.63-64.patch b/patch/kernel/udoo-default/patch-3.14.63-64.patch new file mode 100644 index 000000000..4e1bc88a6 --- /dev/null +++ 
b/patch/kernel/udoo-default/patch-3.14.63-64.patch @@ -0,0 +1,2749 @@ +diff --git a/Makefile b/Makefile +index 0843ef4cc0a4..de41fa82652f 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 63 ++SUBLEVEL = 64 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c +index 81e6ae0220bc..887535c4c93d 100644 +--- a/arch/mips/kernel/traps.c ++++ b/arch/mips/kernel/traps.c +@@ -688,15 +688,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode) + asmlinkage void do_ov(struct pt_regs *regs) + { + enum ctx_state prev_state; +- siginfo_t info; ++ siginfo_t info = { ++ .si_signo = SIGFPE, ++ .si_code = FPE_INTOVF, ++ .si_addr = (void __user *)regs->cp0_epc, ++ }; + + prev_state = exception_enter(); + die_if_kernel("Integer overflow", regs); + +- info.si_code = FPE_INTOVF; +- info.si_signo = SIGFPE; +- info.si_errno = 0; +- info.si_addr = (void __user *) regs->cp0_epc; + force_sig_info(SIGFPE, &info, current); + exception_exit(prev_state); + } +@@ -797,7 +797,7 @@ out: + static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, + const char *str) + { +- siginfo_t info; ++ siginfo_t info = { 0 }; + char b[40]; + + #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP +@@ -825,7 +825,6 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, + else + info.si_code = FPE_INTOVF; + info.si_signo = SIGFPE; +- info.si_errno = 0; + info.si_addr = (void __user *) regs->cp0_epc; + force_sig_info(SIGFPE, &info, current); + break; +diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S +index 92a2e9333620..b74ac9c5710b 100644 +--- a/arch/x86/ia32/ia32entry.S ++++ b/arch/x86/ia32/ia32entry.S +@@ -422,6 +422,7 @@ ENTRY(ia32_syscall) + /*CFI_REL_OFFSET cs,CS-RIP*/ + CFI_REL_OFFSET rip,RIP-RIP + PARAVIRT_ADJUST_EXCEPTION_FRAME ++ ASM_CLAC /* Do this early to minimize exposure */ + SWAPGS + /* + * No need to follow this irqs on/off section: the syscall +diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c +index 3a2ae4c88948..398c7a908c17 100644 +--- a/arch/x86/kernel/acpi/sleep.c ++++ b/arch/x86/kernel/acpi/sleep.c +@@ -16,6 +16,7 @@ + #include + #include + ++#include + #include "../../realmode/rm/wakeup.h" + #include "sleep.h" + +@@ -107,7 +108,13 @@ int x86_acpi_suspend_lowlevel(void) + saved_magic = 0x123456789abcdef0L; + #endif /* CONFIG_64BIT */ + ++ /* ++ * Pause/unpause graph tracing around do_suspend_lowlevel as it has ++ * inconsistent call/return info after it jumps to the wakeup vector. 
++ */ ++ pause_graph_tracing(); + do_suspend_lowlevel(); ++ unpause_graph_tracing(); + return 0; + } + +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index 6fecf0bde105..1e82d2a1e205 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -674,19 +674,18 @@ static int ata_ioc32(struct ata_port *ap) + int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, + int cmd, void __user *arg) + { +- int val = -EINVAL, rc = -EINVAL; ++ unsigned long val; ++ int rc = -EINVAL; + unsigned long flags; + + switch (cmd) { +- case ATA_IOC_GET_IO32: ++ case HDIO_GET_32BIT: + spin_lock_irqsave(ap->lock, flags); + val = ata_ioc32(ap); + spin_unlock_irqrestore(ap->lock, flags); +- if (copy_to_user(arg, &val, 1)) +- return -EFAULT; +- return 0; ++ return put_user(val, (unsigned long __user *)arg); + +- case ATA_IOC_SET_IO32: ++ case HDIO_SET_32BIT: + val = (unsigned long) arg; + rc = 0; + spin_lock_irqsave(ap->lock, flags); +diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c +index b335c6ab5efe..b2dd473237e8 100644 +--- a/drivers/edac/edac_mc_sysfs.c ++++ b/drivers/edac/edac_mc_sysfs.c +@@ -973,21 +973,26 @@ nomem: + */ + int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) + { ++ char *name; + int i, err; + + /* + * The memory controller needs its own bus, in order to avoid + * namespace conflicts at /sys/bus/edac. + */ +- mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); +- if (!mci->bus->name) ++ name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); ++ if (!name) + return -ENOMEM; + ++ mci->bus->name = name; ++ + edac_dbg(0, "creating bus %s\n", mci->bus->name); + + err = bus_register(mci->bus); +- if (err < 0) ++ if (err < 0) { ++ kfree(name); + return err; ++ } + + /* get the /sys/devices/system/edac subsys reference */ + mci->dev.type = &mci_attr_type; +@@ -1071,7 +1076,8 @@ fail: + fail2: + device_unregister(&mci->dev); + bus_unregister(mci->bus); +- kfree(mci->bus->name); ++ kfree(name); ++ + return err; + } + +@@ -1102,10 +1108,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) + + void edac_unregister_sysfs(struct mem_ctl_info *mci) + { ++ const char *name = mci->bus->name; ++ + edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev)); + device_unregister(&mci->dev); + bus_unregister(mci->bus); +- kfree(mci->bus->name); ++ kfree(name); + } + + static void mc_attr_release(struct device *dev) +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c +index c0f284230a39..ed8d93cbd1ca 100644 +--- a/drivers/gpu/drm/ast/ast_main.c ++++ b/drivers/gpu/drm/ast/ast_main.c +@@ -124,7 +124,7 @@ static int ast_get_dram_info(struct drm_device *dev) + } while (ast_read32(ast, 0x10000) != 0x01); + data = ast_read32(ast, 0x10004); + +- if (data & 0x400) ++ if (data & 0x40) + ast->dram_bus_width = 16; + else + ast->dram_bus_width = 32; +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c +index a49ce4a6e72f..1320a81df792 100644 +--- a/drivers/infiniband/ulp/isert/ib_isert.c ++++ b/drivers/infiniband/ulp/isert/ib_isert.c +@@ -2071,6 +2071,24 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) + return isert_post_response(isert_conn, isert_cmd); + } + ++static void ++isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) ++{ ++ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); ++ struct isert_conn *isert_conn = (struct isert_conn *)conn->context; ++ struct isert_device *device = isert_conn->conn_device; ++ ++ 
spin_lock_bh(&conn->cmd_lock); ++ if (!list_empty(&cmd->i_conn_node)) ++ list_del_init(&cmd->i_conn_node); ++ spin_unlock_bh(&conn->cmd_lock); ++ ++ if (cmd->data_direction == DMA_TO_DEVICE) ++ iscsit_stop_dataout_timer(cmd); ++ ++ device->unreg_rdma_mem(isert_cmd, isert_conn); ++} ++ + static int + isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, + bool nopout_response) +@@ -2999,6 +3017,7 @@ static struct iscsit_transport iser_target_transport = { + .iscsit_get_dataout = isert_get_dataout, + .iscsit_queue_data_in = isert_put_datain, + .iscsit_queue_status = isert_put_response, ++ .iscsit_aborted_task = isert_aborted_task, + }; + + static int __init isert_init(void) +diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c +index 0097b8dae5bc..727a88d9a708 100644 +--- a/drivers/infiniband/ulp/srpt/ib_srpt.c ++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c +@@ -3093,6 +3093,14 @@ static void srpt_queue_tm_rsp(struct se_cmd *cmd) + srpt_queue_response(cmd); + } + ++static void srpt_aborted_task(struct se_cmd *cmd) ++{ ++ struct srpt_send_ioctx *ioctx = container_of(cmd, ++ struct srpt_send_ioctx, cmd); ++ ++ srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx); ++} ++ + static int srpt_queue_status(struct se_cmd *cmd) + { + struct srpt_send_ioctx *ioctx; +@@ -3940,6 +3948,7 @@ static struct target_core_fabric_ops srpt_template = { + .queue_data_in = srpt_queue_data_in, + .queue_status = srpt_queue_status, + .queue_tm_rsp = srpt_queue_tm_rsp, ++ .aborted_task = srpt_aborted_task, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c +index 28b4bea7c109..a8e52ef6e266 100644 +--- a/drivers/iommu/amd_iommu_init.c ++++ b/drivers/iommu/amd_iommu_init.c +@@ -226,6 +226,10 @@ static enum iommu_init_state init_state = IOMMU_START_STATE; + static int amd_iommu_enable_interrupts(void); + static int __init iommu_go_to_state(enum iommu_init_state state); + ++static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu, ++ u8 bank, u8 cntr, u8 fxn, ++ u64 *value, bool is_write); ++ + static inline void update_last_devid(u16 devid) + { + if (devid > amd_iommu_last_bdf) +@@ -1182,8 +1186,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu) + amd_iommu_pc_present = true; + + /* Check if the performance counters can be written to */ +- if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) || +- (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) || ++ if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) || ++ (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) || + (val != val2)) { + pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n"); + amd_iommu_pc_present = false; +@@ -2314,22 +2318,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid) + } + EXPORT_SYMBOL(amd_iommu_pc_get_max_counters); + +-int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, ++static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu, ++ u8 bank, u8 cntr, u8 fxn, + u64 *value, bool is_write) + { +- struct amd_iommu *iommu; + u32 offset; + u32 max_offset_lim; + +- /* Make sure the IOMMU PC resource is available */ +- if (!amd_iommu_pc_present) +- return -ENODEV; +- +- /* Locate the iommu associated with the device ID */ +- iommu = amd_iommu_rlookup_table[devid]; +- + /* Check for valid iommu and pc register indexing */ +- if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7))) ++ if (WARN_ON((fxn > 0x28) || (fxn & 7))) + 
return -ENODEV; + + offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn); +@@ -2353,3 +2350,16 @@ int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, + return 0; + } + EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val); ++ ++int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, ++ u64 *value, bool is_write) ++{ ++ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; ++ ++ /* Make sure the IOMMU PC resource is available */ ++ if (!amd_iommu_pc_present || iommu == NULL) ++ return -ENODEV; ++ ++ return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn, ++ value, is_write); ++} +diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c +index 2a1b6e037e1a..0134ba32a057 100644 +--- a/drivers/mtd/ubi/upd.c ++++ b/drivers/mtd/ubi/upd.c +@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, + vol->changing_leb = 1; + vol->ch_lnum = req->lnum; + +- vol->upd_buf = vmalloc(req->bytes); ++ vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size)); + if (!vol->upd_buf) + return -ENOMEM; + +diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c +index 1817f3f2b02d..f46c0a6c5016 100644 +--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c ++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c +@@ -684,6 +684,20 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) + qlt_xmit_tm_rsp(mcmd); + } + ++static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) ++{ ++ struct qla_tgt_cmd *cmd = container_of(se_cmd, ++ struct qla_tgt_cmd, se_cmd); ++ struct scsi_qla_host *vha = cmd->vha; ++ struct qla_hw_data *ha = vha->hw; ++ ++ if (!cmd->sg_mapped) ++ return; ++ ++ pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); ++ cmd->sg_mapped = 0; ++} ++ + /* Local pointer to allocated TCM configfs fabric module */ + struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; + struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; +@@ -1886,6 +1900,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = { + .queue_data_in = tcm_qla2xxx_queue_data_in, + .queue_status = tcm_qla2xxx_queue_status, + .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, ++ .aborted_task = tcm_qla2xxx_aborted_task, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c +@@ -1935,6 +1950,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { + .queue_data_in = tcm_qla2xxx_queue_data_in, + .queue_status = tcm_qla2xxx_queue_status, + .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, ++ .aborted_task = tcm_qla2xxx_aborted_task, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index c066e6e298c3..5600eab07865 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -500,6 +500,18 @@ static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd) + return 0; + } + ++static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) ++{ ++ bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD); ++ ++ spin_lock_bh(&conn->cmd_lock); ++ if (!list_empty(&cmd->i_conn_node)) ++ list_del_init(&cmd->i_conn_node); ++ spin_unlock_bh(&conn->cmd_lock); ++ ++ __iscsit_free_cmd(cmd, scsi_cmd, true); ++} ++ + static struct iscsit_transport iscsi_target_transport = { + .name = "iSCSI/TCP", + .transport_type = ISCSI_TCP, +@@ -514,6 +526,7 @@ static struct iscsit_transport iscsi_target_transport = { + .iscsit_response_queue 
= iscsit_response_queue, + .iscsit_queue_data_in = iscsit_queue_rsp, + .iscsit_queue_status = iscsit_queue_rsp, ++ .iscsit_aborted_task = iscsit_aborted_task, + }; + + static int __init iscsi_target_init_module(void) +diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c +index 83465617ad6d..4a28c5f0dfd1 100644 +--- a/drivers/target/iscsi/iscsi_target_configfs.c ++++ b/drivers/target/iscsi/iscsi_target_configfs.c +@@ -1815,6 +1815,13 @@ static void lio_queue_tm_rsp(struct se_cmd *se_cmd) + iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); + } + ++static void lio_aborted_task(struct se_cmd *se_cmd) ++{ ++ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); ++ ++ cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd); ++} ++ + static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg) + { + struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; +@@ -2013,6 +2020,7 @@ int iscsi_target_register_configfs(void) + fabric->tf_ops.queue_data_in = &lio_queue_data_in; + fabric->tf_ops.queue_status = &lio_queue_status; + fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp; ++ fabric->tf_ops.aborted_task = &lio_aborted_task; + /* + * Setup function pointers for generic logic in target_core_fabric_configfs.c + */ +diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c +index 1e406af4ee47..2e96ae6cf3c1 100644 +--- a/drivers/target/iscsi/iscsi_target_util.c ++++ b/drivers/target/iscsi/iscsi_target_util.c +@@ -705,8 +705,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd) + } + EXPORT_SYMBOL(iscsit_release_cmd); + +-static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, +- bool check_queues) ++void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, ++ bool check_queues) + { + struct iscsi_conn *conn = cmd->conn; + +diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h +index 561a424d1980..a68508c4fec8 100644 +--- a/drivers/target/iscsi/iscsi_target_util.h ++++ b/drivers/target/iscsi/iscsi_target_util.h +@@ -30,6 +30,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co + extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); + extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); + extern void iscsit_release_cmd(struct iscsi_cmd *); ++extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool); + extern void iscsit_free_cmd(struct iscsi_cmd *, bool); + extern int iscsit_check_session_usage_count(struct iscsi_session *); + extern void iscsit_dec_session_usage_count(struct iscsi_session *); +diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c +index 67c802c93ef3..fd974d69458b 100644 +--- a/drivers/target/loopback/tcm_loop.c ++++ b/drivers/target/loopback/tcm_loop.c +@@ -892,6 +892,11 @@ static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) + wake_up(&tl_tmr->tl_tmr_wait); + } + ++static void tcm_loop_aborted_task(struct se_cmd *se_cmd) ++{ ++ return; ++} ++ + static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba) + { + switch (tl_hba->tl_proto_id) { +@@ -1456,6 +1461,7 @@ static int tcm_loop_register_configfs(void) + fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in; + fabric->tf_ops.queue_status = &tcm_loop_queue_status; + fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; ++ fabric->tf_ops.aborted_task = &tcm_loop_aborted_task; + + /* + * Setup function pointers for generic logic in 
target_core_fabric_configfs.c +diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c +index 24884cac19ce..ad04ea928e4f 100644 +--- a/drivers/target/sbp/sbp_target.c ++++ b/drivers/target/sbp/sbp_target.c +@@ -1846,6 +1846,11 @@ static void sbp_queue_tm_rsp(struct se_cmd *se_cmd) + { + } + ++static void sbp_aborted_task(struct se_cmd *se_cmd) ++{ ++ return; ++} ++ + static int sbp_check_stop_free(struct se_cmd *se_cmd) + { + struct sbp_target_request *req = container_of(se_cmd, +@@ -2526,6 +2531,7 @@ static struct target_core_fabric_ops sbp_ops = { + .queue_data_in = sbp_queue_data_in, + .queue_status = sbp_queue_status, + .queue_tm_rsp = sbp_queue_tm_rsp, ++ .aborted_task = sbp_aborted_task, + .check_stop_free = sbp_check_stop_free, + + .fabric_make_wwn = sbp_make_tport, +diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c +index f30385385544..756def38c77a 100644 +--- a/drivers/target/target_core_configfs.c ++++ b/drivers/target/target_core_configfs.c +@@ -457,6 +457,10 @@ static int target_fabric_tf_ops_check( + pr_err("Missing tfo->queue_tm_rsp()\n"); + return -EINVAL; + } ++ if (!tfo->aborted_task) { ++ pr_err("Missing tfo->aborted_task()\n"); ++ return -EINVAL; ++ } + /* + * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() + * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in +diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c +index 093b8cb85de7..e366b812f0e1 100644 +--- a/drivers/target/target_core_device.c ++++ b/drivers/target/target_core_device.c +@@ -1577,6 +1577,49 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) + return dev; + } + ++/* ++ * Check if the underlying struct block_device request_queue supports ++ * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM ++ * in ATA and we need to set TPE=1 ++ */ ++bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, ++ struct request_queue *q, int block_size) ++{ ++ if (!blk_queue_discard(q)) ++ return false; ++ ++ attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) / ++ block_size; ++ /* ++ * Currently hardcoded to 1 in Linux/SCSI code.. ++ */ ++ attrib->max_unmap_block_desc_count = 1; ++ attrib->unmap_granularity = q->limits.discard_granularity / block_size; ++ attrib->unmap_granularity_alignment = q->limits.discard_alignment / ++ block_size; ++ return true; ++} ++EXPORT_SYMBOL(target_configure_unmap_from_queue); ++ ++/* ++ * Convert from blocksize advertised to the initiator to the 512 byte ++ * units unconditionally used by the Linux block layer. 
++ */ ++sector_t target_to_linux_sector(struct se_device *dev, sector_t lb) ++{ ++ switch (dev->dev_attrib.block_size) { ++ case 4096: ++ return lb << 3; ++ case 2048: ++ return lb << 2; ++ case 1024: ++ return lb << 1; ++ default: ++ return lb; ++ } ++} ++EXPORT_SYMBOL(target_to_linux_sector); ++ + int target_configure_device(struct se_device *dev) + { + struct se_hba *hba = dev->se_hba; +diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c +index b199f1e21d0e..6fe5b503f6e1 100644 +--- a/drivers/target/target_core_file.c ++++ b/drivers/target/target_core_file.c +@@ -164,25 +164,11 @@ static int fd_configure_device(struct se_device *dev) + " block_device blocks: %llu logical_block_size: %d\n", + dev_size, div_u64(dev_size, fd_dev->fd_block_size), + fd_dev->fd_block_size); +- /* +- * Check if the underlying struct block_device request_queue supports +- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM +- * in ATA and we need to set TPE=1 +- */ +- if (blk_queue_discard(q)) { +- dev->dev_attrib.max_unmap_lba_count = +- q->limits.max_discard_sectors; +- /* +- * Currently hardcoded to 1 in Linux/SCSI code.. +- */ +- dev->dev_attrib.max_unmap_block_desc_count = 1; +- dev->dev_attrib.unmap_granularity = +- q->limits.discard_granularity >> 9; +- dev->dev_attrib.unmap_granularity_alignment = +- q->limits.discard_alignment; ++ ++ if (target_configure_unmap_from_queue(&dev->dev_attrib, q, ++ fd_dev->fd_block_size)) + pr_debug("IFILE: BLOCK Discard support available," +- " disabled by default\n"); +- } ++ " disabled by default\n"); + /* + * Enable write same emulation for IBLOCK and use 0xFFFF as + * the smaller WRITE_SAME(10) only has a two-byte block count. +@@ -545,9 +531,12 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb) + if (S_ISBLK(inode->i_mode)) { + /* The backend is block device, use discard */ + struct block_device *bdev = inode->i_bdev; ++ struct se_device *dev = cmd->se_dev; + +- ret = blkdev_issue_discard(bdev, lba, +- nolb, GFP_KERNEL, 0); ++ ret = blkdev_issue_discard(bdev, ++ target_to_linux_sector(dev, lba), ++ target_to_linux_sector(dev, nolb), ++ GFP_KERNEL, 0); + if (ret < 0) { + pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n", + ret); +diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c +index feefe24a88f7..357b9fb61499 100644 +--- a/drivers/target/target_core_iblock.c ++++ b/drivers/target/target_core_iblock.c +@@ -126,27 +126,11 @@ static int iblock_configure_device(struct se_device *dev) + dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); + dev->dev_attrib.hw_queue_depth = q->nr_requests; + +- /* +- * Check if the underlying struct block_device request_queue supports +- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM +- * in ATA and we need to set TPE=1 +- */ +- if (blk_queue_discard(q)) { +- dev->dev_attrib.max_unmap_lba_count = +- q->limits.max_discard_sectors; +- +- /* +- * Currently hardcoded to 1 in Linux/SCSI code.. 
+- */ +- dev->dev_attrib.max_unmap_block_desc_count = 1; +- dev->dev_attrib.unmap_granularity = +- q->limits.discard_granularity >> 9; +- dev->dev_attrib.unmap_granularity_alignment = +- q->limits.discard_alignment; +- ++ if (target_configure_unmap_from_queue(&dev->dev_attrib, q, ++ dev->dev_attrib.hw_block_size)) + pr_debug("IBLOCK: BLOCK Discard support available," +- " disabled by default\n"); +- } ++ " disabled by default\n"); ++ + /* + * Enable write same emulation for IBLOCK and use 0xFFFF as + * the smaller WRITE_SAME(10) only has a two-byte block count. +@@ -418,9 +402,13 @@ iblock_do_unmap(struct se_cmd *cmd, void *priv, + sector_t lba, sector_t nolb) + { + struct block_device *bdev = priv; ++ struct se_device *dev = cmd->se_dev; + int ret; + +- ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0); ++ ret = blkdev_issue_discard(bdev, ++ target_to_linux_sector(dev, lba), ++ target_to_linux_sector(dev, nolb), ++ GFP_KERNEL, 0); + if (ret < 0) { + pr_err("blkdev_issue_discard() failed: %d\n", ret); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +@@ -460,8 +448,10 @@ iblock_execute_write_same(struct se_cmd *cmd) + struct scatterlist *sg; + struct bio *bio; + struct bio_list list; +- sector_t block_lba = cmd->t_task_lba; +- sector_t sectors = sbc_get_write_same_sectors(cmd); ++ struct se_device *dev = cmd->se_dev; ++ sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba); ++ sector_t sectors = target_to_linux_sector(dev, ++ sbc_get_write_same_sectors(cmd)); + + sg = &cmd->t_data_sg[0]; + +@@ -670,12 +660,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, + enum dma_data_direction data_direction) + { + struct se_device *dev = cmd->se_dev; ++ sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba); + struct iblock_req *ibr; + struct bio *bio, *bio_start; + struct bio_list list; + struct scatterlist *sg; + u32 sg_num = sgl_nents; +- sector_t block_lba; + unsigned bio_cnt; + int rw = 0; + int i; +@@ -701,24 +691,6 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, + rw = READ; + } + +- /* +- * Convert the blocksize advertised to the initiator to the 512 byte +- * units unconditionally used by the Linux block layer. 
+- */ +- if (dev->dev_attrib.block_size == 4096) +- block_lba = (cmd->t_task_lba << 3); +- else if (dev->dev_attrib.block_size == 2048) +- block_lba = (cmd->t_task_lba << 2); +- else if (dev->dev_attrib.block_size == 1024) +- block_lba = (cmd->t_task_lba << 1); +- else if (dev->dev_attrib.block_size == 512) +- block_lba = cmd->t_task_lba; +- else { +- pr_err("Unsupported SCSI -> BLOCK LBA conversion:" +- " %u\n", dev->dev_attrib.block_size); +- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +- } +- + ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); + if (!ibr) + goto fail; +diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c +index 70c638f730af..47a90d631a90 100644 +--- a/drivers/target/target_core_tmr.c ++++ b/drivers/target/target_core_tmr.c +@@ -76,25 +76,29 @@ void core_tmr_release_req( + } + + spin_lock_irqsave(&dev->se_tmr_lock, flags); +- list_del(&tmr->tmr_list); ++ list_del_init(&tmr->tmr_list); + spin_unlock_irqrestore(&dev->se_tmr_lock, flags); + + kfree(tmr); + } + +-static void core_tmr_handle_tas_abort( +- struct se_node_acl *tmr_nacl, +- struct se_cmd *cmd, +- int tas) ++static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) + { ++ unsigned long flags; ++ bool remove = true, send_tas; + /* + * TASK ABORTED status (TAS) bit support +- */ +- if ((tmr_nacl && +- (tmr_nacl == cmd->se_sess->se_node_acl)) || tas) ++ */ ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ send_tas = (cmd->transport_state & CMD_T_TAS); ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ ++ if (send_tas) { ++ remove = false; + transport_send_task_abort(cmd); ++ } + +- transport_cmd_finish_abort(cmd, 0); ++ transport_cmd_finish_abort(cmd, remove); + } + + static int target_check_cdb_and_preempt(struct list_head *list, +@@ -112,6 +116,47 @@ static int target_check_cdb_and_preempt(struct list_head *list, + return 1; + } + ++static bool __target_check_io_state(struct se_cmd *se_cmd, ++ struct se_session *tmr_sess, int tas) ++{ ++ struct se_session *sess = se_cmd->se_sess; ++ ++ assert_spin_locked(&sess->sess_cmd_lock); ++ WARN_ON_ONCE(!irqs_disabled()); ++ /* ++ * If command already reached CMD_T_COMPLETE state within ++ * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown, ++ * this se_cmd has been passed to fabric driver and will ++ * not be aborted. ++ * ++ * Otherwise, obtain a local se_cmd->cmd_kref now for TMR ++ * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as ++ * long as se_cmd->cmd_kref is still active unless zero. 
++ */ ++ spin_lock(&se_cmd->t_state_lock); ++ if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) { ++ pr_debug("Attempted to abort io tag: %u already complete or" ++ " fabric stop, skipping\n", ++ se_cmd->se_tfo->get_task_tag(se_cmd)); ++ spin_unlock(&se_cmd->t_state_lock); ++ return false; ++ } ++ if (sess->sess_tearing_down || se_cmd->cmd_wait_set) { ++ pr_debug("Attempted to abort io tag: %u already shutdown," ++ " skipping\n", se_cmd->se_tfo->get_task_tag(se_cmd)); ++ spin_unlock(&se_cmd->t_state_lock); ++ return false; ++ } ++ se_cmd->transport_state |= CMD_T_ABORTED; ++ ++ if ((tmr_sess != se_cmd->se_sess) && tas) ++ se_cmd->transport_state |= CMD_T_TAS; ++ ++ spin_unlock(&se_cmd->t_state_lock); ++ ++ return kref_get_unless_zero(&se_cmd->cmd_kref); ++} ++ + void core_tmr_abort_task( + struct se_device *dev, + struct se_tmr_req *tmr, +@@ -134,33 +179,19 @@ void core_tmr_abort_task( + printk("ABORT_TASK: Found referenced %s task_tag: %u\n", + se_cmd->se_tfo->get_fabric_name(), ref_tag); + +- spin_lock(&se_cmd->t_state_lock); +- if (se_cmd->transport_state & CMD_T_COMPLETE) { +- printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag); +- spin_unlock(&se_cmd->t_state_lock); ++ if (!__target_check_io_state(se_cmd, se_sess, 0)) { + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); ++ target_put_sess_cmd(se_sess, se_cmd); + goto out; + } +- se_cmd->transport_state |= CMD_T_ABORTED; +- spin_unlock(&se_cmd->t_state_lock); + + list_del_init(&se_cmd->se_cmd_list); +- kref_get(&se_cmd->cmd_kref); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + + cancel_work_sync(&se_cmd->work); + transport_wait_for_tasks(se_cmd); +- /* +- * Now send SAM_STAT_TASK_ABORTED status for the referenced +- * se_cmd descriptor.. +- */ +- transport_send_task_abort(se_cmd); +- /* +- * Also deal with possible extra acknowledge reference.. +- */ +- if (se_cmd->se_cmd_flags & SCF_ACK_KREF) +- target_put_sess_cmd(se_sess, se_cmd); + ++ transport_cmd_finish_abort(se_cmd, true); + target_put_sess_cmd(se_sess, se_cmd); + + printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" +@@ -182,9 +213,11 @@ static void core_tmr_drain_tmr_list( + struct list_head *preempt_and_abort_list) + { + LIST_HEAD(drain_tmr_list); ++ struct se_session *sess; + struct se_tmr_req *tmr_p, *tmr_pp; + struct se_cmd *cmd; + unsigned long flags; ++ bool rc; + /* + * Release all pending and outgoing TMRs aside from the received + * LUN_RESET tmr.. 
+@@ -210,17 +243,39 @@ static void core_tmr_drain_tmr_list( + if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) + continue; + ++ sess = cmd->se_sess; ++ if (WARN_ON_ONCE(!sess)) ++ continue; ++ ++ spin_lock(&sess->sess_cmd_lock); + spin_lock(&cmd->t_state_lock); +- if (!(cmd->transport_state & CMD_T_ACTIVE)) { ++ if (!(cmd->transport_state & CMD_T_ACTIVE) || ++ (cmd->transport_state & CMD_T_FABRIC_STOP)) { + spin_unlock(&cmd->t_state_lock); ++ spin_unlock(&sess->sess_cmd_lock); + continue; + } + if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { + spin_unlock(&cmd->t_state_lock); ++ spin_unlock(&sess->sess_cmd_lock); + continue; + } ++ if (sess->sess_tearing_down || cmd->cmd_wait_set) { ++ spin_unlock(&cmd->t_state_lock); ++ spin_unlock(&sess->sess_cmd_lock); ++ continue; ++ } ++ cmd->transport_state |= CMD_T_ABORTED; + spin_unlock(&cmd->t_state_lock); + ++ rc = kref_get_unless_zero(&cmd->cmd_kref); ++ if (!rc) { ++ printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n"); ++ spin_unlock(&sess->sess_cmd_lock); ++ continue; ++ } ++ spin_unlock(&sess->sess_cmd_lock); ++ + list_move_tail(&tmr_p->tmr_list, &drain_tmr_list); + } + spin_unlock_irqrestore(&dev->se_tmr_lock, flags); +@@ -234,20 +289,26 @@ static void core_tmr_drain_tmr_list( + (preempt_and_abort_list) ? "Preempt" : "", tmr_p, + tmr_p->function, tmr_p->response, cmd->t_state); + ++ cancel_work_sync(&cmd->work); ++ transport_wait_for_tasks(cmd); ++ + transport_cmd_finish_abort(cmd, 1); ++ target_put_sess_cmd(cmd->se_sess, cmd); + } + } + + static void core_tmr_drain_state_list( + struct se_device *dev, + struct se_cmd *prout_cmd, +- struct se_node_acl *tmr_nacl, ++ struct se_session *tmr_sess, + int tas, + struct list_head *preempt_and_abort_list) + { + LIST_HEAD(drain_task_list); ++ struct se_session *sess; + struct se_cmd *cmd, *next; + unsigned long flags; ++ int rc; + + /* + * Complete outstanding commands with TASK_ABORTED SAM status. +@@ -286,6 +347,16 @@ static void core_tmr_drain_state_list( + if (prout_cmd == cmd) + continue; + ++ sess = cmd->se_sess; ++ if (WARN_ON_ONCE(!sess)) ++ continue; ++ ++ spin_lock(&sess->sess_cmd_lock); ++ rc = __target_check_io_state(cmd, tmr_sess, tas); ++ spin_unlock(&sess->sess_cmd_lock); ++ if (!rc) ++ continue; ++ + list_move_tail(&cmd->state_list, &drain_task_list); + cmd->state_active = false; + } +@@ -293,7 +364,7 @@ static void core_tmr_drain_state_list( + + while (!list_empty(&drain_task_list)) { + cmd = list_entry(drain_task_list.next, struct se_cmd, state_list); +- list_del(&cmd->state_list); ++ list_del_init(&cmd->state_list); + + pr_debug("LUN_RESET: %s cmd: %p" + " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d" +@@ -317,16 +388,11 @@ static void core_tmr_drain_state_list( + * loop above, but we do it down here given that + * cancel_work_sync may block. 
+ */ +- if (cmd->t_state == TRANSPORT_COMPLETE) +- cancel_work_sync(&cmd->work); +- +- spin_lock_irqsave(&cmd->t_state_lock, flags); +- target_stop_cmd(cmd, &flags); +- +- cmd->transport_state |= CMD_T_ABORTED; +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ cancel_work_sync(&cmd->work); ++ transport_wait_for_tasks(cmd); + +- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas); ++ core_tmr_handle_tas_abort(cmd, tas); ++ target_put_sess_cmd(cmd->se_sess, cmd); + } + } + +@@ -338,6 +404,7 @@ int core_tmr_lun_reset( + { + struct se_node_acl *tmr_nacl = NULL; + struct se_portal_group *tmr_tpg = NULL; ++ struct se_session *tmr_sess = NULL; + int tas; + /* + * TASK_ABORTED status bit, this is configurable via ConfigFS +@@ -356,8 +423,9 @@ int core_tmr_lun_reset( + * or struct se_device passthrough.. + */ + if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { +- tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; +- tmr_tpg = tmr->task_cmd->se_sess->se_tpg; ++ tmr_sess = tmr->task_cmd->se_sess; ++ tmr_nacl = tmr_sess->se_node_acl; ++ tmr_tpg = tmr_sess->se_tpg; + if (tmr_nacl && tmr_tpg) { + pr_debug("LUN_RESET: TMR caller fabric: %s" + " initiator port %s\n", +@@ -370,7 +438,7 @@ int core_tmr_lun_reset( + dev->transport->name, tas); + + core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); +- core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas, ++ core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas, + preempt_and_abort_list); + + /* +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index 7afea9b59e2c..cbf927a67160 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -509,9 +509,6 @@ void transport_deregister_session(struct se_session *se_sess) + } + EXPORT_SYMBOL(transport_deregister_session); + +-/* +- * Called with cmd->t_state_lock held. +- */ + static void target_remove_from_state_list(struct se_cmd *cmd) + { + struct se_device *dev = cmd->se_dev; +@@ -536,10 +533,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, + { + unsigned long flags; + +- spin_lock_irqsave(&cmd->t_state_lock, flags); +- if (write_pending) +- cmd->t_state = TRANSPORT_WRITE_PENDING; +- + if (remove_from_lists) { + target_remove_from_state_list(cmd); + +@@ -549,6 +542,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, + cmd->se_lun = NULL; + } + ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ if (write_pending) ++ cmd->t_state = TRANSPORT_WRITE_PENDING; ++ + /* + * Determine if frontend context caller is requesting the stopping of + * this command for frontend exceptions. +@@ -603,9 +600,20 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) + + void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) + { ++ bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); ++ ++ if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) ++ transport_lun_remove_cmd(cmd); ++ /* ++ * Allow the fabric driver to unmap any resources before ++ * releasing the descriptor via TFO->release_cmd() ++ */ ++ if (remove) ++ cmd->se_tfo->aborted_task(cmd); ++ + if (transport_cmd_check_stop_to_fabric(cmd)) + return; +- if (remove) ++ if (remove && ack_kref) + transport_put_cmd(cmd); + } + +@@ -673,7 +681,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) + * Check for case where an explicit ABORT_TASK has been received + * and transport_wait_for_tasks() will be waiting for completion.. 
+ */ +- if (cmd->transport_state & CMD_T_ABORTED && ++ if (cmd->transport_state & CMD_T_ABORTED || + cmd->transport_state & CMD_T_STOP) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + complete_all(&cmd->t_transport_stop_comp); +@@ -1746,19 +1754,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd) + return true; + } + ++static int __transport_check_aborted_status(struct se_cmd *, int); ++ + void target_execute_cmd(struct se_cmd *cmd) + { + /* +- * If the received CDB has aleady been aborted stop processing it here. +- */ +- if (transport_check_aborted_status(cmd, 1)) +- return; +- +- /* + * Determine if frontend context caller is requesting the stopping of + * this command for frontend exceptions. ++ * ++ * If the received CDB has aleady been aborted stop processing it here. + */ + spin_lock_irq(&cmd->t_state_lock); ++ if (__transport_check_aborted_status(cmd, 1)) { ++ spin_unlock_irq(&cmd->t_state_lock); ++ return; ++ } + if (cmd->transport_state & CMD_T_STOP) { + pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", + __func__, __LINE__, +@@ -2076,20 +2086,14 @@ static inline void transport_free_pages(struct se_cmd *cmd) + } + + /** +- * transport_release_cmd - free a command +- * @cmd: command to free ++ * transport_put_cmd - release a reference to a command ++ * @cmd: command to release + * +- * This routine unconditionally frees a command, and reference counting +- * or list removal must be done in the caller. ++ * This routine releases our reference to the command and frees it if possible. + */ +-static int transport_release_cmd(struct se_cmd *cmd) ++static int transport_put_cmd(struct se_cmd *cmd) + { + BUG_ON(!cmd->se_tfo); +- +- if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) +- core_tmr_release_req(cmd->se_tmr_req); +- if (cmd->t_task_cdb != cmd->__t_task_cdb) +- kfree(cmd->t_task_cdb); + /* + * If this cmd has been setup with target_get_sess_cmd(), drop + * the kref and call ->release_cmd() in kref callback. +@@ -2097,18 +2101,6 @@ static int transport_release_cmd(struct se_cmd *cmd) + return target_put_sess_cmd(cmd->se_sess, cmd); + } + +-/** +- * transport_put_cmd - release a reference to a command +- * @cmd: command to release +- * +- * This routine releases our reference to the command and frees it if possible. 
+- */ +-static int transport_put_cmd(struct se_cmd *cmd) +-{ +- transport_free_pages(cmd); +- return transport_release_cmd(cmd); +-} +- + void *transport_kmap_data_sg(struct se_cmd *cmd) + { + struct scatterlist *sg = cmd->t_data_sg; +@@ -2296,34 +2288,59 @@ static void transport_write_pending_qf(struct se_cmd *cmd) + } + } + +-int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) ++static bool ++__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, ++ unsigned long *flags); ++ ++static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) + { + unsigned long flags; ++ ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++} ++ ++int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) ++{ + int ret = 0; ++ bool aborted = false, tas = false; + + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { + if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) +- transport_wait_for_tasks(cmd); ++ target_wait_free_cmd(cmd, &aborted, &tas); + +- ret = transport_release_cmd(cmd); ++ if (!aborted || tas) ++ ret = transport_put_cmd(cmd); + } else { + if (wait_for_tasks) +- transport_wait_for_tasks(cmd); ++ target_wait_free_cmd(cmd, &aborted, &tas); + /* + * Handle WRITE failure case where transport_generic_new_cmd() + * has already added se_cmd to state_list, but fabric has + * failed command before I/O submission. + */ +- if (cmd->state_active) { +- spin_lock_irqsave(&cmd->t_state_lock, flags); ++ if (cmd->state_active) + target_remove_from_state_list(cmd); +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); +- } + + if (cmd->se_lun) + transport_lun_remove_cmd(cmd); + +- ret = transport_put_cmd(cmd); ++ if (!aborted || tas) ++ ret = transport_put_cmd(cmd); ++ } ++ /* ++ * If the task has been internally aborted due to TMR ABORT_TASK ++ * or LUN_RESET, target_core_tmr.c is responsible for performing ++ * the remaining calls to target_put_sess_cmd(), and not the ++ * callers of this function. 
++ */ ++ if (aborted) { ++ pr_debug("Detected CMD_T_ABORTED for ITT: %u\n", ++ cmd->se_tfo->get_task_tag(cmd)); ++ wait_for_completion(&cmd->cmd_wait_comp); ++ cmd->se_tfo->release_cmd(cmd); ++ ret = 1; + } + return ret; + } +@@ -2366,24 +2383,44 @@ out: + } + EXPORT_SYMBOL(target_get_sess_cmd); + ++static void target_free_cmd_mem(struct se_cmd *cmd) ++{ ++ transport_free_pages(cmd); ++ ++ if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) ++ core_tmr_release_req(cmd->se_tmr_req); ++ if (cmd->t_task_cdb != cmd->__t_task_cdb) ++ kfree(cmd->t_task_cdb); ++} ++ + static void target_release_cmd_kref(struct kref *kref) + { + struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); + struct se_session *se_sess = se_cmd->se_sess; ++ bool fabric_stop; + + if (list_empty(&se_cmd->se_cmd_list)) { + spin_unlock(&se_sess->sess_cmd_lock); ++ target_free_cmd_mem(se_cmd); + se_cmd->se_tfo->release_cmd(se_cmd); + return; + } +- if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { ++ ++ spin_lock(&se_cmd->t_state_lock); ++ fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP); ++ spin_unlock(&se_cmd->t_state_lock); ++ ++ if (se_cmd->cmd_wait_set || fabric_stop) { ++ list_del_init(&se_cmd->se_cmd_list); + spin_unlock(&se_sess->sess_cmd_lock); ++ target_free_cmd_mem(se_cmd); + complete(&se_cmd->cmd_wait_comp); + return; + } +- list_del(&se_cmd->se_cmd_list); ++ list_del_init(&se_cmd->se_cmd_list); + spin_unlock(&se_sess->sess_cmd_lock); + ++ target_free_cmd_mem(se_cmd); + se_cmd->se_tfo->release_cmd(se_cmd); + } + +@@ -2394,6 +2431,7 @@ static void target_release_cmd_kref(struct kref *kref) + int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) + { + if (!se_sess) { ++ target_free_cmd_mem(se_cmd); + se_cmd->se_tfo->release_cmd(se_cmd); + return 1; + } +@@ -2411,6 +2449,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) + { + struct se_cmd *se_cmd; + unsigned long flags; ++ int rc; + + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); + if (se_sess->sess_tearing_down) { +@@ -2420,8 +2459,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) + se_sess->sess_tearing_down = 1; + list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); + +- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) +- se_cmd->cmd_wait_set = 1; ++ list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) { ++ rc = kref_get_unless_zero(&se_cmd->cmd_kref); ++ if (rc) { ++ se_cmd->cmd_wait_set = 1; ++ spin_lock(&se_cmd->t_state_lock); ++ se_cmd->transport_state |= CMD_T_FABRIC_STOP; ++ spin_unlock(&se_cmd->t_state_lock); ++ } ++ } + + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + } +@@ -2434,15 +2480,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess) + { + struct se_cmd *se_cmd, *tmp_cmd; + unsigned long flags; ++ bool tas; + + list_for_each_entry_safe(se_cmd, tmp_cmd, + &se_sess->sess_wait_list, se_cmd_list) { +- list_del(&se_cmd->se_cmd_list); ++ list_del_init(&se_cmd->se_cmd_list); + + pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" + " %d\n", se_cmd, se_cmd->t_state, + se_cmd->se_tfo->get_cmd_state(se_cmd)); + ++ spin_lock_irqsave(&se_cmd->t_state_lock, flags); ++ tas = (se_cmd->transport_state & CMD_T_TAS); ++ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); ++ ++ if (!target_put_sess_cmd(se_sess, se_cmd)) { ++ if (tas) ++ target_put_sess_cmd(se_sess, se_cmd); ++ } ++ + wait_for_completion(&se_cmd->cmd_wait_comp); + pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" + " fabric 
state: %d\n", se_cmd, se_cmd->t_state, +@@ -2485,34 +2541,38 @@ int transport_clear_lun_ref(struct se_lun *lun) + return 0; + } + +-/** +- * transport_wait_for_tasks - wait for completion to occur +- * @cmd: command to wait +- * +- * Called from frontend fabric context to wait for storage engine +- * to pause and/or release frontend generated struct se_cmd. +- */ +-bool transport_wait_for_tasks(struct se_cmd *cmd) ++static bool ++__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, ++ bool *aborted, bool *tas, unsigned long *flags) ++ __releases(&cmd->t_state_lock) ++ __acquires(&cmd->t_state_lock) + { +- unsigned long flags; + +- spin_lock_irqsave(&cmd->t_state_lock, flags); ++ assert_spin_locked(&cmd->t_state_lock); ++ WARN_ON_ONCE(!irqs_disabled()); ++ ++ if (fabric_stop) ++ cmd->transport_state |= CMD_T_FABRIC_STOP; ++ ++ if (cmd->transport_state & CMD_T_ABORTED) ++ *aborted = true; ++ ++ if (cmd->transport_state & CMD_T_TAS) ++ *tas = true; ++ + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && +- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) + return false; +- } + + if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && +- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) + return false; +- } + +- if (!(cmd->transport_state & CMD_T_ACTIVE)) { +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ if (!(cmd->transport_state & CMD_T_ACTIVE)) ++ return false; ++ ++ if (fabric_stop && *aborted) + return false; +- } + + cmd->transport_state |= CMD_T_STOP; + +@@ -2521,20 +2581,37 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) + cmd, cmd->se_tfo->get_task_tag(cmd), + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); + +- spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ spin_unlock_irqrestore(&cmd->t_state_lock, *flags); + + wait_for_completion(&cmd->t_transport_stop_comp); + +- spin_lock_irqsave(&cmd->t_state_lock, flags); ++ spin_lock_irqsave(&cmd->t_state_lock, *flags); + cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); + + pr_debug("wait_for_tasks: Stopped wait_for_completion(" + "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", + cmd->se_tfo->get_task_tag(cmd)); + ++ return true; ++} ++ ++/** ++ * transport_wait_for_tasks - wait for completion to occur ++ * @cmd: command to wait ++ * ++ * Called from frontend fabric context to wait for storage engine ++ * to pause and/or release frontend generated struct se_cmd. 
++ */ ++bool transport_wait_for_tasks(struct se_cmd *cmd) ++{ ++ unsigned long flags; ++ bool ret, aborted = false, tas = false; ++ ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + +- return true; ++ return ret; + } + EXPORT_SYMBOL(transport_wait_for_tasks); + +@@ -2820,24 +2897,51 @@ after_reason: + } + EXPORT_SYMBOL(transport_send_check_condition_and_sense); + +-int transport_check_aborted_status(struct se_cmd *cmd, int send_status) ++static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status) ++ __releases(&cmd->t_state_lock) ++ __acquires(&cmd->t_state_lock) + { ++ assert_spin_locked(&cmd->t_state_lock); ++ WARN_ON_ONCE(!irqs_disabled()); ++ + if (!(cmd->transport_state & CMD_T_ABORTED)) + return 0; + +- if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) ++ /* ++ * If cmd has been aborted but either no status is to be sent or it has ++ * already been sent, just return ++ */ ++ if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) { ++ if (send_status) ++ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; + return 1; ++ } + +- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n", +- cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); ++ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:" ++ " 0x%02x ITT: 0x%08x\n", cmd->t_task_cdb[0], ++ cmd->se_tfo->get_task_tag(cmd)); + +- cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; ++ cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; + cmd->scsi_status = SAM_STAT_TASK_ABORTED; + trace_target_cmd_complete(cmd); ++ ++ spin_unlock_irq(&cmd->t_state_lock); + cmd->se_tfo->queue_status(cmd); ++ spin_lock_irq(&cmd->t_state_lock); + + return 1; + } ++ ++int transport_check_aborted_status(struct se_cmd *cmd, int send_status) ++{ ++ int ret; ++ ++ spin_lock_irq(&cmd->t_state_lock); ++ ret = __transport_check_aborted_status(cmd, send_status); ++ spin_unlock_irq(&cmd->t_state_lock); ++ ++ return ret; ++} + EXPORT_SYMBOL(transport_check_aborted_status); + + void transport_send_task_abort(struct se_cmd *cmd) +@@ -2845,7 +2949,7 @@ void transport_send_task_abort(struct se_cmd *cmd) + unsigned long flags; + + spin_lock_irqsave(&cmd->t_state_lock, flags); +- if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) { ++ if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return; + } +@@ -2859,11 +2963,17 @@ void transport_send_task_abort(struct se_cmd *cmd) + */ + if (cmd->data_direction == DMA_TO_DEVICE) { + if (cmd->se_tfo->write_pending_status(cmd) != 0) { +- cmd->transport_state |= CMD_T_ABORTED; +- smp_mb__after_atomic_inc(); ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) { ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ goto send_abort; ++ } ++ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return; + } + } ++send_abort: + cmd->scsi_status = SAM_STAT_TASK_ABORTED; + + transport_lun_remove_cmd(cmd); +@@ -2881,8 +2991,17 @@ static void target_tmr_work(struct work_struct *work) + struct se_cmd *cmd = container_of(work, struct se_cmd, work); + struct se_device *dev = cmd->se_dev; + struct se_tmr_req *tmr = cmd->se_tmr_req; ++ unsigned long flags; + int ret; + ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ if (cmd->transport_state & CMD_T_ABORTED) { ++ tmr->response = 
TMR_FUNCTION_REJECTED; ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ goto check_stop; ++ } ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ + switch (tmr->function) { + case TMR_ABORT_TASK: + core_tmr_abort_task(dev, tmr, cmd->se_sess); +@@ -2910,9 +3029,17 @@ static void target_tmr_work(struct work_struct *work) + break; + } + ++ spin_lock_irqsave(&cmd->t_state_lock, flags); ++ if (cmd->transport_state & CMD_T_ABORTED) { ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ goto check_stop; ++ } + cmd->t_state = TRANSPORT_ISTATE_PROCESSING; ++ spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ + cmd->se_tfo->queue_tm_rsp(cmd); + ++check_stop: + transport_cmd_check_stop_to_fabric(cmd); + } + +diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h +index 752863acecb8..4f4b97161228 100644 +--- a/drivers/target/tcm_fc/tcm_fc.h ++++ b/drivers/target/tcm_fc/tcm_fc.h +@@ -163,6 +163,7 @@ int ft_write_pending_status(struct se_cmd *); + u32 ft_get_task_tag(struct se_cmd *); + int ft_get_cmd_state(struct se_cmd *); + void ft_queue_tm_resp(struct se_cmd *); ++void ft_aborted_task(struct se_cmd *); + + /* + * other internal functions. +diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c +index d22cdc77e9d4..f5fd515b2bee 100644 +--- a/drivers/target/tcm_fc/tfc_cmd.c ++++ b/drivers/target/tcm_fc/tfc_cmd.c +@@ -426,6 +426,11 @@ void ft_queue_tm_resp(struct se_cmd *se_cmd) + ft_send_resp_code(cmd, code); + } + ++void ft_aborted_task(struct se_cmd *se_cmd) ++{ ++ return; ++} ++ + static void ft_send_work(struct work_struct *work); + + /* +diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c +index e879da81ad93..b8b5a719a784 100644 +--- a/drivers/target/tcm_fc/tfc_conf.c ++++ b/drivers/target/tcm_fc/tfc_conf.c +@@ -536,6 +536,7 @@ static struct target_core_fabric_ops ft_fabric_ops = { + .queue_data_in = ft_queue_data_in, + .queue_status = ft_queue_status, + .queue_tm_rsp = ft_queue_tm_resp, ++ .aborted_task = ft_aborted_task, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c +diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c +index 39bd7ec8bf75..f66a14b6fc73 100644 +--- a/drivers/usb/chipidea/otg.c ++++ b/drivers/usb/chipidea/otg.c +@@ -96,7 +96,7 @@ static void ci_otg_work(struct work_struct *work) + int ci_hdrc_otg_init(struct ci_hdrc *ci) + { + INIT_WORK(&ci->work, ci_otg_work); +- ci->wq = create_singlethread_workqueue("ci_otg"); ++ ci->wq = create_freezable_workqueue("ci_otg"); + if (!ci->wq) { + dev_err(ci->dev, "can't create workqueue\n"); + return -ENODEV; +diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c +index 460c266b8e24..cdec2492ff40 100644 +--- a/drivers/usb/gadget/tcm_usb_gadget.c ++++ b/drivers/usb/gadget/tcm_usb_gadget.c +@@ -1471,6 +1471,11 @@ static void usbg_queue_tm_rsp(struct se_cmd *se_cmd) + { + } + ++static void usbg_aborted_task(struct se_cmd *se_cmd) ++{ ++ return; ++} ++ + static const char *usbg_check_wwn(const char *name) + { + const char *n; +@@ -1897,6 +1902,7 @@ static struct target_core_fabric_ops usbg_ops = { + .queue_data_in = usbg_send_read_response, + .queue_status = usbg_send_status_response, + .queue_tm_rsp = usbg_queue_tm_rsp, ++ .aborted_task = usbg_aborted_task, + .check_stop_free = usbg_check_stop_free, + + .fabric_make_wwn = usbg_make_tport, +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 21bf168981f9..922723edd6b0 100644 +--- 
a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -164,6 +164,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ + { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ + { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ ++ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ + { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 9bab34cf01d4..24366a2afea6 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -271,6 +271,7 @@ static void option_instat_callback(struct urb *urb); + #define TELIT_PRODUCT_UE910_V2 0x1012 + #define TELIT_PRODUCT_LE922_USBCFG0 0x1042 + #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 ++#define TELIT_PRODUCT_LE922_USBCFG5 0x1045 + #define TELIT_PRODUCT_LE920 0x1200 + #define TELIT_PRODUCT_LE910 0x1201 + +@@ -1140,6 +1141,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ ++ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */ ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), +@@ -1191,6 +1194,8 @@ static const struct usb_device_id option_ids[] = { + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3), + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), ++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), + .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), +diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c +index 6aeea1936aea..8f48f2bf34d4 100644 +--- a/drivers/vhost/scsi.c ++++ b/drivers/vhost/scsi.c +@@ -539,6 +539,11 @@ static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd) + return; + } + ++static void tcm_vhost_aborted_task(struct se_cmd *se_cmd) ++{ ++ return; ++} ++ + static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt) + { + vs->vs_events_nr--; +@@ -2173,6 +2178,7 @@ static struct target_core_fabric_ops tcm_vhost_ops = { + .queue_data_in = tcm_vhost_queue_data_in, + .queue_status = tcm_vhost_queue_status, + .queue_tm_rsp = tcm_vhost_queue_tm_rsp, ++ .aborted_task = tcm_vhost_aborted_task, + /* + * Setup callers for generic logic in target_core_fabric_configfs.c + */ +diff --git a/fs/bio.c b/fs/bio.c +index b2b1451912b5..6d8cf434bf94 100644 +--- a/fs/bio.c ++++ b/fs/bio.c +@@ -1096,9 +1096,12 @@ int bio_uncopy_user(struct bio *bio) + ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs, + bio_data_dir(bio) == READ, + 0, bmd->is_our_pages); +- else if (bmd->is_our_pages) +- bio_for_each_segment_all(bvec, bio, i) +- __free_page(bvec->bv_page); ++ else { ++ ret = -EINTR; ++ if (bmd->is_our_pages) ++ bio_for_each_segment_all(bvec, bio, i) ++ 
__free_page(bvec->bv_page); ++ } + } + kfree(bmd); + bio_put(bio); +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c +index f3264bd7a83d..3f709ab0223b 100644 +--- a/fs/cifs/cifssmb.c ++++ b/fs/cifs/cifssmb.c +@@ -1399,11 +1399,10 @@ openRetry: + * current bigbuf. + */ + static int +-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) ++discard_remaining_data(struct TCP_Server_Info *server) + { + unsigned int rfclen = get_rfc1002_length(server->smallbuf); + int remaining = rfclen + 4 - server->total_read; +- struct cifs_readdata *rdata = mid->callback_data; + + while (remaining > 0) { + int length; +@@ -1417,10 +1416,20 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) + remaining -= length; + } + +- dequeue_mid(mid, rdata->result); + return 0; + } + ++static int ++cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) ++{ ++ int length; ++ struct cifs_readdata *rdata = mid->callback_data; ++ ++ length = discard_remaining_data(server); ++ dequeue_mid(mid, rdata->result); ++ return length; ++} ++ + int + cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) + { +@@ -1449,6 +1458,12 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) + return length; + server->total_read += length; + ++ if (server->ops->is_status_pending && ++ server->ops->is_status_pending(buf, server, 0)) { ++ discard_remaining_data(server); ++ return -1; ++ } ++ + /* Was the SMB read successful? */ + rdata->result = server->ops->map_error(buf, false); + if (rdata->result != 0) { +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index 348792911e1f..ae375dff03da 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -1004,21 +1004,25 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp, + { + char *data_offset; + struct create_context *cc; +- unsigned int next = 0; ++ unsigned int next; ++ unsigned int remaining; + char *name; + + data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset); ++ remaining = le32_to_cpu(rsp->CreateContextsLength); + cc = (struct create_context *)data_offset; +- do { +- cc = (struct create_context *)((char *)cc + next); ++ while (remaining >= sizeof(struct create_context)) { + name = le16_to_cpu(cc->NameOffset) + (char *)cc; +- if (le16_to_cpu(cc->NameLength) != 4 || +- strncmp(name, "RqLs", 4)) { +- next = le32_to_cpu(cc->Next); +- continue; +- } +- return server->ops->parse_lease_buf(cc, epoch); +- } while (next != 0); ++ if (le16_to_cpu(cc->NameLength) == 4 && ++ strncmp(name, "RqLs", 4) == 0) ++ return server->ops->parse_lease_buf(cc, epoch); ++ ++ next = le32_to_cpu(cc->Next); ++ if (!next) ++ break; ++ remaining -= next; ++ cc = (struct create_context *)((char *)cc + next); ++ } + + return 0; + } +diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking +index 3ea36554107f..8918ac905a3b 100644 +--- a/fs/jffs2/README.Locking ++++ b/fs/jffs2/README.Locking +@@ -2,10 +2,6 @@ + JFFS2 LOCKING DOCUMENTATION + --------------------------- + +-At least theoretically, JFFS2 does not require the Big Kernel Lock +-(BKL), which was always helpfully obtained for it by Linux 2.4 VFS +-code. It has its own locking, as described below. +- + This document attempts to describe the existing locking rules for + JFFS2. It is not expected to remain perfectly up to date, but ought to + be fairly close. +@@ -69,6 +65,7 @@ Ordering constraints: + any f->sem held. + 2. Never attempt to lock two file mutexes in one thread. 
+ No ordering rules have been made for doing so. ++ 3. Never lock a page cache page with f->sem held. + + + erase_completion_lock spinlock +diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c +index a3750f902adc..c1f04947d7dc 100644 +--- a/fs/jffs2/build.c ++++ b/fs/jffs2/build.c +@@ -49,7 +49,8 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) + + + static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, +- struct jffs2_inode_cache *ic) ++ struct jffs2_inode_cache *ic, ++ int *dir_hardlinks) + { + struct jffs2_full_dirent *fd; + +@@ -68,19 +69,21 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, + dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", + fd->name, fd->ino, ic->ino); + jffs2_mark_node_obsolete(c, fd->raw); ++ /* Clear the ic/raw union so it doesn't cause problems later. */ ++ fd->ic = NULL; + continue; + } + ++ /* From this point, fd->raw is no longer used so we can set fd->ic */ ++ fd->ic = child_ic; ++ child_ic->pino_nlink++; ++ /* If we appear (at this stage) to have hard-linked directories, ++ * set a flag to trigger a scan later */ + if (fd->type == DT_DIR) { +- if (child_ic->pino_nlink) { +- JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", +- fd->name, fd->ino, ic->ino); +- /* TODO: What do we do about it? */ +- } else { +- child_ic->pino_nlink = ic->ino; +- } +- } else +- child_ic->pino_nlink++; ++ child_ic->flags |= INO_FLAGS_IS_DIR; ++ if (child_ic->pino_nlink > 1) ++ *dir_hardlinks = 1; ++ } + + dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino); + /* Can't free scan_dents so far. We might need them in pass 2 */ +@@ -94,8 +97,7 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, + */ + static int jffs2_build_filesystem(struct jffs2_sb_info *c) + { +- int ret; +- int i; ++ int ret, i, dir_hardlinks = 0; + struct jffs2_inode_cache *ic; + struct jffs2_full_dirent *fd; + struct jffs2_full_dirent *dead_fds = NULL; +@@ -119,7 +121,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + /* Now scan the directory tree, increasing nlink according to every dirent found. */ + for_each_inode(i, c, ic) { + if (ic->scan_dents) { +- jffs2_build_inode_pass1(c, ic); ++ jffs2_build_inode_pass1(c, ic, &dir_hardlinks); + cond_resched(); + } + } +@@ -155,6 +157,20 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + } + + dbg_fsbuild("pass 2a complete\n"); ++ ++ if (dir_hardlinks) { ++ /* If we detected directory hardlinks earlier, *hopefully* ++ * they are gone now because some of the links were from ++ * dead directories which still had some old dirents lying ++ * around and not yet garbage-collected, but which have ++ * been discarded above. So clear the pino_nlink field ++ * in each directory, so that the final scan below can ++ * print appropriate warnings. */ ++ for_each_inode(i, c, ic) { ++ if (ic->flags & INO_FLAGS_IS_DIR) ++ ic->pino_nlink = 0; ++ } ++ } + dbg_fsbuild("freeing temporary data structures\n"); + + /* Finally, we can scan again and free the dirent structs */ +@@ -162,6 +178,33 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + while(ic->scan_dents) { + fd = ic->scan_dents; + ic->scan_dents = fd->next; ++ /* We do use the pino_nlink field to count nlink of ++ * directories during fs build, so set it to the ++ * parent ino# now. Now that there's hopefully only ++ * one. 
*/ ++ if (fd->type == DT_DIR) { ++ if (!fd->ic) { ++ /* We'll have complained about it and marked the coresponding ++ raw node obsolete already. Just skip it. */ ++ continue; ++ } ++ ++ /* We *have* to have set this in jffs2_build_inode_pass1() */ ++ BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR)); ++ ++ /* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks ++ * is set. Otherwise, we know this should never trigger anyway, so ++ * we don't do the check. And ic->pino_nlink still contains the nlink ++ * value (which is 1). */ ++ if (dir_hardlinks && fd->ic->pino_nlink) { ++ JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n", ++ fd->name, fd->ino, ic->ino, fd->ic->pino_nlink); ++ /* Should we unlink it from its previous parent? */ ++ } ++ ++ /* For directories, ic->pino_nlink holds that parent inode # */ ++ fd->ic->pino_nlink = ic->ino; ++ } + jffs2_free_full_dirent(fd); + } + ic->scan_dents = NULL; +@@ -240,11 +283,7 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, + + /* Reduce nlink of the child. If it's now zero, stick it on the + dead_fds list to be cleaned up later. Else just free the fd */ +- +- if (fd->type == DT_DIR) +- child_ic->pino_nlink = 0; +- else +- child_ic->pino_nlink--; ++ child_ic->pino_nlink--; + + if (!child_ic->pino_nlink) { + dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n", +diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c +index 256cd19a3b78..d2570ae2e998 100644 +--- a/fs/jffs2/file.c ++++ b/fs/jffs2/file.c +@@ -139,39 +139,33 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + struct page *pg; + struct inode *inode = mapping->host; + struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); +- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); +- struct jffs2_raw_inode ri; +- uint32_t alloc_len = 0; + pgoff_t index = pos >> PAGE_CACHE_SHIFT; + uint32_t pageofs = index << PAGE_CACHE_SHIFT; + int ret = 0; + +- jffs2_dbg(1, "%s()\n", __func__); +- +- if (pageofs > inode->i_size) { +- ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, +- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); +- if (ret) +- return ret; +- } +- +- mutex_lock(&f->sem); + pg = grab_cache_page_write_begin(mapping, index, flags); +- if (!pg) { +- if (alloc_len) +- jffs2_complete_reservation(c); +- mutex_unlock(&f->sem); ++ if (!pg) + return -ENOMEM; +- } + *pagep = pg; + +- if (alloc_len) { ++ jffs2_dbg(1, "%s()\n", __func__); ++ ++ if (pageofs > inode->i_size) { + /* Make new hole frag from old EOF to new page */ ++ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); ++ struct jffs2_raw_inode ri; + struct jffs2_full_dnode *fn; ++ uint32_t alloc_len; + + jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", + (unsigned int)inode->i_size, pageofs); + ++ ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, ++ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); ++ if (ret) ++ goto out_page; ++ ++ mutex_lock(&f->sem); + memset(&ri, 0, sizeof(ri)); + + ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); +@@ -198,6 +192,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + if (IS_ERR(fn)) { + ret = PTR_ERR(fn); + jffs2_complete_reservation(c); ++ mutex_unlock(&f->sem); + goto out_page; + } + ret = jffs2_add_full_dnode_to_inode(c, f, fn); +@@ -212,10 +207,12 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + jffs2_mark_node_obsolete(c, fn->raw); + jffs2_free_full_dnode(fn); + 
jffs2_complete_reservation(c); ++ mutex_unlock(&f->sem); + goto out_page; + } + jffs2_complete_reservation(c); + inode->i_size = pageofs; ++ mutex_unlock(&f->sem); + } + + /* +@@ -224,18 +221,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + * case of a short-copy. + */ + if (!PageUptodate(pg)) { ++ mutex_lock(&f->sem); + ret = jffs2_do_readpage_nolock(inode, pg); ++ mutex_unlock(&f->sem); + if (ret) + goto out_page; + } +- mutex_unlock(&f->sem); + jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags); + return ret; + + out_page: + unlock_page(pg); + page_cache_release(pg); +- mutex_unlock(&f->sem); + return ret; + } + +diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c +index 5a2dec2b064c..95d5880a63ee 100644 +--- a/fs/jffs2/gc.c ++++ b/fs/jffs2/gc.c +@@ -1296,14 +1296,17 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era + BUG_ON(start > orig_start); + } + +- /* First, use readpage() to read the appropriate page into the page cache */ +- /* Q: What happens if we actually try to GC the _same_ page for which commit_write() +- * triggered garbage collection in the first place? +- * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the +- * page OK. We'll actually write it out again in commit_write, which is a little +- * suboptimal, but at least we're correct. +- */ ++ /* The rules state that we must obtain the page lock *before* f->sem, so ++ * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's ++ * actually going to *change* so we're safe; we only allow reading. ++ * ++ * It is important to note that jffs2_write_begin() will ensure that its ++ * page is marked Uptodate before allocating space. That means that if we ++ * end up here trying to GC the *same* page that jffs2_write_begin() is ++ * trying to write out, read_cache_page() will not deadlock. */ ++ mutex_unlock(&f->sem); + pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg); ++ mutex_lock(&f->sem); + + if (IS_ERR(pg_ptr)) { + pr_warn("read_cache_page() returned error: %ld\n", +diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h +index fa35ff79ab35..0637271f3770 100644 +--- a/fs/jffs2/nodelist.h ++++ b/fs/jffs2/nodelist.h +@@ -194,6 +194,7 @@ struct jffs2_inode_cache { + #define INO_STATE_CLEARING 6 /* In clear_inode() */ + + #define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */ ++#define INO_FLAGS_IS_DIR 0x02 /* is a directory */ + + #define RAWNODE_CLASS_INODE_CACHE 0 + #define RAWNODE_CLASS_XATTR_DATUM 1 +@@ -249,7 +250,10 @@ struct jffs2_readinode_info + + struct jffs2_full_dirent + { +- struct jffs2_raw_node_ref *raw; ++ union { ++ struct jffs2_raw_node_ref *raw; ++ struct jffs2_inode_cache *ic; /* Just during part of build */ ++ }; + struct jffs2_full_dirent *next; + uint32_t version; + uint32_t ino; /* == zero for unlink */ +diff --git a/fs/locks.c b/fs/locks.c +index 2c61c4e9368c..f3197830f07e 100644 +--- a/fs/locks.c ++++ b/fs/locks.c +@@ -2007,7 +2007,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, + goto out; + } + +-again: + error = flock_to_posix_lock(filp, file_lock, &flock); + if (error) + goto out; +@@ -2038,19 +2037,22 @@ again: + * Attempt to detect a close/fcntl race and recover by + * releasing the lock that was just acquired. + */ +- /* +- * we need that spin_lock here - it prevents reordering between +- * update of inode->i_flock and check for it done in close(). +- * rcu_read_lock() wouldn't do. 
+- */ +- spin_lock(¤t->files->file_lock); +- f = fcheck(fd); +- spin_unlock(¤t->files->file_lock); +- if (!error && f != filp && flock.l_type != F_UNLCK) { +- flock.l_type = F_UNLCK; +- goto again; ++ if (!error && file_lock->fl_type != F_UNLCK) { ++ /* ++ * We need that spin_lock here - it prevents reordering between ++ * update of inode->i_flock and check for it done in ++ * close(). rcu_read_lock() wouldn't do. ++ */ ++ spin_lock(¤t->files->file_lock); ++ f = fcheck(fd); ++ spin_unlock(¤t->files->file_lock); ++ if (f != filp) { ++ file_lock->fl_type = F_UNLCK; ++ error = do_lock_file_wait(filp, cmd, file_lock); ++ WARN_ON_ONCE(error); ++ error = -EBADF; ++ } + } +- + out: + locks_free_lock(file_lock); + return error; +@@ -2125,7 +2127,6 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, + goto out; + } + +-again: + error = flock64_to_posix_lock(filp, file_lock, &flock); + if (error) + goto out; +@@ -2156,14 +2157,22 @@ again: + * Attempt to detect a close/fcntl race and recover by + * releasing the lock that was just acquired. + */ +- spin_lock(¤t->files->file_lock); +- f = fcheck(fd); +- spin_unlock(¤t->files->file_lock); +- if (!error && f != filp && flock.l_type != F_UNLCK) { +- flock.l_type = F_UNLCK; +- goto again; ++ if (!error && file_lock->fl_type != F_UNLCK) { ++ /* ++ * We need that spin_lock here - it prevents reordering between ++ * update of inode->i_flock and check for it done in ++ * close(). rcu_read_lock() wouldn't do. ++ */ ++ spin_lock(¤t->files->file_lock); ++ f = fcheck(fd); ++ spin_unlock(¤t->files->file_lock); ++ if (f != filp) { ++ file_lock->fl_type = F_UNLCK; ++ error = do_lock_file_wait(filp, cmd, file_lock); ++ WARN_ON_ONCE(error); ++ error = -EBADF; ++ } + } +- + out: + locks_free_lock(file_lock); + return error; +diff --git a/include/linux/ata.h b/include/linux/ata.h +index f2f4d8da97c0..f7ff6554a354 100644 +--- a/include/linux/ata.h ++++ b/include/linux/ata.h +@@ -484,8 +484,8 @@ enum ata_tf_protocols { + }; + + enum ata_ioctls { +- ATA_IOC_GET_IO32 = 0x309, +- ATA_IOC_SET_IO32 = 0x324, ++ ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */ ++ ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */ + }; + + /* core structures */ +diff --git a/include/linux/libata.h b/include/linux/libata.h +index 189c9ff97b29..a445209be917 100644 +--- a/include/linux/libata.h ++++ b/include/linux/libata.h +@@ -712,7 +712,7 @@ struct ata_device { + union { + u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ + u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ +- }; ++ } ____cacheline_aligned; + + /* DEVSLP Timing Variables from Identify Device Data Log */ + u8 devslp_timing[ATA_LOG_DEVSLP_SIZE]; +diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h +index d1fb912740f3..0d7a62fea895 100644 +--- a/include/target/iscsi/iscsi_transport.h ++++ b/include/target/iscsi/iscsi_transport.h +@@ -21,6 +21,7 @@ struct iscsit_transport { + int (*iscsit_get_dataout)(struct iscsi_conn *, struct iscsi_cmd *, bool); + int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *); + int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *); ++ void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *); + }; + + static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd) +diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h +index f5915b39386a..522ae25a61a7 100644 +--- a/include/target/target_core_backend.h ++++ b/include/target/target_core_backend.h +@@ -94,4 +94,8 @@ 
sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *, + + void array_free(void *array, int n); + ++sector_t target_to_linux_sector(struct se_device *dev, sector_t lb); ++bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, ++ struct request_queue *q, int block_size); ++ + #endif /* TARGET_CORE_BACKEND_H */ +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h +index 42606764d830..3beb2fed86aa 100644 +--- a/include/target/target_core_base.h ++++ b/include/target/target_core_base.h +@@ -162,7 +162,7 @@ enum se_cmd_flags_table { + SCF_SENT_CHECK_CONDITION = 0x00000800, + SCF_OVERFLOW_BIT = 0x00001000, + SCF_UNDERFLOW_BIT = 0x00002000, +- SCF_SENT_DELAYED_TAS = 0x00004000, ++ SCF_SEND_DELAYED_TAS = 0x00004000, + SCF_ALUA_NON_OPTIMIZED = 0x00008000, + SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000, + SCF_ACK_KREF = 0x00040000, +@@ -528,6 +528,8 @@ struct se_cmd { + #define CMD_T_DEV_ACTIVE (1 << 7) + #define CMD_T_REQUEST_STOP (1 << 8) + #define CMD_T_BUSY (1 << 9) ++#define CMD_T_TAS (1 << 10) ++#define CMD_T_FABRIC_STOP (1 << 11) + spinlock_t t_state_lock; + struct completion t_transport_stop_comp; + +diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h +index 0218d689b3d7..1d1043644b9b 100644 +--- a/include/target/target_core_fabric.h ++++ b/include/target/target_core_fabric.h +@@ -62,6 +62,7 @@ struct target_core_fabric_ops { + int (*queue_data_in)(struct se_cmd *); + int (*queue_status)(struct se_cmd *); + void (*queue_tm_rsp)(struct se_cmd *); ++ void (*aborted_task)(struct se_cmd *); + /* + * fabric module calls for target_core_fabric_configfs.c + */ +diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c +index 2bb95a7a8809..c14565bde887 100644 +--- a/sound/core/control_compat.c ++++ b/sound/core/control_compat.c +@@ -170,6 +170,19 @@ struct snd_ctl_elem_value32 { + unsigned char reserved[128]; + }; + ++#ifdef CONFIG_X86_X32 ++/* x32 has a different alignment for 64bit values from ia32 */ ++struct snd_ctl_elem_value_x32 { ++ struct snd_ctl_elem_id id; ++ unsigned int indirect; /* bit-field causes misalignment */ ++ union { ++ s32 integer[128]; ++ unsigned char data[512]; ++ s64 integer64[64]; ++ } value; ++ unsigned char reserved[128]; ++}; ++#endif /* CONFIG_X86_X32 */ + + /* get the value type and count of the control */ + static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id, +@@ -219,9 +232,11 @@ static int get_elem_size(int type, int count) + + static int copy_ctl_value_from_user(struct snd_card *card, + struct snd_ctl_elem_value *data, +- struct snd_ctl_elem_value32 __user *data32, ++ void __user *userdata, ++ void __user *valuep, + int *typep, int *countp) + { ++ struct snd_ctl_elem_value32 __user *data32 = userdata; + int i, type, size; + int uninitialized_var(count); + unsigned int indirect; +@@ -239,8 +254,9 @@ static int copy_ctl_value_from_user(struct snd_card *card, + if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || + type == SNDRV_CTL_ELEM_TYPE_INTEGER) { + for (i = 0; i < count; i++) { ++ s32 __user *intp = valuep; + int val; +- if (get_user(val, &data32->value.integer[i])) ++ if (get_user(val, &intp[i])) + return -EFAULT; + data->value.integer.value[i] = val; + } +@@ -250,8 +266,7 @@ static int copy_ctl_value_from_user(struct snd_card *card, + printk(KERN_ERR "snd_ioctl32_ctl_elem_value: unknown type %d\n", type); + return -EINVAL; + } +- if (copy_from_user(data->value.bytes.data, +- data32->value.data, size)) ++ if 
(copy_from_user(data->value.bytes.data, valuep, size)) + return -EFAULT; + } + +@@ -261,7 +276,8 @@ static int copy_ctl_value_from_user(struct snd_card *card, + } + + /* restore the value to 32bit */ +-static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32, ++static int copy_ctl_value_to_user(void __user *userdata, ++ void __user *valuep, + struct snd_ctl_elem_value *data, + int type, int count) + { +@@ -270,22 +286,22 @@ static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32, + if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || + type == SNDRV_CTL_ELEM_TYPE_INTEGER) { + for (i = 0; i < count; i++) { ++ s32 __user *intp = valuep; + int val; + val = data->value.integer.value[i]; +- if (put_user(val, &data32->value.integer[i])) ++ if (put_user(val, &intp[i])) + return -EFAULT; + } + } else { + size = get_elem_size(type, count); +- if (copy_to_user(data32->value.data, +- data->value.bytes.data, size)) ++ if (copy_to_user(valuep, data->value.bytes.data, size)) + return -EFAULT; + } + return 0; + } + +-static int snd_ctl_elem_read_user_compat(struct snd_card *card, +- struct snd_ctl_elem_value32 __user *data32) ++static int ctl_elem_read_user(struct snd_card *card, ++ void __user *userdata, void __user *valuep) + { + struct snd_ctl_elem_value *data; + int err, type, count; +@@ -294,7 +310,9 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card, + if (data == NULL) + return -ENOMEM; + +- if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0) ++ err = copy_ctl_value_from_user(card, data, userdata, valuep, ++ &type, &count); ++ if (err < 0) + goto error; + + snd_power_lock(card); +@@ -303,14 +321,15 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card, + err = snd_ctl_elem_read(card, data); + snd_power_unlock(card); + if (err >= 0) +- err = copy_ctl_value_to_user(data32, data, type, count); ++ err = copy_ctl_value_to_user(userdata, valuep, data, ++ type, count); + error: + kfree(data); + return err; + } + +-static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, +- struct snd_ctl_elem_value32 __user *data32) ++static int ctl_elem_write_user(struct snd_ctl_file *file, ++ void __user *userdata, void __user *valuep) + { + struct snd_ctl_elem_value *data; + struct snd_card *card = file->card; +@@ -320,7 +339,9 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, + if (data == NULL) + return -ENOMEM; + +- if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0) ++ err = copy_ctl_value_from_user(card, data, userdata, valuep, ++ &type, &count); ++ if (err < 0) + goto error; + + snd_power_lock(card); +@@ -329,12 +350,39 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, + err = snd_ctl_elem_write(card, file, data); + snd_power_unlock(card); + if (err >= 0) +- err = copy_ctl_value_to_user(data32, data, type, count); ++ err = copy_ctl_value_to_user(userdata, valuep, data, ++ type, count); + error: + kfree(data); + return err; + } + ++static int snd_ctl_elem_read_user_compat(struct snd_card *card, ++ struct snd_ctl_elem_value32 __user *data32) ++{ ++ return ctl_elem_read_user(card, data32, &data32->value); ++} ++ ++static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, ++ struct snd_ctl_elem_value32 __user *data32) ++{ ++ return ctl_elem_write_user(file, data32, &data32->value); ++} ++ ++#ifdef CONFIG_X86_X32 ++static int snd_ctl_elem_read_user_x32(struct snd_card *card, ++ struct snd_ctl_elem_value_x32 __user *data32) ++{ ++ return 
ctl_elem_read_user(card, data32, &data32->value); ++} ++ ++static int snd_ctl_elem_write_user_x32(struct snd_ctl_file *file, ++ struct snd_ctl_elem_value_x32 __user *data32) ++{ ++ return ctl_elem_write_user(file, data32, &data32->value); ++} ++#endif /* CONFIG_X86_X32 */ ++ + /* add or replace a user control */ + static int snd_ctl_elem_add_compat(struct snd_ctl_file *file, + struct snd_ctl_elem_info32 __user *data32, +@@ -393,6 +441,10 @@ enum { + SNDRV_CTL_IOCTL_ELEM_WRITE32 = _IOWR('U', 0x13, struct snd_ctl_elem_value32), + SNDRV_CTL_IOCTL_ELEM_ADD32 = _IOWR('U', 0x17, struct snd_ctl_elem_info32), + SNDRV_CTL_IOCTL_ELEM_REPLACE32 = _IOWR('U', 0x18, struct snd_ctl_elem_info32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_CTL_IOCTL_ELEM_READ_X32 = _IOWR('U', 0x12, struct snd_ctl_elem_value_x32), ++ SNDRV_CTL_IOCTL_ELEM_WRITE_X32 = _IOWR('U', 0x13, struct snd_ctl_elem_value_x32), ++#endif /* CONFIG_X86_X32 */ + }; + + static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -431,6 +483,12 @@ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, uns + return snd_ctl_elem_add_compat(ctl, argp, 0); + case SNDRV_CTL_IOCTL_ELEM_REPLACE32: + return snd_ctl_elem_add_compat(ctl, argp, 1); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_CTL_IOCTL_ELEM_READ_X32: ++ return snd_ctl_elem_read_user_x32(ctl->card, argp); ++ case SNDRV_CTL_IOCTL_ELEM_WRITE_X32: ++ return snd_ctl_elem_write_user_x32(ctl, argp); ++#endif /* CONFIG_X86_X32 */ + } + + down_read(&snd_ioctl_rwsem); +diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c +index 5268c1f58c25..09a89094dcf7 100644 +--- a/sound/core/rawmidi_compat.c ++++ b/sound/core/rawmidi_compat.c +@@ -94,9 +94,58 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile, + return 0; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has 64bit timespec and 64bit alignment */ ++struct snd_rawmidi_status_x32 { ++ s32 stream; ++ u32 rsvd; /* alignment */ ++ struct timespec tstamp; ++ u32 avail; ++ u32 xruns; ++ unsigned char reserved[16]; ++} __attribute__((packed)); ++ ++#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst)) ++ ++static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile, ++ struct snd_rawmidi_status_x32 __user *src) ++{ ++ int err; ++ struct snd_rawmidi_status status; ++ ++ if (rfile->output == NULL) ++ return -EINVAL; ++ if (get_user(status.stream, &src->stream)) ++ return -EFAULT; ++ ++ switch (status.stream) { ++ case SNDRV_RAWMIDI_STREAM_OUTPUT: ++ err = snd_rawmidi_output_status(rfile->output, &status); ++ break; ++ case SNDRV_RAWMIDI_STREAM_INPUT: ++ err = snd_rawmidi_input_status(rfile->input, &status); ++ break; ++ default: ++ return -EINVAL; ++ } ++ if (err < 0) ++ return err; ++ ++ if (put_timespec(&status.tstamp, &src->tstamp) || ++ put_user(status.avail, &src->avail) || ++ put_user(status.xruns, &src->xruns)) ++ return -EFAULT; ++ ++ return 0; ++} ++#endif /* CONFIG_X86_X32 */ ++ + enum { + SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32), + SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_RAWMIDI_IOCTL_STATUS_X32 = _IOWR('W', 0x20, struct snd_rawmidi_status_x32), ++#endif /* CONFIG_X86_X32 */ + }; + + static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -115,6 +164,10 @@ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsign + return snd_rawmidi_ioctl_params_compat(rfile, argp); + 
case SNDRV_RAWMIDI_IOCTL_STATUS32: + return snd_rawmidi_ioctl_status_compat(rfile, argp); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_RAWMIDI_IOCTL_STATUS_X32: ++ return snd_rawmidi_ioctl_status_x32(rfile, argp); ++#endif /* CONFIG_X86_X32 */ + } + return -ENOIOCTLCMD; + } +diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c +index 8d4d5e853efe..ab774954c985 100644 +--- a/sound/core/seq/oss/seq_oss.c ++++ b/sound/core/seq/oss/seq_oss.c +@@ -150,8 +150,6 @@ odev_release(struct inode *inode, struct file *file) + if ((dp = file->private_data) == NULL) + return 0; + +- snd_seq_oss_drain_write(dp); +- + mutex_lock(®ister_mutex); + snd_seq_oss_release(dp); + mutex_unlock(®ister_mutex); +diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h +index c0154a959d55..2464112b08ad 100644 +--- a/sound/core/seq/oss/seq_oss_device.h ++++ b/sound/core/seq/oss/seq_oss_device.h +@@ -131,7 +131,6 @@ int snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int co + unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait); + + void snd_seq_oss_reset(struct seq_oss_devinfo *dp); +-void snd_seq_oss_drain_write(struct seq_oss_devinfo *dp); + + /* */ + void snd_seq_oss_process_queue(struct seq_oss_devinfo *dp, abstime_t time); +diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c +index b3f39b5ed742..f9e09e458227 100644 +--- a/sound/core/seq/oss/seq_oss_init.c ++++ b/sound/core/seq/oss/seq_oss_init.c +@@ -457,23 +457,6 @@ snd_seq_oss_release(struct seq_oss_devinfo *dp) + + + /* +- * Wait until the queue is empty (if we don't have nonblock) +- */ +-void +-snd_seq_oss_drain_write(struct seq_oss_devinfo *dp) +-{ +- if (! dp->timer->running) +- return; +- if (is_write_mode(dp->file_mode) && !is_nonblock_mode(dp->file_mode) && +- dp->writeq) { +- debug_printk(("syncing..\n")); +- while (snd_seq_oss_writeq_sync(dp->writeq)) +- ; +- } +-} +- +- +-/* + * reset sequencer devices + */ + void +diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c +index e05802ae6e1b..2e908225d754 100644 +--- a/sound/core/timer_compat.c ++++ b/sound/core/timer_compat.c +@@ -70,13 +70,14 @@ static int snd_timer_user_status_compat(struct file *file, + struct snd_timer_status32 __user *_status) + { + struct snd_timer_user *tu; +- struct snd_timer_status status; ++ struct snd_timer_status32 status; + + tu = file->private_data; + if (snd_BUG_ON(!tu->timeri)) + return -ENXIO; + memset(&status, 0, sizeof(status)); +- status.tstamp = tu->tstamp; ++ status.tstamp.tv_sec = tu->tstamp.tv_sec; ++ status.tstamp.tv_nsec = tu->tstamp.tv_nsec; + status.resolution = snd_timer_resolution(tu->timeri); + status.lost = tu->timeri->lost; + status.overrun = tu->overrun; +@@ -88,12 +89,21 @@ static int snd_timer_user_status_compat(struct file *file, + return 0; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has the same struct as x86-64 */ ++#define snd_timer_user_status_x32(file, s) \ ++ snd_timer_user_status(file, s) ++#endif /* CONFIG_X86_X32 */ ++ + /* + */ + + enum { + SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32), + SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_TIMER_IOCTL_STATUS_X32 = _IOW('T', 0x14, struct snd_timer_status), ++#endif /* CONFIG_X86_X32 */ + }; + + static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -122,6 +132,10 @@ static long snd_timer_user_ioctl_compat(struct file *file, 
unsigned int cmd, uns + return snd_timer_user_info_compat(file, argp); + case SNDRV_TIMER_IOCTL_STATUS32: + return snd_timer_user_status_compat(file, argp); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_TIMER_IOCTL_STATUS_X32: ++ return snd_timer_user_status_x32(file, argp); ++#endif /* CONFIG_X86_X32 */ + } + return -ENOIOCTLCMD; + } +diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c +index bd90c80bb494..4bc0d66377d6 100644 +--- a/sound/pci/rme9652/hdsp.c ++++ b/sound/pci/rme9652/hdsp.c +@@ -2917,7 +2917,7 @@ static int snd_hdsp_get_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl + { + struct hdsp *hdsp = snd_kcontrol_chip(kcontrol); + +- ucontrol->value.enumerated.item[0] = hdsp_dds_offset(hdsp); ++ ucontrol->value.integer.value[0] = hdsp_dds_offset(hdsp); + return 0; + } + +@@ -2929,7 +2929,7 @@ static int snd_hdsp_put_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl + + if (!snd_hdsp_use_is_exclusive(hdsp)) + return -EBUSY; +- val = ucontrol->value.enumerated.item[0]; ++ val = ucontrol->value.integer.value[0]; + spin_lock_irq(&hdsp->lock); + if (val != hdsp_dds_offset(hdsp)) + change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 1 : 0; +diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c +index 21167503a3f9..dec173081798 100644 +--- a/sound/pci/rme9652/hdspm.c ++++ b/sound/pci/rme9652/hdspm.c +@@ -1602,6 +1602,9 @@ static void hdspm_set_dds_value(struct hdspm *hdspm, int rate) + { + u64 n; + ++ if (snd_BUG_ON(rate <= 0)) ++ return; ++ + if (rate >= 112000) + rate /= 4; + else if (rate >= 56000) +@@ -2224,6 +2227,8 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm) + } else { + /* slave mode, return external sample rate */ + rate = hdspm_external_sample_rate(hdspm); ++ if (!rate) ++ rate = hdspm->system_sample_rate; + } + } + +@@ -2269,8 +2274,11 @@ static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol, + ucontrol) + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); ++ int rate = ucontrol->value.integer.value[0]; + +- hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]); ++ if (rate < 27000 || rate > 207000) ++ return -EINVAL; ++ hdspm_set_dds_value(hdspm, ucontrol->value.integer.value[0]); + return 0; + } + +@@ -4469,7 +4477,7 @@ static int snd_hdspm_get_tco_word_term(struct snd_kcontrol *kcontrol, + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); + +- ucontrol->value.enumerated.item[0] = hdspm->tco->term; ++ ucontrol->value.integer.value[0] = hdspm->tco->term; + + return 0; + } +@@ -4480,8 +4488,8 @@ static int snd_hdspm_put_tco_word_term(struct snd_kcontrol *kcontrol, + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); + +- if (hdspm->tco->term != ucontrol->value.enumerated.item[0]) { +- hdspm->tco->term = ucontrol->value.enumerated.item[0]; ++ if (hdspm->tco->term != ucontrol->value.integer.value[0]) { ++ hdspm->tco->term = ucontrol->value.integer.value[0]; + + hdspm_tco_write(hdspm); + diff --git a/patch/kernel/udoo-default/patch-3.14.64-65.patch b/patch/kernel/udoo-default/patch-3.14.64-65.patch new file mode 100644 index 000000000..73548c75e --- /dev/null +++ b/patch/kernel/udoo-default/patch-3.14.64-65.patch @@ -0,0 +1,1518 @@ +diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt +index c477af086e65..686a64bba775 100644 +--- a/Documentation/filesystems/efivarfs.txt ++++ b/Documentation/filesystems/efivarfs.txt +@@ -14,3 +14,10 @@ filesystem. 
+ efivarfs is typically mounted like this, + + mount -t efivarfs none /sys/firmware/efi/efivars ++ ++Due to the presence of numerous firmware bugs where removing non-standard ++UEFI variables causes the system firmware to fail to POST, efivarfs ++files that are not well-known standardized variables are created ++as immutable files. This doesn't prevent removal - "chattr -i" will work - ++but it does prevent this kind of failure from being accomplished ++accidentally. +diff --git a/Makefile b/Makefile +index de41fa82652f..a19c22a77728 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 64 ++SUBLEVEL = 65 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c +index 12664c130d73..78a859ec4946 100644 +--- a/arch/powerpc/kernel/module_64.c ++++ b/arch/powerpc/kernel/module_64.c +@@ -202,7 +202,7 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) + if (syms[i].st_shndx == SHN_UNDEF) { + char *name = strtab + syms[i].st_name; + if (name[0] == '.') +- memmove(name, name+1, strlen(name)); ++ syms[i].st_name++; + } + } + } +diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S +index b3bad672e5d9..87fa70f8472a 100644 +--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S ++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S +@@ -1148,6 +1148,20 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) + std r6, VCPU_ACOP(r9) + stw r7, VCPU_GUEST_PID(r9) + std r8, VCPU_WORT(r9) ++ /* ++ * Restore various registers to 0, where non-zero values ++ * set by the guest could disrupt the host. ++ */ ++ li r0, 0 ++ mtspr SPRN_IAMR, r0 ++ mtspr SPRN_CIABR, r0 ++ mtspr SPRN_DAWRX, r0 ++ mtspr SPRN_TCSCR, r0 ++ mtspr SPRN_WORT, r0 ++ /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ ++ li r0, 1 ++ sldi r0, r0, 31 ++ mtspr SPRN_MMCRS, r0 + 8: + + /* Save and reset AMR and UAMOR before turning on the MMU */ +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 80c22a3ca688..b6fc5fc64cef 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -1555,6 +1555,13 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, + return; + } + break; ++ case MSR_IA32_PEBS_ENABLE: ++ /* PEBS needs a quiescent period after being disabled (to write ++ * a record). Disabling PEBS through VMX MSR swapping doesn't ++ * provide that period, so a CPU could write host's record into ++ * guest's memory. 
++ */ ++ wrmsrl(MSR_IA32_PEBS_ENABLE, 0); + } + + for (i = 0; i < m->nr; ++i) +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 1777f89875fb..8db66424be97 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -1991,6 +1991,8 @@ static void accumulate_steal_time(struct kvm_vcpu *vcpu) + + static void record_steal_time(struct kvm_vcpu *vcpu) + { ++ accumulate_steal_time(vcpu); ++ + if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) + return; + +@@ -2123,12 +2125,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + if (!(data & KVM_MSR_ENABLED)) + break; + +- vcpu->arch.st.last_steal = current->sched_info.run_delay; +- +- preempt_disable(); +- accumulate_steal_time(vcpu); +- preempt_enable(); +- + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + + break; +@@ -2818,7 +2814,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) + vcpu->cpu = cpu; + } + +- accumulate_steal_time(vcpu); + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + } + +diff --git a/crypto/af_alg.c b/crypto/af_alg.c +index 1971f3ccb09a..eeb2fb239934 100644 +--- a/crypto/af_alg.c ++++ b/crypto/af_alg.c +@@ -125,23 +125,6 @@ int af_alg_release(struct socket *sock) + } + EXPORT_SYMBOL_GPL(af_alg_release); + +-void af_alg_release_parent(struct sock *sk) +-{ +- struct alg_sock *ask = alg_sk(sk); +- bool last; +- +- sk = ask->parent; +- ask = alg_sk(sk); +- +- lock_sock(sk); +- last = !--ask->refcnt; +- release_sock(sk); +- +- if (last) +- sock_put(sk); +-} +-EXPORT_SYMBOL_GPL(af_alg_release_parent); +- + static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + { + struct sock *sk = sock->sk; +@@ -149,7 +132,6 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + struct sockaddr_alg *sa = (void *)uaddr; + const struct af_alg_type *type; + void *private; +- int err; + + if (sock->state == SS_CONNECTED) + return -EINVAL; +@@ -175,22 +157,16 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + return PTR_ERR(private); + } + +- err = -EBUSY; + lock_sock(sk); +- if (ask->refcnt) +- goto unlock; + + swap(ask->type, type); + swap(ask->private, private); + +- err = 0; +- +-unlock: + release_sock(sk); + + alg_do_release(type, private); + +- return err; ++ return 0; + } + + static int alg_setkey(struct sock *sk, char __user *ukey, +@@ -223,15 +199,11 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + const struct af_alg_type *type; +- int err = -EBUSY; ++ int err = -ENOPROTOOPT; + + lock_sock(sk); +- if (ask->refcnt) +- goto unlock; +- + type = ask->type; + +- err = -ENOPROTOOPT; + if (level != SOL_ALG || !type) + goto unlock; + +@@ -280,8 +252,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) + + sk2->sk_family = PF_ALG; + +- if (!ask->refcnt++) +- sock_hold(sk); ++ sock_hold(sk); + alg_sk(sk2)->parent = sk; + alg_sk(sk2)->type = type; + +diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c +index 3dc248239197..f04ce007bf7f 100644 +--- a/drivers/firmware/efi/efivars.c ++++ b/drivers/firmware/efi/efivars.c +@@ -219,7 +219,8 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) + } + + if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 || +- efivar_validate(new_var, new_var->Data, new_var->DataSize) == false) { ++ efivar_validate(new_var->VendorGuid, new_var->VariableName, ++ new_var->Data, new_var->DataSize) == false) { + printk(KERN_ERR "efivars: Malformed 
variable content\n"); + return -EINVAL; + } +@@ -334,7 +335,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, + return -EACCES; + + if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 || +- efivar_validate(new_var, new_var->Data, new_var->DataSize) == false) { ++ efivar_validate(new_var->VendorGuid, new_var->VariableName, ++ new_var->Data, new_var->DataSize) == false) { + printk(KERN_ERR "efivars: Malformed variable content\n"); + return -EINVAL; + } +@@ -409,35 +411,27 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) + { + int i, short_name_size; + char *short_name; +- unsigned long variable_name_size; +- efi_char16_t *variable_name; +- +- variable_name = new_var->var.VariableName; +- variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t); ++ unsigned long utf8_name_size; ++ efi_char16_t *variable_name = new_var->var.VariableName; + + /* +- * Length of the variable bytes in ASCII, plus the '-' separator, ++ * Length of the variable bytes in UTF8, plus the '-' separator, + * plus the GUID, plus trailing NUL + */ +- short_name_size = variable_name_size / sizeof(efi_char16_t) +- + 1 + EFI_VARIABLE_GUID_LEN + 1; +- +- short_name = kzalloc(short_name_size, GFP_KERNEL); ++ utf8_name_size = ucs2_utf8size(variable_name); ++ short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1; + ++ short_name = kmalloc(short_name_size, GFP_KERNEL); + if (!short_name) + return 1; + +- /* Convert Unicode to normal chars (assume top bits are 0), +- ala UTF-8 */ +- for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) { +- short_name[i] = variable_name[i] & 0xFF; +- } ++ ucs2_as_utf8(short_name, variable_name, short_name_size); ++ + /* This is ugly, but necessary to separate one vendor's + private variables from another's. 
*/ +- +- *(short_name + strlen(short_name)) = '-'; ++ short_name[utf8_name_size] = '-'; + efi_guid_unparse(&new_var->var.VendorGuid, +- short_name + strlen(short_name)); ++ short_name + utf8_name_size + 1); + + new_var->kobj.kset = efivars_kset; + +diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c +index e6125522860a..4e2f46938bf0 100644 +--- a/drivers/firmware/efi/vars.c ++++ b/drivers/firmware/efi/vars.c +@@ -42,7 +42,7 @@ DECLARE_WORK(efivar_work, NULL); + EXPORT_SYMBOL_GPL(efivar_work); + + static bool +-validate_device_path(struct efi_variable *var, int match, u8 *buffer, ++validate_device_path(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + struct efi_generic_dev_path *node; +@@ -75,7 +75,7 @@ validate_device_path(struct efi_variable *var, int match, u8 *buffer, + } + + static bool +-validate_boot_order(struct efi_variable *var, int match, u8 *buffer, ++validate_boot_order(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + /* An array of 16-bit integers */ +@@ -86,18 +86,18 @@ validate_boot_order(struct efi_variable *var, int match, u8 *buffer, + } + + static bool +-validate_load_option(struct efi_variable *var, int match, u8 *buffer, ++validate_load_option(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + u16 filepathlength; + int i, desclength = 0, namelen; + +- namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName)); ++ namelen = ucs2_strnlen(var_name, EFI_VAR_NAME_LEN); + + /* Either "Boot" or "Driver" followed by four digits of hex */ + for (i = match; i < match+4; i++) { +- if (var->VariableName[i] > 127 || +- hex_to_bin(var->VariableName[i] & 0xff) < 0) ++ if (var_name[i] > 127 || ++ hex_to_bin(var_name[i] & 0xff) < 0) + return true; + } + +@@ -132,12 +132,12 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer, + /* + * And, finally, check the filepath + */ +- return validate_device_path(var, match, buffer + desclength + 6, ++ return validate_device_path(var_name, match, buffer + desclength + 6, + filepathlength); + } + + static bool +-validate_uint16(struct efi_variable *var, int match, u8 *buffer, ++validate_uint16(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + /* A single 16-bit integer */ +@@ -148,7 +148,7 @@ validate_uint16(struct efi_variable *var, int match, u8 *buffer, + } + + static bool +-validate_ascii_string(struct efi_variable *var, int match, u8 *buffer, ++validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer, + unsigned long len) + { + int i; +@@ -165,67 +165,133 @@ validate_ascii_string(struct efi_variable *var, int match, u8 *buffer, + } + + struct variable_validate { ++ efi_guid_t vendor; + char *name; +- bool (*validate)(struct efi_variable *var, int match, u8 *data, ++ bool (*validate)(efi_char16_t *var_name, int match, u8 *data, + unsigned long len); + }; + ++/* ++ * This is the list of variables we need to validate, as well as the ++ * whitelist for what we think is safe not to default to immutable. ++ * ++ * If it has a validate() method that's not NULL, it'll go into the ++ * validation routine. If not, it is assumed valid, but still used for ++ * whitelisting. ++ * ++ * Note that it's sorted by {vendor,name}, but globbed names must come after ++ * any other name with the same prefix. 
++ */ + static const struct variable_validate variable_validate[] = { +- { "BootNext", validate_uint16 }, +- { "BootOrder", validate_boot_order }, +- { "DriverOrder", validate_boot_order }, +- { "Boot*", validate_load_option }, +- { "Driver*", validate_load_option }, +- { "ConIn", validate_device_path }, +- { "ConInDev", validate_device_path }, +- { "ConOut", validate_device_path }, +- { "ConOutDev", validate_device_path }, +- { "ErrOut", validate_device_path }, +- { "ErrOutDev", validate_device_path }, +- { "Timeout", validate_uint16 }, +- { "Lang", validate_ascii_string }, +- { "PlatformLang", validate_ascii_string }, +- { "", NULL }, ++ { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 }, ++ { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option }, ++ { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string }, ++ { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL }, ++ { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string }, ++ { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 }, ++ { LINUX_EFI_CRASH_GUID, "*", NULL }, ++ { NULL_GUID, "", NULL }, + }; + ++static bool ++variable_matches(const char *var_name, size_t len, const char *match_name, ++ int *match) ++{ ++ for (*match = 0; ; (*match)++) { ++ char c = match_name[*match]; ++ char u = var_name[*match]; ++ ++ /* Wildcard in the matching name means we've matched */ ++ if (c == '*') ++ return true; ++ ++ /* Case sensitive match */ ++ if (!c && *match == len) ++ return true; ++ ++ if (c != u) ++ return false; ++ ++ if (!c) ++ return true; ++ } ++ return true; ++} ++ + bool +-efivar_validate(struct efi_variable *var, u8 *data, unsigned long len) ++efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, ++ unsigned long data_size) + { + int i; +- u16 *unicode_name = var->VariableName; ++ unsigned long utf8_size; ++ u8 *utf8_name; + +- for (i = 0; variable_validate[i].validate != NULL; i++) { +- const char *name = variable_validate[i].name; +- int match; ++ utf8_size = ucs2_utf8size(var_name); ++ utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL); ++ if (!utf8_name) ++ return false; + +- for (match = 0; ; match++) { +- char c = name[match]; +- u16 u = unicode_name[match]; ++ ucs2_as_utf8(utf8_name, var_name, utf8_size); ++ utf8_name[utf8_size] = '\0'; + +- /* All special variables are plain ascii */ +- if (u > 127) +- return true; ++ for (i = 0; variable_validate[i].name[0] != '\0'; i++) { ++ const char *name = variable_validate[i].name; ++ int match = 0; + +- /* Wildcard in the matching name means we've matched */ +- if (c == '*') +- return variable_validate[i].validate(var, +- match, data, len); ++ if (efi_guidcmp(vendor, variable_validate[i].vendor)) ++ continue; + +- /* Case sensitive match */ +- if (c != u) ++ if (variable_matches(utf8_name, utf8_size+1, name, &match)) { ++ if (variable_validate[i].validate == NULL) + break; +- +- /* Reached the end of the string while matching */ +- if (!c) +- return 
variable_validate[i].validate(var, +- match, data, len); ++ kfree(utf8_name); ++ return variable_validate[i].validate(var_name, match, ++ data, data_size); + } + } +- ++ kfree(utf8_name); + return true; + } + EXPORT_SYMBOL_GPL(efivar_validate); + ++bool ++efivar_variable_is_removable(efi_guid_t vendor, const char *var_name, ++ size_t len) ++{ ++ int i; ++ bool found = false; ++ int match = 0; ++ ++ /* ++ * Check if our variable is in the validated variables list ++ */ ++ for (i = 0; variable_validate[i].name[0] != '\0'; i++) { ++ if (efi_guidcmp(variable_validate[i].vendor, vendor)) ++ continue; ++ ++ if (variable_matches(var_name, len, ++ variable_validate[i].name, &match)) { ++ found = true; ++ break; ++ } ++ } ++ ++ /* ++ * If it's in our list, it is removable. ++ */ ++ return found; ++} ++EXPORT_SYMBOL_GPL(efivar_variable_is_removable); ++ + static efi_status_t + check_var_size(u32 attributes, unsigned long size) + { +@@ -805,7 +871,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, + + *set = false; + +- if (efivar_validate(&entry->var, data, *size) == false) ++ if (efivar_validate(*vendor, name, data, *size) == false) + return -EINVAL; + + /* +diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c +index 8dd524f32284..08f105a06fbf 100644 +--- a/fs/efivarfs/file.c ++++ b/fs/efivarfs/file.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + + #include "internal.h" + +@@ -108,9 +109,79 @@ out_free: + return size; + } + ++static int ++efivarfs_ioc_getxflags(struct file *file, void __user *arg) ++{ ++ struct inode *inode = file->f_mapping->host; ++ unsigned int i_flags; ++ unsigned int flags = 0; ++ ++ i_flags = inode->i_flags; ++ if (i_flags & S_IMMUTABLE) ++ flags |= FS_IMMUTABLE_FL; ++ ++ if (copy_to_user(arg, &flags, sizeof(flags))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ++efivarfs_ioc_setxflags(struct file *file, void __user *arg) ++{ ++ struct inode *inode = file->f_mapping->host; ++ unsigned int flags; ++ unsigned int i_flags = 0; ++ int error; ++ ++ if (!inode_owner_or_capable(inode)) ++ return -EACCES; ++ ++ if (copy_from_user(&flags, arg, sizeof(flags))) ++ return -EFAULT; ++ ++ if (flags & ~FS_IMMUTABLE_FL) ++ return -EOPNOTSUPP; ++ ++ if (!capable(CAP_LINUX_IMMUTABLE)) ++ return -EPERM; ++ ++ if (flags & FS_IMMUTABLE_FL) ++ i_flags |= S_IMMUTABLE; ++ ++ ++ error = mnt_want_write_file(file); ++ if (error) ++ return error; ++ ++ mutex_lock(&inode->i_mutex); ++ inode->i_flags &= ~S_IMMUTABLE; ++ inode->i_flags |= i_flags; ++ mutex_unlock(&inode->i_mutex); ++ ++ mnt_drop_write_file(file); ++ ++ return 0; ++} ++ ++long ++efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p) ++{ ++ void __user *arg = (void __user *)p; ++ ++ switch (cmd) { ++ case FS_IOC_GETFLAGS: ++ return efivarfs_ioc_getxflags(file, arg); ++ case FS_IOC_SETFLAGS: ++ return efivarfs_ioc_setxflags(file, arg); ++ } ++ ++ return -ENOTTY; ++} ++ + const struct file_operations efivarfs_file_operations = { + .open = simple_open, + .read = efivarfs_file_read, + .write = efivarfs_file_write, + .llseek = no_llseek, ++ .unlocked_ioctl = efivarfs_file_ioctl, + }; +diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c +index 07ab49745e31..7e7318f10575 100644 +--- a/fs/efivarfs/inode.c ++++ b/fs/efivarfs/inode.c +@@ -15,7 +15,8 @@ + #include "internal.h" + + struct inode *efivarfs_get_inode(struct super_block *sb, +- const struct inode *dir, int mode, dev_t dev) ++ const struct inode *dir, int mode, ++ dev_t dev, bool is_removable) + { + struct inode 
*inode = new_inode(sb); + +@@ -23,6 +24,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb, + inode->i_ino = get_next_ino(); + inode->i_mode = mode; + inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; ++ inode->i_flags = is_removable ? 0 : S_IMMUTABLE; + switch (mode & S_IFMT) { + case S_IFREG: + inode->i_fop = &efivarfs_file_operations; +@@ -102,22 +104,17 @@ static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid) + static int efivarfs_create(struct inode *dir, struct dentry *dentry, + umode_t mode, bool excl) + { +- struct inode *inode; ++ struct inode *inode = NULL; + struct efivar_entry *var; + int namelen, i = 0, err = 0; ++ bool is_removable = false; + + if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len)) + return -EINVAL; + +- inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0); +- if (!inode) +- return -ENOMEM; +- + var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL); +- if (!var) { +- err = -ENOMEM; +- goto out; +- } ++ if (!var) ++ return -ENOMEM; + + /* length of the variable name itself: remove GUID and separator */ + namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1; +@@ -125,6 +122,16 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry, + efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1, + &var->var.VendorGuid); + ++ if (efivar_variable_is_removable(var->var.VendorGuid, ++ dentry->d_name.name, namelen)) ++ is_removable = true; ++ ++ inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable); ++ if (!inode) { ++ err = -ENOMEM; ++ goto out; ++ } ++ + for (i = 0; i < namelen; i++) + var->var.VariableName[i] = dentry->d_name.name[i]; + +@@ -138,7 +145,8 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry, + out: + if (err) { + kfree(var); +- iput(inode); ++ if (inode) ++ iput(inode); + } + return err; + } +diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h +index b5ff16addb7c..b4505188e799 100644 +--- a/fs/efivarfs/internal.h ++++ b/fs/efivarfs/internal.h +@@ -15,7 +15,8 @@ extern const struct file_operations efivarfs_file_operations; + extern const struct inode_operations efivarfs_dir_inode_operations; + extern bool efivarfs_valid_name(const char *str, int len); + extern struct inode *efivarfs_get_inode(struct super_block *sb, +- const struct inode *dir, int mode, dev_t dev); ++ const struct inode *dir, int mode, dev_t dev, ++ bool is_removable); + + extern struct list_head efivarfs_list; + +diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c +index becc725a1953..a3a30f84f917 100644 +--- a/fs/efivarfs/super.c ++++ b/fs/efivarfs/super.c +@@ -118,8 +118,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, + struct dentry *dentry, *root = sb->s_root; + unsigned long size = 0; + char *name; +- int len, i; ++ int len; + int err = -ENOMEM; ++ bool is_removable = false; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) +@@ -128,15 +129,17 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, + memcpy(entry->var.VariableName, name16, name_size); + memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); + +- len = ucs2_strlen(entry->var.VariableName); ++ len = ucs2_utf8size(entry->var.VariableName); + + /* name, plus '-', plus GUID, plus NUL*/ + name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL); + if (!name) + goto fail; + +- for (i = 0; i < len; i++) +- name[i] = entry->var.VariableName[i] & 0xFF; ++ ucs2_as_utf8(name, entry->var.VariableName, len); ++ ++ if 
(efivar_variable_is_removable(entry->var.VendorGuid, name, len)) ++ is_removable = true; + + name[len] = '-'; + +@@ -144,7 +147,8 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, + + name[len + EFI_VARIABLE_GUID_LEN+1] = '\0'; + +- inode = efivarfs_get_inode(sb, root->d_inode, S_IFREG | 0644, 0); ++ inode = efivarfs_get_inode(sb, root->d_inode, S_IFREG | 0644, 0, ++ is_removable); + if (!inode) + goto fail_name; + +@@ -200,7 +204,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent) + sb->s_d_op = &efivarfs_d_ops; + sb->s_time_gran = 1; + +- inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0); ++ inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true); + if (!inode) + return -ENOMEM; + inode->i_op = &efivarfs_dir_inode_operations; +diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h +index 2f38daaab3d7..d61c11170213 100644 +--- a/include/crypto/if_alg.h ++++ b/include/crypto/if_alg.h +@@ -30,8 +30,6 @@ struct alg_sock { + + struct sock *parent; + +- unsigned int refcnt; +- + const struct af_alg_type *type; + void *private; + }; +@@ -66,7 +64,6 @@ int af_alg_register_type(const struct af_alg_type *type); + int af_alg_unregister_type(const struct af_alg_type *type); + + int af_alg_release(struct socket *sock); +-void af_alg_release_parent(struct sock *sk); + int af_alg_accept(struct sock *sk, struct socket *newsock); + + int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len, +@@ -83,6 +80,11 @@ static inline struct alg_sock *alg_sk(struct sock *sk) + return (struct alg_sock *)sk; + } + ++static inline void af_alg_release_parent(struct sock *sk) ++{ ++ sock_put(alg_sk(sk)->parent); ++} ++ + static inline void af_alg_init_completion(struct af_alg_completion *completion) + { + init_completion(&completion->completion); +diff --git a/include/linux/efi.h b/include/linux/efi.h +index 0a819e7a60c9..b39a4651360a 100644 +--- a/include/linux/efi.h ++++ b/include/linux/efi.h +@@ -792,8 +792,10 @@ struct efivars { + * and we use a page for reading/writing. + */ + ++#define EFI_VAR_NAME_LEN 1024 ++ + struct efi_variable { +- efi_char16_t VariableName[1024/sizeof(efi_char16_t)]; ++ efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)]; + efi_guid_t VendorGuid; + unsigned long DataSize; + __u8 Data[1024]; +@@ -864,7 +866,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *), + struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, + struct list_head *head, bool remove); + +-bool efivar_validate(struct efi_variable *var, u8 *data, unsigned long len); ++bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, ++ unsigned long data_size); ++bool efivar_variable_is_removable(efi_guid_t vendor, const char *name, ++ size_t len); + + extern struct work_struct efivar_work; + void efivar_run_worker(void); +diff --git a/include/linux/module.h b/include/linux/module.h +index eaf60ff9ba94..adea1d663a06 100644 +--- a/include/linux/module.h ++++ b/include/linux/module.h +@@ -227,6 +227,12 @@ struct module_ref { + unsigned long decs; + } __attribute((aligned(2 * sizeof(unsigned long)))); + ++struct mod_kallsyms { ++ Elf_Sym *symtab; ++ unsigned int num_symtab; ++ char *strtab; ++}; ++ + struct module { + enum module_state state; + +@@ -314,14 +320,9 @@ struct module { + #endif + + #ifdef CONFIG_KALLSYMS +- /* +- * We keep the symbol and string tables for kallsyms. 
+- * The core_* fields below are temporary, loader-only (they +- * could really be discarded after module init). +- */ +- Elf_Sym *symtab, *core_symtab; +- unsigned int num_symtab, core_num_syms; +- char *strtab, *core_strtab; ++ /* Protected by RCU and/or module_mutex: use rcu_dereference() */ ++ struct mod_kallsyms *kallsyms; ++ struct mod_kallsyms core_kallsyms; + + /* Section attributes */ + struct module_sect_attrs *sect_attrs; +diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h +index 97c8689c7e51..326f8b238c31 100644 +--- a/include/linux/tracepoint.h ++++ b/include/linux/tracepoint.h +@@ -129,9 +129,6 @@ static inline void tracepoint_synchronize_unregister(void) + void *it_func; \ + void *__data; \ + \ +- if (!cpu_online(raw_smp_processor_id())) \ +- return; \ +- \ + if (!(cond)) \ + return; \ + prercu; \ +@@ -265,15 +262,19 @@ static inline void tracepoint_synchronize_unregister(void) + * "void *__data, proto" as the callback prototype. + */ + #define DECLARE_TRACE_NOARGS(name) \ +- __DECLARE_TRACE(name, void, , 1, void *__data, __data) ++ __DECLARE_TRACE(name, void, , \ ++ cpu_online(raw_smp_processor_id()), \ ++ void *__data, __data) + + #define DECLARE_TRACE(name, proto, args) \ +- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1, \ +- PARAMS(void *__data, proto), \ +- PARAMS(__data, args)) ++ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ ++ cpu_online(raw_smp_processor_id()), \ ++ PARAMS(void *__data, proto), \ ++ PARAMS(__data, args)) + + #define DECLARE_TRACE_CONDITION(name, proto, args, cond) \ +- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \ ++ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ ++ cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \ + PARAMS(void *__data, proto), \ + PARAMS(__data, args)) + +diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h +index cbb20afdbc01..bb679b48f408 100644 +--- a/include/linux/ucs2_string.h ++++ b/include/linux/ucs2_string.h +@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s); + unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength); + int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len); + ++unsigned long ucs2_utf8size(const ucs2_char_t *src); ++unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, ++ unsigned long maxlength); ++ + #endif /* _LINUX_UCS2_STRING_H_ */ +diff --git a/kernel/module.c b/kernel/module.c +index e40617fac8ad..3a311a1d26d7 100644 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -180,6 +180,9 @@ struct load_info { + struct _ddebug *debug; + unsigned int num_debug; + bool sig_ok; ++#ifdef CONFIG_KALLSYMS ++ unsigned long mod_kallsyms_init_off; ++#endif + struct { + unsigned int sym, str, mod, vers, info, pcpu; + } index; +@@ -2320,8 +2323,20 @@ static void layout_symtab(struct module *mod, struct load_info *info) + strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, + info->index.str) | INIT_OFFSET_MASK; + pr_debug("\t%s\n", info->secstrings + strsect->sh_name); ++ ++ /* We'll tack temporary mod_kallsyms on the end. */ ++ mod->init_size = ALIGN(mod->init_size, ++ __alignof__(struct mod_kallsyms)); ++ info->mod_kallsyms_init_off = mod->init_size; ++ mod->init_size += sizeof(struct mod_kallsyms); ++ mod->init_size = debug_align(mod->init_size); + } + ++/* ++ * We use the full symtab and strtab which layout_symtab arranged to ++ * be appended to the init section. Later we switch to the cut-down ++ * core-only ones. 
++ */ + static void add_kallsyms(struct module *mod, const struct load_info *info) + { + unsigned int i, ndst; +@@ -2330,28 +2345,33 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) + char *s; + Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; + +- mod->symtab = (void *)symsec->sh_addr; +- mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym); ++ /* Set up to point into init section. */ ++ mod->kallsyms = mod->module_init + info->mod_kallsyms_init_off; ++ ++ mod->kallsyms->symtab = (void *)symsec->sh_addr; ++ mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym); + /* Make sure we get permanent strtab: don't use info->strtab. */ +- mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr; ++ mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr; + + /* Set types up while we still have access to sections. */ +- for (i = 0; i < mod->num_symtab; i++) +- mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); +- +- mod->core_symtab = dst = mod->module_core + info->symoffs; +- mod->core_strtab = s = mod->module_core + info->stroffs; +- src = mod->symtab; +- for (ndst = i = 0; i < mod->num_symtab; i++) { ++ for (i = 0; i < mod->kallsyms->num_symtab; i++) ++ mod->kallsyms->symtab[i].st_info ++ = elf_type(&mod->kallsyms->symtab[i], info); ++ ++ /* Now populate the cut down core kallsyms for after init. */ ++ mod->core_kallsyms.symtab = dst = mod->module_core + info->symoffs; ++ mod->core_kallsyms.strtab = s = mod->module_core + info->stroffs; ++ src = mod->kallsyms->symtab; ++ for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) { + if (i == 0 || + is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { + dst[ndst] = src[i]; +- dst[ndst++].st_name = s - mod->core_strtab; +- s += strlcpy(s, &mod->strtab[src[i].st_name], ++ dst[ndst++].st_name = s - mod->core_kallsyms.strtab; ++ s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name], + KSYM_NAME_LEN) + 1; + } + } +- mod->core_num_syms = ndst; ++ mod->core_kallsyms.num_symtab = ndst; + } + #else + static inline void layout_symtab(struct module *mod, struct load_info *info) +@@ -3089,9 +3109,8 @@ static int do_init_module(struct module *mod) + module_put(mod); + trim_init_extable(mod); + #ifdef CONFIG_KALLSYMS +- mod->num_symtab = mod->core_num_syms; +- mod->symtab = mod->core_symtab; +- mod->strtab = mod->core_strtab; ++ /* Switch to core kallsyms now init is done: kallsyms may be walking! */ ++ rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms); + #endif + unset_module_init_ro_nx(mod); + module_free(mod, mod->module_init); +@@ -3381,9 +3400,9 @@ static inline int is_arm_mapping_symbol(const char *str) + && (str[2] == '\0' || str[2] == '.'); + } + +-static const char *symname(struct module *mod, unsigned int symnum) ++static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum) + { +- return mod->strtab + mod->symtab[symnum].st_name; ++ return kallsyms->strtab + kallsyms->symtab[symnum].st_name; + } + + static const char *get_ksymbol(struct module *mod, +@@ -3393,6 +3412,7 @@ static const char *get_ksymbol(struct module *mod, + { + unsigned int i, best = 0; + unsigned long nextval; ++ struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + + /* At worse, next value is at end of module */ + if (within_module_init(addr, mod)) +@@ -3402,32 +3422,32 @@ static const char *get_ksymbol(struct module *mod, + + /* Scan for closest preceding symbol, and next symbol. (ELF + starts real symbols at 1). 
*/ +- for (i = 1; i < mod->num_symtab; i++) { +- if (mod->symtab[i].st_shndx == SHN_UNDEF) ++ for (i = 1; i < kallsyms->num_symtab; i++) { ++ if (kallsyms->symtab[i].st_shndx == SHN_UNDEF) + continue; + + /* We ignore unnamed symbols: they're uninformative + * and inserted at a whim. */ +- if (*symname(mod, i) == '\0' +- || is_arm_mapping_symbol(symname(mod, i))) ++ if (*symname(kallsyms, i) == '\0' ++ || is_arm_mapping_symbol(symname(kallsyms, i))) + continue; + +- if (mod->symtab[i].st_value <= addr +- && mod->symtab[i].st_value > mod->symtab[best].st_value) ++ if (kallsyms->symtab[i].st_value <= addr ++ && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value) + best = i; +- if (mod->symtab[i].st_value > addr +- && mod->symtab[i].st_value < nextval) +- nextval = mod->symtab[i].st_value; ++ if (kallsyms->symtab[i].st_value > addr ++ && kallsyms->symtab[i].st_value < nextval) ++ nextval = kallsyms->symtab[i].st_value; + } + + if (!best) + return NULL; + + if (size) +- *size = nextval - mod->symtab[best].st_value; ++ *size = nextval - kallsyms->symtab[best].st_value; + if (offset) +- *offset = addr - mod->symtab[best].st_value; +- return symname(mod, best); ++ *offset = addr - kallsyms->symtab[best].st_value; ++ return symname(kallsyms, best); + } + + /* For kallsyms to ask for address resolution. NULL means not found. Careful +@@ -3523,18 +3543,21 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + + preempt_disable(); + list_for_each_entry_rcu(mod, &modules, list) { ++ struct mod_kallsyms *kallsyms; ++ + if (mod->state == MODULE_STATE_UNFORMED) + continue; +- if (symnum < mod->num_symtab) { +- *value = mod->symtab[symnum].st_value; +- *type = mod->symtab[symnum].st_info; +- strlcpy(name, symname(mod, symnum), KSYM_NAME_LEN); ++ kallsyms = rcu_dereference_sched(mod->kallsyms); ++ if (symnum < kallsyms->num_symtab) { ++ *value = kallsyms->symtab[symnum].st_value; ++ *type = kallsyms->symtab[symnum].st_info; ++ strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN); + strlcpy(module_name, mod->name, MODULE_NAME_LEN); + *exported = is_exported(name, *value, mod); + preempt_enable(); + return 0; + } +- symnum -= mod->num_symtab; ++ symnum -= kallsyms->num_symtab; + } + preempt_enable(); + return -ERANGE; +@@ -3543,11 +3566,12 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + static unsigned long mod_find_symname(struct module *mod, const char *name) + { + unsigned int i; ++ struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + +- for (i = 0; i < mod->num_symtab; i++) +- if (strcmp(name, symname(mod, i)) == 0 && +- mod->symtab[i].st_info != 'U') +- return mod->symtab[i].st_value; ++ for (i = 0; i < kallsyms->num_symtab; i++) ++ if (strcmp(name, symname(kallsyms, i)) == 0 && ++ kallsyms->symtab[i].st_info != 'U') ++ return kallsyms->symtab[i].st_value; + return 0; + } + +@@ -3584,11 +3608,14 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + int ret; + + list_for_each_entry(mod, &modules, list) { ++ /* We hold module_mutex: no need for rcu_dereference_sched */ ++ struct mod_kallsyms *kallsyms = mod->kallsyms; ++ + if (mod->state == MODULE_STATE_UNFORMED) + continue; +- for (i = 0; i < mod->num_symtab; i++) { +- ret = fn(data, symname(mod, i), +- mod, mod->symtab[i].st_value); ++ for (i = 0; i < kallsyms->num_symtab; i++) { ++ ret = fn(data, symname(kallsyms, i), ++ mod, kallsyms->symtab[i].st_value); + if (ret != 0) + return ret; + } +diff --git a/lib/ucs2_string.c 
b/lib/ucs2_string.c +index 6f500ef2301d..f0b323abb4c6 100644 +--- a/lib/ucs2_string.c ++++ b/lib/ucs2_string.c +@@ -49,3 +49,65 @@ ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len) + } + } + EXPORT_SYMBOL(ucs2_strncmp); ++ ++unsigned long ++ucs2_utf8size(const ucs2_char_t *src) ++{ ++ unsigned long i; ++ unsigned long j = 0; ++ ++ for (i = 0; i < ucs2_strlen(src); i++) { ++ u16 c = src[i]; ++ ++ if (c >= 0x800) ++ j += 3; ++ else if (c >= 0x80) ++ j += 2; ++ else ++ j += 1; ++ } ++ ++ return j; ++} ++EXPORT_SYMBOL(ucs2_utf8size); ++ ++/* ++ * copy at most maxlength bytes of whole utf8 characters to dest from the ++ * ucs2 string src. ++ * ++ * The return value is the number of characters copied, not including the ++ * final NUL character. ++ */ ++unsigned long ++ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength) ++{ ++ unsigned int i; ++ unsigned long j = 0; ++ unsigned long limit = ucs2_strnlen(src, maxlength); ++ ++ for (i = 0; maxlength && i < limit; i++) { ++ u16 c = src[i]; ++ ++ if (c >= 0x800) { ++ if (maxlength < 3) ++ break; ++ maxlength -= 3; ++ dest[j++] = 0xe0 | (c & 0xf000) >> 12; ++ dest[j++] = 0x80 | (c & 0x0fc0) >> 6; ++ dest[j++] = 0x80 | (c & 0x003f); ++ } else if (c >= 0x80) { ++ if (maxlength < 2) ++ break; ++ maxlength -= 2; ++ dest[j++] = 0xc0 | (c & 0x7c0) >> 6; ++ dest[j++] = 0x80 | (c & 0x03f); ++ } else { ++ maxlength -= 1; ++ dest[j++] = c & 0x7f; ++ } ++ } ++ if (maxlength) ++ dest[j] = '\0'; ++ return j; ++} ++EXPORT_SYMBOL(ucs2_as_utf8); +diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c +index 31bf2586fb84..864408026202 100644 +--- a/net/mac80211/agg-rx.c ++++ b/net/mac80211/agg-rx.c +@@ -290,7 +290,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, + } + + /* prepare A-MPDU MLME for Rx aggregation */ +- tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL); ++ tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL); + if (!tid_agg_rx) + goto end; + +diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c +index c1b5b73c5b91..c3111e2b6fa0 100644 +--- a/net/mac80211/rc80211_minstrel_ht.c ++++ b/net/mac80211/rc80211_minstrel_ht.c +@@ -463,7 +463,7 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb) + if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO) + return; + +- ieee80211_start_tx_ba_session(pubsta, tid, 5000); ++ ieee80211_start_tx_ba_session(pubsta, tid, 0); + } + + static void +diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c +index c8717c1d082e..87dd619fb2e9 100644 +--- a/net/wireless/wext-core.c ++++ b/net/wireless/wext-core.c +@@ -342,6 +342,39 @@ static const int compat_event_type_size[] = { + + /* IW event code */ + ++static void wireless_nlevent_flush(void) ++{ ++ struct sk_buff *skb; ++ struct net *net; ++ ++ ASSERT_RTNL(); ++ ++ for_each_net(net) { ++ while ((skb = skb_dequeue(&net->wext_nlevents))) ++ rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, ++ GFP_KERNEL); ++ } ++} ++ ++static int wext_netdev_notifier_call(struct notifier_block *nb, ++ unsigned long state, void *ptr) ++{ ++ /* ++ * When a netdev changes state in any way, flush all pending messages ++ * to avoid them going out in a strange order, e.g. RTM_NEWLINK after ++ * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close() ++ * or similar - all of which could otherwise happen due to delays from ++ * schedule_work(). 
++ */ ++ wireless_nlevent_flush(); ++ ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block wext_netdev_notifier = { ++ .notifier_call = wext_netdev_notifier_call, ++}; ++ + static int __net_init wext_pernet_init(struct net *net) + { + skb_queue_head_init(&net->wext_nlevents); +@@ -360,7 +393,12 @@ static struct pernet_operations wext_pernet_ops = { + + static int __init wireless_nlevent_init(void) + { +- return register_pernet_subsys(&wext_pernet_ops); ++ int err = register_pernet_subsys(&wext_pernet_ops); ++ ++ if (err) ++ return err; ++ ++ return register_netdevice_notifier(&wext_netdev_notifier); + } + + subsys_initcall(wireless_nlevent_init); +@@ -368,17 +406,8 @@ subsys_initcall(wireless_nlevent_init); + /* Process events generated by the wireless layer or the driver. */ + static void wireless_nlevent_process(struct work_struct *work) + { +- struct sk_buff *skb; +- struct net *net; +- + rtnl_lock(); +- +- for_each_net(net) { +- while ((skb = skb_dequeue(&net->wext_nlevents))) +- rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, +- GFP_KERNEL); +- } +- ++ wireless_nlevent_flush(); + rtnl_unlock(); + } + +diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c +index d4248e00160e..4f736197f6ee 100644 +--- a/sound/soc/codecs/wm8958-dsp2.c ++++ b/sound/soc/codecs/wm8958-dsp2.c +@@ -459,7 +459,7 @@ static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +@@ -549,7 +549,7 @@ static int wm8958_put_vss_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +@@ -582,7 +582,7 @@ static int wm8958_put_vss_hpf_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +@@ -749,7 +749,7 @@ static int wm8958_put_enh_eq_enum(struct snd_kcontrol *kcontrol, + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + struct wm8994 *control = wm8994->wm8994; +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + int reg; + + /* Don't allow on the fly reconfiguration */ +diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c +index 66aec0c3b7bb..355b07e4fe2a 100644 +--- a/sound/soc/codecs/wm8994.c ++++ b/sound/soc/codecs/wm8994.c +@@ -360,7 +360,7 @@ static int wm8994_put_drc_enum(struct snd_kcontrol *kcontrol, + struct wm8994 *control = wm8994->wm8994; + struct wm8994_pdata *pdata = &control->pdata; + int drc = wm8994_get_drc(kcontrol->id.name); +- int value = ucontrol->value.integer.value[0]; ++ int value = ucontrol->value.enumerated.item[0]; + + if (drc < 0) + return drc; +@@ -467,7 +467,7 @@ static int 
+ struct wm8994 *control = wm8994->wm8994;
+ struct wm8994_pdata *pdata = &control->pdata;
+ int block = wm8994_get_retune_mobile_block(kcontrol->id.name);
+- int value = ucontrol->value.integer.value[0];
++ int value = ucontrol->value.enumerated.item[0];
+
+ if (block < 0)
+ return block;
+diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
+index 77edcdcc016b..057278448515 100644
+--- a/tools/testing/selftests/efivarfs/efivarfs.sh
++++ b/tools/testing/selftests/efivarfs/efivarfs.sh
+@@ -88,7 +88,11 @@ test_delete()
+ exit 1
+ fi
+
+- rm $file
++ rm $file 2>/dev/null
++ if [ $? -ne 0 ]; then
++ chattr -i $file
++ rm $file
++ fi
+
+ if [ -e $file ]; then
+ echo "$file couldn't be deleted" >&2
+@@ -111,6 +115,7 @@ test_zero_size_delete()
+ exit 1
+ fi
+
++ chattr -i $file
+ printf "$attrs" > $file
+
+ if [ -e $file ]; then
+@@ -141,7 +146,11 @@ test_valid_filenames()
+ echo "$file could not be created" >&2
+ ret=1
+ else
+- rm $file
++ rm $file 2>/dev/null
++ if [ $? -ne 0 ]; then
++ chattr -i $file
++ rm $file
++ fi
+ fi
+ done
+
+@@ -174,7 +183,11 @@ test_invalid_filenames()
+
+ if [ -e $file ]; then
+ echo "Creating $file should have failed" >&2
+- rm $file
++ rm $file 2>/dev/null
++ if [ $? -ne 0 ]; then
++ chattr -i $file
++ rm $file
++ fi
+ ret=1
+ fi
+ done
+diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c
+index 8c0764407b3c..4af74f733036 100644
+--- a/tools/testing/selftests/efivarfs/open-unlink.c
++++ b/tools/testing/selftests/efivarfs/open-unlink.c
+@@ -1,10 +1,68 @@
++#include <errno.h>
+ #include <stdio.h>
+ #include <stdint.h>
+ #include <stdlib.h>
+ #include <unistd.h>
++#include <sys/ioctl.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+ #include <fcntl.h>
++#include <linux/fs.h>
++
++static int set_immutable(const char *path, int immutable)
++{
++ unsigned int flags;
++ int fd;
++ int rc;
++ int error;
++
++ fd = open(path, O_RDONLY);
++ if (fd < 0)
++ return fd;
++
++ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
++ if (rc < 0) {
++ error = errno;
++ close(fd);
++ errno = error;
++ return rc;
++ }
++
++ if (immutable)
++ flags |= FS_IMMUTABLE_FL;
++ else
++ flags &= ~FS_IMMUTABLE_FL;
++
++ rc = ioctl(fd, FS_IOC_SETFLAGS, &flags);
++ error = errno;
++ close(fd);
++ errno = error;
++ return rc;
++}
++
++static int get_immutable(const char *path)
++{
++ unsigned int flags;
++ int fd;
++ int rc;
++ int error;
++
++ fd = open(path, O_RDONLY);
++ if (fd < 0)
++ return fd;
++
++ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
++ if (rc < 0) {
++ error = errno;
++ close(fd);
++ errno = error;
++ return rc;
++ }
++ close(fd);
++ if (flags & FS_IMMUTABLE_FL)
++ return 1;
++ return 0;
++}
+
+ int main(int argc, char **argv)
+ {
+@@ -27,7 +85,7 @@ int main(int argc, char **argv)
+ buf[4] = 0;
+
+ /* create a test variable */
+- fd = open(path, O_WRONLY | O_CREAT);
++ fd = open(path, O_WRONLY | O_CREAT, 0600);
+ if (fd < 0) {
+ perror("open(O_WRONLY)");
+ return EXIT_FAILURE;
+@@ -41,6 +99,18 @@ int main(int argc, char **argv)
+
+ close(fd);
+
++ rc = get_immutable(path);
++ if (rc < 0) {
++ perror("ioctl(FS_IOC_GETFLAGS)");
++ return EXIT_FAILURE;
++ } else if (rc) {
++ rc = set_immutable(path, 0);
++ if (rc < 0) {
++ perror("ioctl(FS_IOC_SETFLAGS)");
++ return EXIT_FAILURE;
++ }
++ }
++
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ perror("open");

From 5faa94915222472d7c0be8c72a1f967d9e51865d Mon Sep 17 00:00:00 2001
From: Igor Pecovnik
Date: Wed, 16 Mar 2016 20:18:43 +0100
Subject: [PATCH 2/2] Fixed broken patches for Udoo Quad
uboot and Cubox kernel upgrade --- patch/kernel/cubox-default/patch-3.14.63-64.patch | 13 ------------- .../u-boot-udoo-default/udoo-2015-u-boot.patch | 6 +++--- 2 files changed, 3 insertions(+), 16 deletions(-) diff --git a/patch/kernel/cubox-default/patch-3.14.63-64.patch b/patch/kernel/cubox-default/patch-3.14.63-64.patch index 4e1bc88a6..1e253d6ab 100644 --- a/patch/kernel/cubox-default/patch-3.14.63-64.patch +++ b/patch/kernel/cubox-default/patch-3.14.63-64.patch @@ -1592,19 +1592,6 @@ index e879da81ad93..b8b5a719a784 100644 /* * Setup function pointers for generic logic in * target_core_fabric_configfs.c -diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c -index 39bd7ec8bf75..f66a14b6fc73 100644 ---- a/drivers/usb/chipidea/otg.c -+++ b/drivers/usb/chipidea/otg.c -@@ -96,7 +96,7 @@ static void ci_otg_work(struct work_struct *work) - int ci_hdrc_otg_init(struct ci_hdrc *ci) - { - INIT_WORK(&ci->work, ci_otg_work); -- ci->wq = create_singlethread_workqueue("ci_otg"); -+ ci->wq = create_freezable_workqueue("ci_otg"); - if (!ci->wq) { - dev_err(ci->dev, "can't create workqueue\n"); - return -ENODEV; diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c index 460c266b8e24..cdec2492ff40 100644 --- a/drivers/usb/gadget/tcm_usb_gadget.c diff --git a/patch/u-boot/u-boot-udoo-default/udoo-2015-u-boot.patch b/patch/u-boot/u-boot-udoo-default/udoo-2015-u-boot.patch index c843a1e1e..c068c93b0 100644 --- a/patch/u-boot/u-boot-udoo-default/udoo-2015-u-boot.patch +++ b/patch/u-boot/u-boot-udoo-default/udoo-2015-u-boot.patch @@ -18,11 +18,11 @@ diff --git a/include/configs/udoo_qdl.h b/include/configs/udoo_qdl.h "mmcargs=setenv bootargs console=${console},${baudrate} ${video} ${memory} " \ "root=${mmcroot}\0" \ "loadbootscript=" \ -- "fatload mmc ${mmcdev}:${mmcpart} ${loadaddr} ${script};\0" \ +- "fatload " UBOOT_DEVICE " ${mmcdev}:${mmcpart} ${loadaddr} ${script};\0" \ + "run scan_dev_for_boot;\0" \ "bootscript=echo Running bootscript from mmc ...; " \ - "env import -t ${loadaddr} ${filesize};\0" \ + "source\0" \ - "loadimage=fatload mmc ${mmcdev}:${mmcpart} ${loadaddr} ${image}\0" \ - "loadfdt=fatload mmc ${mmcdev}:${mmcpart} ${fdt_addr} ${fdt_file}\0" \ + "loadimage=fatload " UBOOT_DEVICE " ${mmcdev}:${mmcpart} ${loadaddr} ${image}\0" \ + "loadfdt=fatload " UBOOT_DEVICE " ${mmcdev}:${mmcpart} ${fdt_addr} ${fdt_file}\0" \ "mmcboot=echo Booting from mmc ...; " \ \ No newline at end of file