Mirror of https://github.com/Fishwaldo/build.git (synced 2025-07-11 15:38:48 +00:00)
* Change DEV to EDGE
* Renaming patches dev folder to edge
* Move patches into subdir where they will be archived
* Relink patch directories properly
1636 lines, 52 KiB
diff --git a/Makefile b/Makefile
index 2dabcc4f0d16d..494420ad33a1d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
-SUBLEVEL = 217
+SUBLEVEL = 218
EXTRAVERSION =
NAME = Petit Gorille
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index ba7f4c8f5c3e4..e8e637c4f354d 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -393,7 +393,7 @@ static int __init xen_guest_init(void)
}
gnttab_init();
if (!xen_initial_domain())
- xenbus_probe(NULL);
+ xenbus_probe();
/*
* Making sure board specific code will not set up ops for
diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig
index d0de378beefe5..7d54f284ce10f 100644
--- a/arch/sh/drivers/dma/Kconfig
+++ b/arch/sh/drivers/dma/Kconfig
@@ -63,8 +63,7 @@ config PVR2_DMA
config G2_DMA
tristate "G2 Bus DMA support"
- depends on SH_DREAMCAST
- select SH_DMA_API
+ depends on SH_DREAMCAST && SH_DMA_API
help
This enables support for the DMA controller for the Dreamcast's
G2 bus. Drivers that want this will generally enable this on
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 3a250ca2406c0..644f9e14cb095 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -36,6 +36,8 @@ KBUILD_CFLAGS += -mno-mmx -mno-sse
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+# Disable relocation relaxation in case the link is not PIE.
+KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index e79db7ba2f10d..bd58f0743cfc4 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -585,6 +585,8 @@ static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
if (!device)
return -EINVAL;
+ *device = NULL;
+
status = acpi_get_data_full(handle, acpi_scan_drop_device,
(void **)device, callback);
if (ACPI_FAILURE(status) || !*device) {
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index fc762b4adcb22..b14d481ab7dbb 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -654,9 +654,8 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
spin_lock_irqsave(&mvpwm->lock, flags);
- val = (unsigned long long)
- readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm));
- val *= NSEC_PER_SEC;
+ u = readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm));
+ val = (unsigned long long) u * NSEC_PER_SEC;
do_div(val, mvpwm->clk_rate);
if (val > UINT_MAX)
state->duty_cycle = UINT_MAX;
@@ -665,21 +664,17 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
else
state->duty_cycle = 1;
- val = (unsigned long long)
- readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm));
+ val = (unsigned long long) u; /* on duration */
+ /* period = on + off duration */
+ val += readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm));
val *= NSEC_PER_SEC;
do_div(val, mvpwm->clk_rate);
- if (val < state->duty_cycle) {
+ if (val > UINT_MAX)
+ state->period = UINT_MAX;
+ else if (val)
+ state->period = val;
+ else
state->period = 1;
- } else {
- val -= state->duty_cycle;
- if (val > UINT_MAX)
- state->period = UINT_MAX;
- else if (val)
- state->period = val;
- else
- state->period = 1;
- }
regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u);
if (u)
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 331478bd2ff86..ec7c8cc0e4b62 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -2608,7 +2608,7 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set,
ret = handle_conflicting_encoders(state, true);
if (ret)
- return ret;
+ goto fail;
ret = drm_atomic_commit(state);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 7deb81b6dbac6..4b571cc6bc70f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -75,7 +75,7 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
image.base, image.type, image.size);
- if (!shadow_fetch(bios, mthd, image.size)) {
+ if (!shadow_fetch(bios, mthd, image.base + image.size)) {
nvkm_debug(subdev, "%08x: fetch failed\n", image.base);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index edb6148cbca04..d0e80ad526845 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -33,7 +33,7 @@ static void
gm200_i2c_aux_fini(struct gm200_i2c_aux *aux)
{
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000);
+ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00710000, 0x00000000);
}
static int
@@ -54,10 +54,10 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
return -EBUSY;
}
- } while (ctrl & 0x03010000);
+ } while (ctrl & 0x07010000);
/* set some magic, and wait up to 1ms for it to appear */
- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq);
+ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00700000, ureq);
timeout = 1000;
do {
ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
@@ -67,7 +67,7 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
gm200_i2c_aux_fini(aux);
return -EBUSY;
}
- } while ((ctrl & 0x03000000) != urep);
+ } while ((ctrl & 0x07000000) != urep);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
|
|
index d80dbc8f09b20..55a4ea4393c62 100644
|
|
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
|
|
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
|
|
@@ -22,6 +22,7 @@
|
|
* Authors: Ben Skeggs
|
|
*/
|
|
#include "priv.h"
|
|
+#include <subdev/timer.h>
|
|
|
|
static void
|
|
gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
|
|
@@ -31,7 +32,6 @@ gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
|
|
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
|
|
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
|
|
nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
|
|
- nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
|
|
}
|
|
|
|
static void
|
|
@@ -42,7 +42,6 @@ gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
|
|
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
|
|
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
|
|
nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
|
|
- nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
|
|
}
|
|
|
|
static void
|
|
@@ -53,7 +52,6 @@ gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
|
|
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
|
|
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
|
|
nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
|
|
- nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
|
|
}
|
|
|
|
void
|
|
@@ -90,6 +88,12 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
|
|
intr1 &= ~stat;
|
|
}
|
|
}
|
|
+
|
|
+ nvkm_mask(device, 0x121c4c, 0x0000003f, 0x00000002);
|
|
+ nvkm_msec(device, 2000,
|
|
+ if (!(nvkm_rd32(device, 0x121c4c) & 0x0000003f))
|
|
+ break;
|
|
+ );
|
|
}
|
|
|
|
static int
|
|
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
|
|
index 9025ed1bd2a99..4caf3ef087e1d 100644
|
|
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
|
|
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
|
|
@@ -22,6 +22,7 @@
|
|
* Authors: Ben Skeggs
|
|
*/
|
|
#include "priv.h"
|
|
+#include <subdev/timer.h>
|
|
|
|
static void
|
|
gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
|
|
@@ -31,7 +32,6 @@ gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
|
|
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
|
|
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
|
|
nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
|
|
- nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
|
|
}
|
|
|
|
static void
|
|
@@ -42,7 +42,6 @@ gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
|
|
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
|
|
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
|
|
nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
|
|
- nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
|
|
}
|
|
|
|
static void
|
|
@@ -53,7 +52,6 @@ gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
|
|
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
|
|
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
|
|
nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
|
|
- nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
|
|
}
|
|
|
|
void
|
|
@@ -90,6 +88,12 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
|
|
intr1 &= ~stat;
|
|
}
|
|
}
|
|
+
|
|
+ nvkm_mask(device, 0x12004c, 0x0000003f, 0x00000002);
|
|
+ nvkm_msec(device, 2000,
|
|
+ if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
|
|
+ break;
|
|
+ );
|
|
}
|
|
|
|
static int
|
|
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
|
|
index 99ef61de9b1e7..6a66825370249 100644
|
|
--- a/drivers/hwtracing/intel_th/pci.c
|
|
+++ b/drivers/hwtracing/intel_th/pci.c
|
|
@@ -238,6 +238,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
|
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26),
|
|
.driver_data = (kernel_ulong_t)&intel_th_2x,
|
|
},
|
|
+ {
|
|
+ /* Alder Lake-P */
|
|
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6),
|
|
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
|
|
+ },
|
|
{
|
|
/* Emmitsburg PCH */
|
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1bcc),
|
|
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
|
|
index 3da7b673aab25..3957ce678265d 100644
|
|
--- a/drivers/hwtracing/stm/heartbeat.c
|
|
+++ b/drivers/hwtracing/stm/heartbeat.c
|
|
@@ -72,7 +72,7 @@ static void stm_heartbeat_unlink(struct stm_source_data *data)
|
|
|
|
static int stm_heartbeat_init(void)
|
|
{
|
|
- int i, ret = -ENOMEM;
|
|
+ int i, ret;
|
|
|
|
if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX)
|
|
return -EINVAL;
|
|
@@ -80,8 +80,10 @@ static int stm_heartbeat_init(void)
|
|
for (i = 0; i < nr_devs; i++) {
|
|
stm_heartbeat[i].data.name =
|
|
kasprintf(GFP_KERNEL, "heartbeat.%d", i);
|
|
- if (!stm_heartbeat[i].data.name)
|
|
+ if (!stm_heartbeat[i].data.name) {
|
|
+ ret = -ENOMEM;
|
|
goto fail_unregister;
|
|
+ }
|
|
|
|
stm_heartbeat[i].data.nr_chans = 1;
|
|
stm_heartbeat[i].data.link = stm_heartbeat_link;
|
|
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
|
|
index 1d87757990568..cd512a93f3ba9 100644
|
|
--- a/drivers/i2c/busses/i2c-octeon-core.c
|
|
+++ b/drivers/i2c/busses/i2c-octeon-core.c
|
|
@@ -346,7 +346,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
|
|
if (result)
|
|
return result;
|
|
if (recv_len && i == 0) {
|
|
- if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
|
|
+ if (data[i] > I2C_SMBUS_BLOCK_MAX)
|
|
return -EPROTO;
|
|
length += data[i];
|
|
}
|
|
diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
|
|
index f6cd35d0a2ac0..240bd1e908927 100644
|
|
--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
|
|
+++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
|
|
@@ -91,7 +91,7 @@ static int tegra_bpmp_xlate_flags(u16 flags, u16 *out)
|
|
flags &= ~I2C_M_RECV_LEN;
|
|
}
|
|
|
|
- return (flags != 0) ? -EINVAL : 0;
|
|
+ return 0;
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
|
|
index 712d86b4be09b..7a2cda108a97e 100644
|
|
--- a/drivers/iio/dac/ad5504.c
|
|
+++ b/drivers/iio/dac/ad5504.c
|
|
@@ -189,9 +189,9 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev,
|
|
return ret;
|
|
|
|
if (pwr_down)
|
|
- st->pwr_down_mask |= (1 << chan->channel);
|
|
- else
|
|
st->pwr_down_mask &= ~(1 << chan->channel);
|
|
+ else
|
|
+ st->pwr_down_mask |= (1 << chan->channel);
|
|
|
|
ret = ad5504_spi_write(st, AD5504_ADDR_CTRL,
|
|
AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) |
|
|
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
|
|
index 66f97fde13d80..51e09f6c653c3 100644
|
|
--- a/drivers/irqchip/irq-mips-cpu.c
|
|
+++ b/drivers/irqchip/irq-mips-cpu.c
|
|
@@ -201,6 +201,13 @@ static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
|
|
if (ret)
|
|
return ret;
|
|
|
|
+ ret = irq_domain_set_hwirq_and_chip(domain->parent, virq + i, hwirq,
|
|
+ &mips_mt_cpu_irq_controller,
|
|
+ NULL);
|
|
+
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
|
|
if (ret)
|
|
return ret;
|
|
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
|
|
index 78d4e7347e2f3..c855ab2feb181 100644
|
|
--- a/drivers/md/dm-table.c
|
|
+++ b/drivers/md/dm-table.c
|
|
@@ -431,14 +431,23 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
|
|
{
|
|
int r;
|
|
dev_t dev;
|
|
+ unsigned int major, minor;
|
|
+ char dummy;
|
|
struct dm_dev_internal *dd;
|
|
struct dm_table *t = ti->table;
|
|
|
|
BUG_ON(!t);
|
|
|
|
- dev = dm_get_dev_t(path);
|
|
- if (!dev)
|
|
- return -ENODEV;
|
|
+ if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
|
|
+ /* Extract the major/minor numbers */
|
|
+ dev = MKDEV(major, minor);
|
|
+ if (MAJOR(dev) != major || MINOR(dev) != minor)
|
|
+ return -EOVERFLOW;
|
|
+ } else {
|
|
+ dev = dm_get_dev_t(path);
|
|
+ if (!dev)
|
|
+ return -ENODEV;
|
|
+ }
|
|
|
|
dd = find_device(&t->devices, dev);
|
|
if (!dd) {
|
|
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
|
|
index fafb02644efde..ca34fa424634f 100644
|
|
--- a/drivers/mmc/host/sdhci-xenon.c
|
|
+++ b/drivers/mmc/host/sdhci-xenon.c
|
|
@@ -170,7 +170,12 @@ static void xenon_reset_exit(struct sdhci_host *host,
|
|
/* Disable tuning request and auto-retuning again */
|
|
xenon_retune_setup(host);
|
|
|
|
- xenon_set_acg(host, true);
|
|
+ /*
|
|
+ * The ACG should be turned off at the early init time, in order
|
|
+ * to solve a possible issues with the 1.8V regulator stabilization.
|
|
+ * The feature is enabled in later stage.
|
|
+ */
|
|
+ xenon_set_acg(host, false);
|
|
|
|
xenon_set_sdclk_off_idle(host, sdhc_id, false);
|
|
|
|
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
|
|
index c483c4b787fee..1025cfd463ece 100644
|
|
--- a/drivers/net/can/dev.c
|
|
+++ b/drivers/net/can/dev.c
|
|
@@ -578,11 +578,11 @@ static void can_restart(struct net_device *dev)
|
|
}
|
|
cf->can_id |= CAN_ERR_RESTARTED;
|
|
|
|
- netif_rx_ni(skb);
|
|
-
|
|
stats->rx_packets++;
|
|
stats->rx_bytes += cf->can_dlc;
|
|
|
|
+ netif_rx_ni(skb);
|
|
+
|
|
restart:
|
|
netdev_dbg(dev, "restarted\n");
|
|
priv->can_stats.restarts++;
|
|
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
|
|
index b4c4a2c764378..fc9197f14a3fb 100644
|
|
--- a/drivers/net/can/vxcan.c
|
|
+++ b/drivers/net/can/vxcan.c
|
|
@@ -49,6 +49,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
struct net_device *peer;
|
|
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
|
|
struct net_device_stats *peerstats, *srcstats = &dev->stats;
|
|
+ u8 len;
|
|
|
|
if (can_dropped_invalid_skb(dev, skb))
|
|
return NETDEV_TX_OK;
|
|
@@ -71,12 +72,13 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
skb->dev = peer;
|
|
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
|
|
|
+ len = cfd->len;
|
|
if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
|
|
srcstats->tx_packets++;
|
|
- srcstats->tx_bytes += cfd->len;
|
|
+ srcstats->tx_bytes += len;
|
|
peerstats = &peer->stats;
|
|
peerstats->rx_packets++;
|
|
- peerstats->rx_bytes += cfd->len;
|
|
+ peerstats->rx_bytes += len;
|
|
}
|
|
|
|
out_unlock:
|
|
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
|
|
index 5c3fa0be8844e..c17cdbd0bb6af 100644
|
|
--- a/drivers/net/dsa/b53/b53_common.c
|
|
+++ b/drivers/net/dsa/b53/b53_common.c
|
|
@@ -970,7 +970,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
|
|
if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0)
|
|
return -EOPNOTSUPP;
|
|
|
|
- if (vlan->vid_end > dev->num_vlans)
|
|
+ if (vlan->vid_end >= dev->num_vlans)
|
|
return -ERANGE;
|
|
|
|
b53_enable_vlan(dev, true);
|
|
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
|
|
index 25f3b2ad26e9c..1f4987364ef88 100644
|
|
--- a/drivers/net/ethernet/renesas/sh_eth.c
|
|
+++ b/drivers/net/ethernet/renesas/sh_eth.c
|
|
@@ -2517,10 +2517,10 @@ static int sh_eth_close(struct net_device *ndev)
|
|
/* Free all the skbuffs in the Rx queue and the DMA buffer. */
|
|
sh_eth_ring_free(ndev);
|
|
|
|
- pm_runtime_put_sync(&mdp->pdev->dev);
|
|
-
|
|
mdp->is_opened = 0;
|
|
|
|
+ pm_runtime_put(&mdp->pdev->dev);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
|
|
index 342e086e41991..f46fa8a2f6585 100644
|
|
--- a/drivers/scsi/ufs/ufshcd.c
|
|
+++ b/drivers/scsi/ufs/ufshcd.c
|
|
@@ -5536,19 +5536,16 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
|
|
{
|
|
struct Scsi_Host *host;
|
|
struct ufs_hba *hba;
|
|
- unsigned int tag;
|
|
u32 pos;
|
|
int err;
|
|
- u8 resp = 0xF;
|
|
- struct ufshcd_lrb *lrbp;
|
|
+ u8 resp = 0xF, lun;
|
|
unsigned long flags;
|
|
|
|
host = cmd->device->host;
|
|
hba = shost_priv(host);
|
|
- tag = cmd->request->tag;
|
|
|
|
- lrbp = &hba->lrb[tag];
|
|
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
|
|
+ lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
|
|
+ err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
|
|
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
|
|
if (!err)
|
|
err = resp;
|
|
@@ -5557,7 +5554,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
|
|
|
|
/* clear the commands that were pending for corresponding LUN */
|
|
for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
|
|
- if (hba->lrb[pos].lun == lrbp->lun) {
|
|
+ if (hba->lrb[pos].lun == lun) {
|
|
err = ufshcd_clear_cmd(hba, pos);
|
|
if (err)
|
|
break;
|
|
diff --git a/drivers/usb/gadget/udc/bdc/Kconfig b/drivers/usb/gadget/udc/bdc/Kconfig
|
|
index c74ac25dddcd0..051091bd265bc 100644
|
|
--- a/drivers/usb/gadget/udc/bdc/Kconfig
|
|
+++ b/drivers/usb/gadget/udc/bdc/Kconfig
|
|
@@ -15,7 +15,7 @@ if USB_BDC_UDC
|
|
comment "Platform Support"
|
|
config USB_BDC_PCI
|
|
tristate "BDC support for PCIe based platforms"
|
|
- depends on USB_PCI
|
|
+ depends on USB_PCI && BROKEN
|
|
default USB_BDC_UDC
|
|
help
|
|
Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform.
|
|
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
|
|
index 4c6d612990ba4..db7c8aec23fc6 100644
|
|
--- a/drivers/usb/gadget/udc/core.c
|
|
+++ b/drivers/usb/gadget/udc/core.c
|
|
@@ -1458,10 +1458,13 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
|
|
struct device_attribute *attr, const char *buf, size_t n)
|
|
{
|
|
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
|
|
+ ssize_t ret;
|
|
|
|
+ mutex_lock(&udc_lock);
|
|
if (!udc->driver) {
|
|
dev_err(dev, "soft-connect without a gadget driver\n");
|
|
- return -EOPNOTSUPP;
|
|
+ ret = -EOPNOTSUPP;
|
|
+ goto out;
|
|
}
|
|
|
|
if (sysfs_streq(buf, "connect")) {
|
|
@@ -1473,10 +1476,14 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
|
|
usb_gadget_udc_stop(udc);
|
|
} else {
|
|
dev_err(dev, "unsupported command '%s'\n", buf);
|
|
- return -EINVAL;
|
|
+ ret = -EINVAL;
|
|
+ goto out;
|
|
}
|
|
|
|
- return n;
|
|
+ ret = n;
|
|
+out:
|
|
+ mutex_unlock(&udc_lock);
|
|
+ return ret;
|
|
}
|
|
static DEVICE_ATTR(soft_connect, S_IWUSR, NULL, usb_udc_softconn_store);
|
|
|
|
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
|
|
index 44b7066b12469..406294a5a5f74 100644
|
|
--- a/drivers/usb/host/ehci-hcd.c
|
|
+++ b/drivers/usb/host/ehci-hcd.c
|
|
@@ -587,6 +587,7 @@ static int ehci_run (struct usb_hcd *hcd)
|
|
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
|
|
u32 temp;
|
|
u32 hcc_params;
|
|
+ int rc;
|
|
|
|
hcd->uses_new_polling = 1;
|
|
|
|
@@ -642,9 +643,20 @@ static int ehci_run (struct usb_hcd *hcd)
|
|
down_write(&ehci_cf_port_reset_rwsem);
|
|
ehci->rh_state = EHCI_RH_RUNNING;
|
|
ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
|
|
+
|
|
+ /* Wait until HC become operational */
|
|
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
|
|
msleep(5);
|
|
+ rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000);
|
|
+
|
|
up_write(&ehci_cf_port_reset_rwsem);
|
|
+
|
|
+ if (rc) {
|
|
+ ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n",
|
|
+ ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc);
|
|
+ return rc;
|
|
+ }
|
|
+
|
|
ehci->last_periodic_enable = ktime_get_real();
|
|
|
|
temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
|
|
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
|
|
index c144172a09a40..1cb613364eb62 100644
|
|
--- a/drivers/usb/host/ehci-hub.c
|
|
+++ b/drivers/usb/host/ehci-hub.c
|
|
@@ -358,6 +358,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
|
|
|
|
unlink_empty_async_suspended(ehci);
|
|
|
|
+ /* Some Synopsys controllers mistakenly leave IAA turned on */
|
|
+ ehci_writel(ehci, STS_IAA, &ehci->regs->status);
|
|
+
|
|
/* Any IAA cycle that started before the suspend is now invalid */
|
|
end_iaa_cycle(ehci);
|
|
ehci_handle_start_intr_unlinks(ehci);
|
|
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
|
|
index 9828c1eff9a5f..0c5b2c75b8713 100644
|
|
--- a/drivers/usb/host/xhci-ring.c
|
|
+++ b/drivers/usb/host/xhci-ring.c
|
|
@@ -2907,6 +2907,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
|
|
trb->field[0] = cpu_to_le32(field1);
|
|
trb->field[1] = cpu_to_le32(field2);
|
|
trb->field[2] = cpu_to_le32(field3);
|
|
+ /* make sure TRB is fully written before giving it to the controller */
|
|
+ wmb();
|
|
trb->field[3] = cpu_to_le32(field4);
|
|
|
|
trace_xhci_queue_trb(ring, trb);
|
|
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
|
|
index 28df32d856715..808720b9f33e0 100644
|
|
--- a/drivers/usb/host/xhci-tegra.c
|
|
+++ b/drivers/usb/host/xhci-tegra.c
|
|
@@ -579,6 +579,13 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
|
|
enable);
|
|
if (err < 0)
|
|
break;
|
|
+
|
|
+ /*
|
|
+ * wait 500us for LFPS detector to be disabled before
|
|
+ * sending ACK
|
|
+ */
|
|
+ if (!enable)
|
|
+ usleep_range(500, 1000);
|
|
}
|
|
|
|
if (err < 0) {
|
|
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
|
|
index aca8456752797..8c08c7d46d3d0 100644
|
|
--- a/drivers/xen/events/events_base.c
|
|
+++ b/drivers/xen/events/events_base.c
|
|
@@ -1987,16 +1987,6 @@ static struct irq_chip xen_percpu_chip __read_mostly = {
|
|
.irq_ack = ack_dynirq,
|
|
};
|
|
|
|
-int xen_set_callback_via(uint64_t via)
|
|
-{
|
|
- struct xen_hvm_param a;
|
|
- a.domid = DOMID_SELF;
|
|
- a.index = HVM_PARAM_CALLBACK_IRQ;
|
|
- a.value = via;
|
|
- return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
|
|
-}
|
|
-EXPORT_SYMBOL_GPL(xen_set_callback_via);
|
|
-
|
|
#ifdef CONFIG_XEN_PVHVM
|
|
/* Vector callbacks are better than PCI interrupts to receive event
|
|
* channel notifications because we can receive vector callbacks on any
|
|
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
|
|
index 5d7dcad0b0a0d..4cec8146609ad 100644
|
|
--- a/drivers/xen/platform-pci.c
|
|
+++ b/drivers/xen/platform-pci.c
|
|
@@ -162,7 +162,6 @@ static int platform_pci_probe(struct pci_dev *pdev,
|
|
ret = gnttab_init();
|
|
if (ret)
|
|
goto grant_out;
|
|
- xenbus_probe(NULL);
|
|
return 0;
|
|
grant_out:
|
|
gnttab_free_auto_xlat_frames();
|
|
diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
|
|
index 139539b0ab20d..e6a8d02d35254 100644
|
|
--- a/drivers/xen/xenbus/xenbus.h
|
|
+++ b/drivers/xen/xenbus/xenbus.h
|
|
@@ -114,6 +114,7 @@ int xenbus_probe_node(struct xen_bus_type *bus,
|
|
const char *type,
|
|
const char *nodename);
|
|
int xenbus_probe_devices(struct xen_bus_type *bus);
|
|
+void xenbus_probe(void);
|
|
|
|
void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
|
|
|
|
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
|
|
index eb5151fc8efab..e5fda0256feb3 100644
|
|
--- a/drivers/xen/xenbus/xenbus_comms.c
|
|
+++ b/drivers/xen/xenbus/xenbus_comms.c
|
|
@@ -57,16 +57,8 @@ DEFINE_MUTEX(xs_response_mutex);
|
|
static int xenbus_irq;
|
|
static struct task_struct *xenbus_task;
|
|
|
|
-static DECLARE_WORK(probe_work, xenbus_probe);
|
|
-
|
|
-
|
|
static irqreturn_t wake_waiting(int irq, void *unused)
|
|
{
|
|
- if (unlikely(xenstored_ready == 0)) {
|
|
- xenstored_ready = 1;
|
|
- schedule_work(&probe_work);
|
|
- }
|
|
-
|
|
wake_up(&xb_waitq);
|
|
return IRQ_HANDLED;
|
|
}
|
|
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
|
|
index 217bcc092a968..fe24e8dcb2b8e 100644
|
|
--- a/drivers/xen/xenbus/xenbus_probe.c
|
|
+++ b/drivers/xen/xenbus/xenbus_probe.c
|
|
@@ -674,29 +674,76 @@ void unregister_xenstore_notifier(struct notifier_block *nb)
|
|
}
|
|
EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
|
|
|
|
-void xenbus_probe(struct work_struct *unused)
|
|
+void xenbus_probe(void)
|
|
{
|
|
xenstored_ready = 1;
|
|
|
|
+ /*
|
|
+ * In the HVM case, xenbus_init() deferred its call to
|
|
+ * xs_init() in case callbacks were not operational yet.
|
|
+ * So do it now.
|
|
+ */
|
|
+ if (xen_store_domain_type == XS_HVM)
|
|
+ xs_init();
|
|
+
|
|
/* Notify others that xenstore is up */
|
|
blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
|
|
}
|
|
-EXPORT_SYMBOL_GPL(xenbus_probe);
|
|
|
|
-static int __init xenbus_probe_initcall(void)
|
|
+/*
|
|
+ * Returns true when XenStore init must be deferred in order to
|
|
+ * allow the PCI platform device to be initialised, before we
|
|
+ * can actually have event channel interrupts working.
|
|
+ */
|
|
+static bool xs_hvm_defer_init_for_callback(void)
|
|
{
|
|
- if (!xen_domain())
|
|
- return -ENODEV;
|
|
+#ifdef CONFIG_XEN_PVHVM
|
|
+ return xen_store_domain_type == XS_HVM &&
|
|
+ !xen_have_vector_callback;
|
|
+#else
|
|
+ return false;
|
|
+#endif
|
|
+}
|
|
|
|
- if (xen_initial_domain() || xen_hvm_domain())
|
|
- return 0;
|
|
+static int __init xenbus_probe_initcall(void)
|
|
+{
|
|
+ /*
|
|
+ * Probe XenBus here in the XS_PV case, and also XS_HVM unless we
|
|
+ * need to wait for the platform PCI device to come up.
|
|
+ */
|
|
+ if (xen_store_domain_type == XS_PV ||
|
|
+ (xen_store_domain_type == XS_HVM &&
|
|
+ !xs_hvm_defer_init_for_callback()))
|
|
+ xenbus_probe();
|
|
|
|
- xenbus_probe(NULL);
|
|
return 0;
|
|
}
|
|
-
|
|
device_initcall(xenbus_probe_initcall);
|
|
|
|
+int xen_set_callback_via(uint64_t via)
|
|
+{
|
|
+ struct xen_hvm_param a;
|
|
+ int ret;
|
|
+
|
|
+ a.domid = DOMID_SELF;
|
|
+ a.index = HVM_PARAM_CALLBACK_IRQ;
|
|
+ a.value = via;
|
|
+
|
|
+ ret = HYPERVISOR_hvm_op(HVMOP_set_param, &a);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ /*
|
|
+ * If xenbus_probe_initcall() deferred the xenbus_probe()
|
|
+ * due to the callback not functioning yet, we can do it now.
|
|
+ */
|
|
+ if (!xenstored_ready && xs_hvm_defer_init_for_callback())
|
|
+ xenbus_probe();
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(xen_set_callback_via);
|
|
+
|
|
/* Set up event channel for xenstored which is run as a local process
|
|
* (this is normally used only in dom0)
|
|
*/
|
|
@@ -810,11 +857,17 @@ static int __init xenbus_init(void)
|
|
break;
|
|
}
|
|
|
|
- /* Initialize the interface to xenstore. */
|
|
- err = xs_init();
|
|
- if (err) {
|
|
- pr_warn("Error initializing xenstore comms: %i\n", err);
|
|
- goto out_error;
|
|
+ /*
|
|
+ * HVM domains may not have a functional callback yet. In that
|
|
+ * case let xs_init() be called from xenbus_probe(), which will
|
|
+ * get invoked at an appropriate time.
|
|
+ */
|
|
+ if (xen_store_domain_type != XS_HVM) {
|
|
+ err = xs_init();
|
|
+ if (err) {
|
|
+ pr_warn("Error initializing xenstore comms: %i\n", err);
|
|
+ goto out_error;
|
|
+ }
|
|
}
|
|
|
|
if ((xen_store_domain_type != XS_LOCAL) &&
|
|
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
|
|
index 898f962d3a068..eb635eab304ed 100644
|
|
--- a/fs/ext4/inode.c
|
|
+++ b/fs/ext4/inode.c
|
|
@@ -5064,16 +5064,16 @@ static int other_inode_match(struct inode * inode, unsigned long ino,
|
|
|
|
if ((inode->i_ino != ino) ||
|
|
(inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
|
|
- I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
|
|
+ I_DIRTY_INODE)) ||
|
|
((inode->i_state & I_DIRTY_TIME) == 0))
|
|
return 0;
|
|
spin_lock(&inode->i_lock);
|
|
if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
|
|
- I_DIRTY_SYNC | I_DIRTY_DATASYNC)) == 0) &&
|
|
+ I_DIRTY_INODE)) == 0) &&
|
|
(inode->i_state & I_DIRTY_TIME)) {
|
|
struct ext4_inode_info *ei = EXT4_I(inode);
|
|
|
|
- inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED);
|
|
+ inode->i_state &= ~I_DIRTY_TIME;
|
|
spin_unlock(&inode->i_lock);
|
|
|
|
spin_lock(&ei->i_raw_lock);
|
|
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
|
|
index 3dbb875ed7903..384f95e1936dd 100644
|
|
--- a/fs/fs-writeback.c
|
|
+++ b/fs/fs-writeback.c
|
|
@@ -1154,7 +1154,7 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
|
|
*/
|
|
static int move_expired_inodes(struct list_head *delaying_queue,
|
|
struct list_head *dispatch_queue,
|
|
- int flags, unsigned long dirtied_before)
|
|
+ unsigned long dirtied_before)
|
|
{
|
|
LIST_HEAD(tmp);
|
|
struct list_head *pos, *node;
|
|
@@ -1170,8 +1170,6 @@ static int move_expired_inodes(struct list_head *delaying_queue,
|
|
list_move(&inode->i_io_list, &tmp);
|
|
moved++;
|
|
spin_lock(&inode->i_lock);
|
|
- if (flags & EXPIRE_DIRTY_ATIME)
|
|
- inode->i_state |= I_DIRTY_TIME_EXPIRED;
|
|
inode->i_state |= I_SYNC_QUEUED;
|
|
spin_unlock(&inode->i_lock);
|
|
if (sb_is_blkdev_sb(inode->i_sb))
|
|
@@ -1219,11 +1217,11 @@ static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
|
|
|
|
assert_spin_locked(&wb->list_lock);
|
|
list_splice_init(&wb->b_more_io, &wb->b_io);
|
|
- moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, dirtied_before);
|
|
+ moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before);
|
|
if (!work->for_sync)
|
|
time_expire_jif = jiffies - dirtytime_expire_interval * HZ;
|
|
moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
|
|
- EXPIRE_DIRTY_ATIME, time_expire_jif);
|
|
+ time_expire_jif);
|
|
if (moved)
|
|
wb_io_lists_populated(wb);
|
|
trace_writeback_queue_io(wb, work, dirtied_before, moved);
|
|
@@ -1391,26 +1389,26 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
|
|
ret = err;
|
|
}
|
|
|
|
+ /*
|
|
+ * If the inode has dirty timestamps and we need to write them, call
|
|
+ * mark_inode_dirty_sync() to notify the filesystem about it and to
|
|
+ * change I_DIRTY_TIME into I_DIRTY_SYNC.
|
|
+ */
|
|
+ if ((inode->i_state & I_DIRTY_TIME) &&
|
|
+ (wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
|
|
+ time_after(jiffies, inode->dirtied_time_when +
|
|
+ dirtytime_expire_interval * HZ))) {
|
|
+ trace_writeback_lazytime(inode);
|
|
+ mark_inode_dirty_sync(inode);
|
|
+ }
|
|
+
|
|
/*
|
|
* Some filesystems may redirty the inode during the writeback
|
|
* due to delalloc, clear dirty metadata flags right before
|
|
* write_inode()
|
|
*/
|
|
spin_lock(&inode->i_lock);
|
|
-
|
|
dirty = inode->i_state & I_DIRTY;
|
|
- if (inode->i_state & I_DIRTY_TIME) {
|
|
- if ((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
|
|
- wbc->sync_mode == WB_SYNC_ALL ||
|
|
- unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
|
|
- unlikely(time_after(jiffies,
|
|
- (inode->dirtied_time_when +
|
|
- dirtytime_expire_interval * HZ)))) {
|
|
- dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
|
|
- trace_writeback_lazytime(inode);
|
|
- }
|
|
- } else
|
|
- inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
|
|
inode->i_state &= ~dirty;
|
|
|
|
/*
|
|
@@ -1431,8 +1429,6 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
|
|
|
|
spin_unlock(&inode->i_lock);
|
|
|
|
- if (dirty & I_DIRTY_TIME)
|
|
- mark_inode_dirty_sync(inode);
|
|
/* Don't write the inode if only I_DIRTY_PAGES was set */
|
|
if (dirty & ~I_DIRTY_PAGES) {
|
|
int err = write_inode(inode, wbc);
|
|
@@ -2136,7 +2132,6 @@ static noinline void block_dump___mark_inode_dirty(struct inode *inode)
|
|
*/
|
|
void __mark_inode_dirty(struct inode *inode, int flags)
|
|
{
|
|
-#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
|
|
struct super_block *sb = inode->i_sb;
|
|
int dirtytime;
|
|
|
|
@@ -2146,7 +2141,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
|
|
* Don't do this for I_DIRTY_PAGES - that doesn't actually
|
|
* dirty the inode itself
|
|
*/
|
|
- if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_TIME)) {
|
|
+ if (flags & (I_DIRTY_INODE | I_DIRTY_TIME)) {
|
|
trace_writeback_dirty_inode_start(inode, flags);
|
|
|
|
if (sb->s_op->dirty_inode)
|
|
@@ -2222,7 +2217,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
|
|
if (dirtytime)
|
|
inode->dirtied_time_when = jiffies;
|
|
|
|
- if (inode->i_state & (I_DIRTY_INODE | I_DIRTY_PAGES))
|
|
+ if (inode->i_state & I_DIRTY)
|
|
dirty_list = &wb->b_dirty;
|
|
else
|
|
dirty_list = &wb->b_dirty_time;
|
|
@@ -2246,8 +2241,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
|
|
}
|
|
out_unlock_inode:
|
|
spin_unlock(&inode->i_lock);
|
|
-
|
|
-#undef I_DIRTY_INODE
|
|
}
|
|
EXPORT_SYMBOL(__mark_inode_dirty);
|
|
|
|
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
|
|
index 639e2c86758a4..bcf95ec1bc31d 100644
|
|
--- a/fs/gfs2/super.c
|
|
+++ b/fs/gfs2/super.c
|
|
@@ -791,7 +791,7 @@ static void gfs2_dirty_inode(struct inode *inode, int flags)
|
|
int need_endtrans = 0;
|
|
int ret;
|
|
|
|
- if (!(flags & (I_DIRTY_DATASYNC|I_DIRTY_SYNC)))
|
|
+ if (!(flags & I_DIRTY_INODE))
|
|
return;
|
|
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
|
return;
|
|
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
|
|
index 6d7ead22c1b4c..6aef1d7ec96b6 100644
|
|
--- a/include/linux/compiler-gcc.h
|
|
+++ b/include/linux/compiler-gcc.h
|
|
@@ -152,6 +152,12 @@
|
|
|
|
#if GCC_VERSION < 30200
|
|
# error Sorry, your compiler is too old - please upgrade it.
|
|
+#elif defined(CONFIG_ARM64) && GCC_VERSION < 50100 && !defined(__clang__)
|
|
+/*
|
|
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293
|
|
+ * https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk
|
|
+ */
|
|
+# error Sorry, your version of GCC is too old - please use 5.1 or newer.
|
|
#endif
|
|
|
|
#if GCC_VERSION < 30300
|
|
diff --git a/include/linux/fs.h b/include/linux/fs.h
|
|
index 30172ad84b25f..309c151decd8c 100644
|
|
--- a/include/linux/fs.h
|
|
+++ b/include/linux/fs.h
|
|
@@ -2010,12 +2010,12 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
|
|
#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
|
|
#define I_LINKABLE (1 << 10)
|
|
#define I_DIRTY_TIME (1 << 11)
|
|
-#define I_DIRTY_TIME_EXPIRED (1 << 12)
|
|
#define I_WB_SWITCH (1 << 13)
|
|
#define I_OVL_INUSE (1 << 14)
|
|
#define I_SYNC_QUEUED (1 << 17)
|
|
|
|
-#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
|
|
+#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
|
|
+#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
|
|
#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)
|
|
|
|
extern void __mark_inode_dirty(struct inode *, int);
|
|
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
|
|
index 3a76ca2eecd06..627f5759b67d1 100644
|
|
--- a/include/trace/events/writeback.h
|
|
+++ b/include/trace/events/writeback.h
|
|
@@ -20,7 +20,6 @@
|
|
{I_CLEAR, "I_CLEAR"}, \
|
|
{I_SYNC, "I_SYNC"}, \
|
|
{I_DIRTY_TIME, "I_DIRTY_TIME"}, \
|
|
- {I_DIRTY_TIME_EXPIRED, "I_DIRTY_TIME_EXPIRED"}, \
|
|
{I_REFERENCED, "I_REFERENCED"} \
|
|
)
|
|
|
|
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
|
|
index eba01ab5a55e0..fe9a9fa2ebc45 100644
|
|
--- a/include/xen/xenbus.h
|
|
+++ b/include/xen/xenbus.h
|
|
@@ -187,7 +187,7 @@ void xs_suspend_cancel(void);
|
|
|
|
struct work_struct;
|
|
|
|
-void xenbus_probe(struct work_struct *);
|
|
+void xenbus_probe(void);
|
|
|
|
#define XENBUS_IS_ERR_READ(str) ({ \
|
|
if (!IS_ERR(str) && strlen(str) == 0) { \
|
|
diff --git a/kernel/futex.c b/kernel/futex.c
|
|
index 322b9a840da63..39d2c20f500db 100644
|
|
--- a/kernel/futex.c
|
|
+++ b/kernel/futex.c
|
|
@@ -839,6 +839,29 @@ static struct futex_pi_state *alloc_pi_state(void)
|
|
return pi_state;
|
|
}
|
|
|
|
+static void pi_state_update_owner(struct futex_pi_state *pi_state,
|
|
+ struct task_struct *new_owner)
|
|
+{
|
|
+ struct task_struct *old_owner = pi_state->owner;
|
|
+
|
|
+ lockdep_assert_held(&pi_state->pi_mutex.wait_lock);
|
|
+
|
|
+ if (old_owner) {
|
|
+ raw_spin_lock(&old_owner->pi_lock);
|
|
+ WARN_ON(list_empty(&pi_state->list));
|
|
+ list_del_init(&pi_state->list);
|
|
+ raw_spin_unlock(&old_owner->pi_lock);
|
|
+ }
|
|
+
|
|
+ if (new_owner) {
|
|
+ raw_spin_lock(&new_owner->pi_lock);
|
|
+ WARN_ON(!list_empty(&pi_state->list));
|
|
+ list_add(&pi_state->list, &new_owner->pi_state_list);
|
|
+ pi_state->owner = new_owner;
|
|
+ raw_spin_unlock(&new_owner->pi_lock);
|
|
+ }
|
|
+}
|
|
+
|
|
static void get_pi_state(struct futex_pi_state *pi_state)
|
|
{
|
|
WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount));
|
|
@@ -861,17 +884,11 @@ static void put_pi_state(struct futex_pi_state *pi_state)
|
|
* and has cleaned up the pi_state already
|
|
*/
|
|
if (pi_state->owner) {
|
|
- struct task_struct *owner;
|
|
unsigned long flags;
|
|
|
|
raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
|
|
- owner = pi_state->owner;
|
|
- if (owner) {
|
|
- raw_spin_lock(&owner->pi_lock);
|
|
- list_del_init(&pi_state->list);
|
|
- raw_spin_unlock(&owner->pi_lock);
|
|
- }
|
|
- rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
|
|
+ pi_state_update_owner(pi_state, NULL);
|
|
+ rt_mutex_proxy_unlock(&pi_state->pi_mutex);
|
|
raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
|
|
}
|
|
|
|
@@ -1035,7 +1052,8 @@ static inline void exit_pi_state_list(struct task_struct *curr) { }
|
|
* FUTEX_OWNER_DIED bit. See [4]
|
|
*
|
|
* [10] There is no transient state which leaves owner and user space
|
|
- * TID out of sync.
|
|
+ * TID out of sync. Except one error case where the kernel is denied
|
|
+ * write access to the user address, see fixup_pi_state_owner().
|
|
*
|
|
*
|
|
* Serialization and lifetime rules:
|
|
@@ -1615,26 +1633,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
|
|
ret = -EINVAL;
|
|
}
|
|
|
|
- if (ret)
|
|
- goto out_unlock;
|
|
-
|
|
- /*
|
|
- * This is a point of no return; once we modify the uval there is no
|
|
- * going back and subsequent operations must not fail.
|
|
- */
|
|
-
|
|
- raw_spin_lock(&pi_state->owner->pi_lock);
|
|
- WARN_ON(list_empty(&pi_state->list));
|
|
- list_del_init(&pi_state->list);
|
|
- raw_spin_unlock(&pi_state->owner->pi_lock);
|
|
-
|
|
- raw_spin_lock(&new_owner->pi_lock);
|
|
- WARN_ON(!list_empty(&pi_state->list));
|
|
- list_add(&pi_state->list, &new_owner->pi_state_list);
|
|
- pi_state->owner = new_owner;
|
|
- raw_spin_unlock(&new_owner->pi_lock);
|
|
-
|
|
- postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
|
|
+ if (!ret) {
|
|
+ /*
|
|
+ * This is a point of no return; once we modified the uval
|
|
+ * there is no going back and subsequent operations must
|
|
+ * not fail.
|
|
+ */
|
|
+ pi_state_update_owner(pi_state, new_owner);
|
|
+ postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
|
|
+ }
|
|
|
|
out_unlock:
|
|
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
|
|
@@ -1725,8 +1732,8 @@ static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
|
|
{
|
|
unsigned int op = (encoded_op & 0x70000000) >> 28;
|
|
unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
|
|
- int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 12);
|
|
- int cmparg = sign_extend32(encoded_op & 0x00000fff, 12);
|
|
+ int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
|
|
+ int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
|
|
int oldval, ret;
|
|
|
|
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
|
|
@@ -2457,18 +2464,13 @@ static void unqueue_me_pi(struct futex_q *q)
|
|
spin_unlock(q->lock_ptr);
|
|
}
|
|
|
|
-static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
|
|
- struct task_struct *argowner)
|
|
+static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
|
|
+ struct task_struct *argowner)
|
|
{
|
|
+ u32 uval, uninitialized_var(curval), newval, newtid;
|
|
struct futex_pi_state *pi_state = q->pi_state;
|
|
- u32 uval, uninitialized_var(curval), newval;
|
|
struct task_struct *oldowner, *newowner;
|
|
- u32 newtid;
|
|
- int ret, err = 0;
|
|
-
|
|
- lockdep_assert_held(q->lock_ptr);
|
|
-
|
|
- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
|
|
+ int err = 0;
|
|
|
|
oldowner = pi_state->owner;
|
|
|
|
@@ -2502,14 +2504,12 @@ retry:
|
|
* We raced against a concurrent self; things are
|
|
* already fixed up. Nothing to do.
|
|
*/
|
|
- ret = 0;
|
|
- goto out_unlock;
|
|
+ return 0;
|
|
}
|
|
|
|
if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
|
|
- /* We got the lock after all, nothing to fix. */
|
|
- ret = 0;
|
|
- goto out_unlock;
|
|
+ /* We got the lock. pi_state is correct. Tell caller. */
|
|
+ return 1;
|
|
}
|
|
|
|
/*
|
|
@@ -2536,8 +2536,7 @@ retry:
|
|
* We raced against a concurrent self; things are
|
|
* already fixed up. Nothing to do.
|
|
*/
|
|
- ret = 0;
|
|
- goto out_unlock;
|
|
+ return 1;
|
|
}
|
|
newowner = argowner;
|
|
}
|
|
@@ -2567,22 +2566,9 @@ retry:
|
|
* We fixed up user space. Now we need to fix the pi_state
|
|
* itself.
|
|
*/
|
|
- if (pi_state->owner != NULL) {
|
|
- raw_spin_lock(&pi_state->owner->pi_lock);
|
|
- WARN_ON(list_empty(&pi_state->list));
|
|
- list_del_init(&pi_state->list);
|
|
- raw_spin_unlock(&pi_state->owner->pi_lock);
|
|
- }
|
|
+ pi_state_update_owner(pi_state, newowner);
|
|
|
|
- pi_state->owner = newowner;
|
|
-
|
|
- raw_spin_lock(&newowner->pi_lock);
|
|
- WARN_ON(!list_empty(&pi_state->list));
|
|
- list_add(&pi_state->list, &newowner->pi_state_list);
|
|
- raw_spin_unlock(&newowner->pi_lock);
|
|
- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
|
|
-
|
|
- return 0;
|
|
+ return argowner == current;
|
|
|
|
/*
|
|
* In order to reschedule or handle a page fault, we need to drop the
|
|
@@ -2603,17 +2589,16 @@ handle_err:
|
|
|
|
switch (err) {
|
|
case -EFAULT:
|
|
- ret = fault_in_user_writeable(uaddr);
|
|
+ err = fault_in_user_writeable(uaddr);
|
|
break;
|
|
|
|
case -EAGAIN:
|
|
cond_resched();
|
|
- ret = 0;
|
|
+ err = 0;
|
|
break;
|
|
|
|
default:
|
|
WARN_ON_ONCE(1);
|
|
- ret = err;
|
|
break;
|
|
}
|
|
|
|
@@ -2623,17 +2608,44 @@ handle_err:
|
|
/*
|
|
* Check if someone else fixed it for us:
|
|
*/
|
|
- if (pi_state->owner != oldowner) {
|
|
- ret = 0;
|
|
- goto out_unlock;
|
|
- }
|
|
+ if (pi_state->owner != oldowner)
|
|
+ return argowner == current;
|
|
|
|
- if (ret)
|
|
- goto out_unlock;
|
|
+ /* Retry if err was -EAGAIN or the fault in succeeded */
|
|
+ if (!err)
|
|
+ goto retry;
|
|
|
|
- goto retry;
|
|
+ /*
|
|
+ * fault_in_user_writeable() failed so user state is immutable. At
|
|
+ * best we can make the kernel state consistent but user state will
|
|
+ * be most likely hosed and any subsequent unlock operation will be
|
|
+ * rejected due to PI futex rule [10].
|
|
+ *
|
|
+ * Ensure that the rtmutex owner is also the pi_state owner despite
|
|
+ * the user space value claiming something different. There is no
|
|
+ * point in unlocking the rtmutex if current is the owner as it
|
|
+ * would need to wait until the next waiter has taken the rtmutex
|
|
+ * to guarantee consistent state. Keep it simple. Userspace asked
|
|
+ * for this wreckaged state.
|
|
+ *
|
|
+ * The rtmutex has an owner - either current or some other
|
|
+ * task. See the EAGAIN loop above.
|
|
+ */
|
|
+ pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex));
|
|
|
|
-out_unlock:
|
|
+ return err;
|
|
+}
|
|
+
|
|
+static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
|
|
+ struct task_struct *argowner)
|
|
+{
|
|
+ struct futex_pi_state *pi_state = q->pi_state;
|
|
+ int ret;
|
|
+
|
|
+ lockdep_assert_held(q->lock_ptr);
|
|
+
|
|
+ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
|
|
+ ret = __fixup_pi_state_owner(uaddr, q, argowner);
|
|
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
|
|
return ret;
|
|
}
|
|
@@ -2657,8 +2669,6 @@ static long futex_wait_restart(struct restart_block *restart);
|
|
*/
|
|
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
|
|
{
|
|
- int ret = 0;
|
|
-
|
|
if (locked) {
|
|
/*
|
|
* Got the lock. We might not be the anticipated owner if we
|
|
@@ -2669,8 +2679,8 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
|
|
* stable state, anything else needs more attention.
|
|
*/
|
|
if (q->pi_state->owner != current)
|
|
- ret = fixup_pi_state_owner(uaddr, q, current);
|
|
- goto out;
|
|
+ return fixup_pi_state_owner(uaddr, q, current);
|
|
+ return 1;
|
|
}
|
|
|
|
/*
|
|
@@ -2681,24 +2691,17 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
|
|
* Another speculative read; pi_state->owner == current is unstable
|
|
* but needs our attention.
|
|
*/
|
|
- if (q->pi_state->owner == current) {
|
|
- ret = fixup_pi_state_owner(uaddr, q, NULL);
|
|
- goto out;
|
|
- }
|
|
+ if (q->pi_state->owner == current)
|
|
+ return fixup_pi_state_owner(uaddr, q, NULL);
|
|
|
|
/*
|
|
* Paranoia check. If we did not take the lock, then we should not be
|
|
- * the owner of the rt_mutex.
|
|
+ * the owner of the rt_mutex. Warn and establish consistent state.
|
|
*/
|
|
- if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
|
|
- printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
|
|
- "pi-state %p\n", ret,
|
|
- q->pi_state->pi_mutex.owner,
|
|
- q->pi_state->owner);
|
|
- }
|
|
+ if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current))
|
|
+ return fixup_pi_state_owner(uaddr, q, current);
|
|
|
|
-out:
|
|
- return ret ? ret : locked;
|
|
+ return 0;
|
|
}
|
|
|
|
/**
|
|
@@ -2919,7 +2922,6 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
|
|
ktime_t *time, int trylock)
|
|
{
|
|
struct hrtimer_sleeper timeout, *to = NULL;
|
|
- struct futex_pi_state *pi_state = NULL;
|
|
struct task_struct *exiting = NULL;
|
|
struct rt_mutex_waiter rt_waiter;
|
|
struct futex_hash_bucket *hb;
|
|
@@ -3062,23 +3064,9 @@ no_block:
|
|
if (res)
|
|
ret = (res < 0) ? res : 0;
|
|
|
|
- /*
|
|
- * If fixup_owner() faulted and was unable to handle the fault, unlock
|
|
- * it and return the fault to userspace.
|
|
- */
|
|
- if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
|
|
- pi_state = q.pi_state;
|
|
- get_pi_state(pi_state);
|
|
- }
|
|
-
|
|
/* Unqueue and drop the lock */
|
|
unqueue_me_pi(&q);
|
|
|
|
- if (pi_state) {
|
|
- rt_mutex_futex_unlock(&pi_state->pi_mutex);
|
|
- put_pi_state(pi_state);
|
|
- }
|
|
-
|
|
goto out_put_key;
|
|
|
|
out_unlock_put_key:
|
|
@@ -3344,7 +3332,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
|
|
u32 __user *uaddr2)
|
|
{
|
|
struct hrtimer_sleeper timeout, *to = NULL;
|
|
- struct futex_pi_state *pi_state = NULL;
|
|
struct rt_mutex_waiter rt_waiter;
|
|
struct futex_hash_bucket *hb;
|
|
union futex_key key2 = FUTEX_KEY_INIT;
|
|
@@ -3429,16 +3416,17 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
|
|
if (q.pi_state && (q.pi_state->owner != current)) {
|
|
spin_lock(q.lock_ptr);
|
|
ret = fixup_pi_state_owner(uaddr2, &q, current);
|
|
- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
|
|
- pi_state = q.pi_state;
|
|
- get_pi_state(pi_state);
|
|
- }
|
|
/*
|
|
* Drop the reference to the pi state which
|
|
* the requeue_pi() code acquired for us.
|
|
*/
|
|
put_pi_state(q.pi_state);
|
|
spin_unlock(q.lock_ptr);
|
|
+ /*
|
|
+ * Adjust the return value. It's either -EFAULT or
|
|
+ * success (1) but the caller expects 0 for success.
|
|
+ */
|
|
+ ret = ret < 0 ? ret : 0;
|
|
}
|
|
} else {
|
|
struct rt_mutex *pi_mutex;
|
|
@@ -3469,25 +3457,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
|
|
if (res)
|
|
ret = (res < 0) ? res : 0;
|
|
|
|
- /*
|
|
- * If fixup_pi_state_owner() faulted and was unable to handle
|
|
- * the fault, unlock the rt_mutex and return the fault to
|
|
- * userspace.
|
|
- */
|
|
- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
|
|
- pi_state = q.pi_state;
|
|
- get_pi_state(pi_state);
|
|
- }
|
|
-
|
|
/* Unqueue and drop the lock. */
|
|
unqueue_me_pi(&q);
|
|
}
|
|
|
|
- if (pi_state) {
|
|
- rt_mutex_futex_unlock(&pi_state->pi_mutex);
|
|
- put_pi_state(pi_state);
|
|
- }
|
|
-
|
|
if (ret == -EINTR) {
|
|
/*
|
|
* We've already been requeued, but cannot restart by calling
|
|
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
|
|
index 71c554a9e17f1..ed1d8959e7149 100644
|
|
--- a/kernel/locking/rtmutex.c
|
|
+++ b/kernel/locking/rtmutex.c
|
|
@@ -1719,8 +1719,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
|
|
* possible because it belongs to the pi_state which is about to be freed
|
|
* and it is not longer visible to other tasks.
|
|
*/
|
|
-void rt_mutex_proxy_unlock(struct rt_mutex *lock,
|
|
- struct task_struct *proxy_owner)
|
|
+void rt_mutex_proxy_unlock(struct rt_mutex *lock)
|
|
{
|
|
debug_rt_mutex_proxy_unlock(lock);
|
|
rt_mutex_set_owner(lock, NULL);
|
|
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
|
|
index 68686b3ec3c17..4d27cb0d9d8a4 100644
|
|
--- a/kernel/locking/rtmutex_common.h
|
|
+++ b/kernel/locking/rtmutex_common.h
|
|
@@ -132,8 +132,7 @@ enum rtmutex_chainwalk {
|
|
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
|
|
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
|
|
struct task_struct *proxy_owner);
|
|
-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
|
|
- struct task_struct *proxy_owner);
|
|
+extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
|
|
extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
|
|
extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
|
|
struct rt_mutex_waiter *waiter,
|
|
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
|
|
index 8082328eb01a4..d929c0afbacca 100644
|
|
--- a/kernel/trace/ring_buffer.c
|
|
+++ b/kernel/trace/ring_buffer.c
|
|
@@ -4262,6 +4262,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
|
|
|
|
if (!cpumask_test_cpu(cpu, buffer->cpumask))
|
|
return;
|
|
+ /* prevent another thread from changing buffer sizes */
|
|
+ mutex_lock(&buffer->mutex);
|
|
|
|
atomic_inc(&buffer->resize_disabled);
|
|
atomic_inc(&cpu_buffer->record_disabled);
|
|
@@ -4285,6 +4287,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
|
|
|
|
atomic_dec(&cpu_buffer->record_disabled);
|
|
atomic_dec(&buffer->resize_disabled);
|
|
+
|
|
+ mutex_unlock(&buffer->mutex);
|
|
}
|
|
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
|
|
|
|
diff --git a/mm/slub.c b/mm/slub.c
|
|
index 6e7e8106e9a64..f1b4d4dc3bb3b 100644
|
|
--- a/mm/slub.c
|
|
+++ b/mm/slub.c
|
|
@@ -5755,10 +5755,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
|
|
|
|
s->kobj.kset = kset;
|
|
err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
|
|
- if (err) {
|
|
- kobject_put(&s->kobj);
|
|
+ if (err)
|
|
goto out;
|
|
- }
|
|
|
|
err = sysfs_create_group(&s->kobj, &slab_attr_group);
|
|
if (err)
|
|
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
|
|
index a1e17c8d80a6e..82ffadff1e9c0 100644
|
|
--- a/net/core/skbuff.c
|
|
+++ b/net/core/skbuff.c
|
|
@@ -398,7 +398,11 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
|
|
|
|
len += NET_SKB_PAD;
|
|
|
|
- if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
|
|
+ /* If requested length is either too small or too big,
|
|
+ * we use kmalloc() for skb->head allocation.
|
|
+ */
|
|
+ if (len <= SKB_WITH_OVERHEAD(1024) ||
|
|
+ len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
|
|
(gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
|
|
skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
|
|
if (!skb)
|
|
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
|
|
index 341d1bd637af2..6b8908e1e0baf 100644
|
|
--- a/net/ipv4/netfilter/ipt_rpfilter.c
|
|
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
|
|
@@ -94,7 +94,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
|
|
flow.saddr = rpfilter_get_saddr(iph->daddr);
|
|
flow.flowi4_oif = 0;
|
|
flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
|
|
- flow.flowi4_tos = RT_TOS(iph->tos);
|
|
+ flow.flowi4_tos = iph->tos & IPTOS_RT_MASK;
|
|
flow.flowi4_scope = RT_SCOPE_UNIVERSE;
|
|
flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
|
|
|
|
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
|
|
index f335dd4c84e2d..8f298f27f6ecf 100644
|
|
--- a/net/ipv4/udp.c
|
|
+++ b/net/ipv4/udp.c
|
|
@@ -2321,7 +2321,8 @@ int udp_v4_early_demux(struct sk_buff *skb)
|
|
*/
|
|
if (!inet_sk(sk)->inet_daddr && in_dev)
|
|
return ip_mc_validate_source(skb, iph->daddr,
|
|
- iph->saddr, iph->tos,
|
|
+ iph->saddr,
|
|
+ iph->tos & IPTOS_RT_MASK,
|
|
skb->dev, in_dev, &itag);
|
|
}
|
|
return 0;
|
|
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
|
|
index e58fdefcd2ca2..e47d5047d5373 100644
|
|
--- a/net/ipv6/addrconf.c
|
|
+++ b/net/ipv6/addrconf.c
|
|
@@ -2356,6 +2356,7 @@ static void addrconf_add_mroute(struct net_device *dev)
|
|
.fc_dst_len = 8,
|
|
.fc_flags = RTF_UP,
|
|
.fc_nlinfo.nl_net = dev_net(dev),
|
|
+ .fc_protocol = RTPROT_KERNEL,
|
|
};
|
|
|
|
ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
|
|
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
|
|
index c2d2c054a4e49..5b119efb20eee 100644
|
|
--- a/net/sched/cls_tcindex.c
|
|
+++ b/net/sched/cls_tcindex.c
|
|
@@ -357,9 +357,13 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
|
|
if (tb[TCA_TCINDEX_MASK])
|
|
cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
|
|
|
|
- if (tb[TCA_TCINDEX_SHIFT])
|
|
+ if (tb[TCA_TCINDEX_SHIFT]) {
|
|
cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
|
|
-
|
|
+ if (cp->shift > 16) {
|
|
+ err = -EINVAL;
|
|
+ goto errout;
|
|
+ }
|
|
+ }
|
|
if (!cp->hash) {
|
|
/* Hash not specified, use perfect hash if the upper limit
|
|
* of the hashing index is below the threshold.
|
|
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
|
|
index c939459172353..247b68790a522 100644
|
|
--- a/sound/core/seq/oss/seq_oss_synth.c
|
|
+++ b/sound/core/seq/oss/seq_oss_synth.c
|
|
@@ -624,7 +624,8 @@ snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_in
|
|
|
|
if (info->is_midi) {
|
|
struct midi_info minf;
|
|
- snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
|
|
+ if (snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf))
|
|
+ return -ENXIO;
|
|
inf->synth_type = SYNTH_TYPE_MIDI;
|
|
inf->synth_subtype = 0;
|
|
inf->nr_voices = 16;
|
|
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
|
|
index fc30d1e8aa76a..9dd104c308e1d 100644
|
|
--- a/sound/pci/hda/patch_via.c
|
|
+++ b/sound/pci/hda/patch_via.c
|
|
@@ -135,6 +135,7 @@ static struct via_spec *via_new_spec(struct hda_codec *codec)
|
|
spec->codec_type = VT1708S;
|
|
spec->gen.indep_hp = 1;
|
|
spec->gen.keep_eapd_on = 1;
|
|
+ spec->gen.dac_min_mute = 1;
|
|
spec->gen.pcm_playback_hook = via_playback_pcm_hook;
|
|
spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
|
|
codec->power_save_node = 1;
|
|
diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c
|
|
index 8158409921e02..c6007aa95fff1 100644
|
|
--- a/sound/soc/intel/boards/haswell.c
|
|
+++ b/sound/soc/intel/boards/haswell.c
|
|
@@ -197,6 +197,7 @@ static struct platform_driver haswell_audio = {
|
|
.probe = haswell_audio_probe,
|
|
.driver = {
|
|
.name = "haswell-audio",
|
|
+ .pm = &snd_soc_pm_ops,
|
|
},
|
|
};
|
|
|