Mirror of https://github.com/Fishwaldo/build.git (synced 2025-07-22 21:08:49 +00:00)
diff --git a/MAINTAINERS b/MAINTAINERS
index bfc1b86e3e733..b6ab9c1a21198 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1169,7 +1169,7 @@ M: Joel Fernandes <joel@joelfernandes.org>
M: Christian Brauner <christian@brauner.io>
M: Hridya Valsaraju <hridya@google.com>
M: Suren Baghdasaryan <surenb@google.com>
-L: devel@driverdev.osuosl.org
+L: linux-kernel@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
F: drivers/android/
@@ -8079,7 +8079,6 @@ F: drivers/crypto/hisilicon/sec2/sec_main.c
HISILICON STAGING DRIVERS FOR HIKEY 960/970
M: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
-L: devel@driverdev.osuosl.org
S: Maintained
F: drivers/staging/hikey9xx/
@@ -16911,7 +16910,7 @@ F: drivers/staging/vt665?/
STAGING SUBSYSTEM
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-L: devel@driverdev.osuosl.org
+L: linux-staging@lists.linux.dev
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
F: drivers/staging/
@@ -18993,7 +18992,7 @@ VME SUBSYSTEM
M: Martyn Welch <martyn@welchs.me.uk>
M: Manohar Vanga <manohar.vanga@gmail.com>
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-L: devel@driverdev.osuosl.org
+L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
F: Documentation/driver-api/vme.rst
diff --git a/Makefile b/Makefile
index d8a39ece170dd..23403c8e08385 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 11
-SUBLEVEL = 8
+SUBLEVEL = 9
EXTRAVERSION =
NAME = 💕 Valentine's Day Edition 💕
@@ -1248,15 +1248,17 @@ endef
define filechk_version.h
if [ $(SUBLEVEL) -gt 255 ]; then \
echo \#define LINUX_VERSION_CODE $(shell \
- expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 255); \
+ expr $(VERSION) \* 65536 + $(PATCHLEVEL) \* 256 + 255); \
else \
echo \#define LINUX_VERSION_CODE $(shell \
- expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + $(SUBLEVEL)); \
+ expr $(VERSION) \* 65536 + $(PATCHLEVEL) \* 256 + $(SUBLEVEL)); \
fi; \
echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + \
((c) > 255 ? 255 : (c)))'
endef
+$(version_h): PATCHLEVEL := $(if $(PATCHLEVEL), $(PATCHLEVEL), 0)
+$(version_h): SUBLEVEL := $(if $(SUBLEVEL), $(SUBLEVEL), 0)
$(version_h): FORCE
$(call filechk,version.h)
$(Q)rm -f $(old_version_h)
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index 7897d16e09904..727d4b3219379 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -7,7 +7,7 @@
#include <linux/bug.h>
#include <asm/cputable.h>
-static inline bool early_cpu_has_feature(unsigned long feature)
+static __always_inline bool early_cpu_has_feature(unsigned long feature)
{
return !!((CPU_FTRS_ALWAYS & feature) ||
(CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
@@ -46,7 +46,7 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
return static_branch_likely(&cpu_feature_keys[i]);
}
#else
-static inline bool cpu_has_feature(unsigned long feature)
+static __always_inline bool cpu_has_feature(unsigned long feature)
{
return early_cpu_has_feature(feature);
}
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index a6e29f880e0e3..d21d08140a5eb 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -65,3 +65,14 @@ V_FUNCTION_END(__kernel_clock_getres)
V_FUNCTION_BEGIN(__kernel_time)
cvdso_call_time __c_kernel_time
V_FUNCTION_END(__kernel_time)
+
+/* Routines for restoring integer registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer restore area. */
+_GLOBAL(_restgpr_31_x)
+_GLOBAL(_rest32gpr_31_x)
+ lwz r0,4(r11)
+ lwz r31,-4(r11)
+ mtlr r0
+ mr r1,r11
+ blr
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
|
|
index e0a34eb5ed3b3..e6d569ae817d2 100644
|
|
--- a/arch/riscv/Kconfig
|
|
+++ b/arch/riscv/Kconfig
|
|
@@ -87,7 +87,6 @@ config RISCV
|
|
select PCI_MSI if PCI
|
|
select RISCV_INTC
|
|
select RISCV_TIMER if RISCV_SBI
|
|
- select SPARSEMEM_STATIC if 32BIT
|
|
select SPARSE_IRQ
|
|
select SYSCTL_EXCEPTION_TRACE
|
|
select THREAD_INFO_IN_TASK
|
|
@@ -148,7 +147,8 @@ config ARCH_FLATMEM_ENABLE
|
|
config ARCH_SPARSEMEM_ENABLE
|
|
def_bool y
|
|
depends on MMU
|
|
- select SPARSEMEM_VMEMMAP_ENABLE
|
|
+ select SPARSEMEM_STATIC if 32BIT && SPARSMEM
|
|
+ select SPARSEMEM_VMEMMAP_ENABLE if 64BIT
|
|
|
|
config ARCH_SELECT_MEMORY_MODEL
|
|
def_bool ARCH_SPARSEMEM_ENABLE
|
|
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
|
|
index 653edb25d4957..c0fdb05ffa0b2 100644
|
|
--- a/arch/riscv/include/asm/sbi.h
|
|
+++ b/arch/riscv/include/asm/sbi.h
|
|
@@ -51,10 +51,10 @@ enum sbi_ext_rfence_fid {
|
|
SBI_EXT_RFENCE_REMOTE_FENCE_I = 0,
|
|
SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
|
|
SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
|
|
- SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
|
|
SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
|
|
- SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
|
|
+ SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
|
|
SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
|
|
+ SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
|
|
};
|
|
|
|
enum sbi_ext_hsm_fid {
|
|
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
|
|
index c7c0655dd45b0..968202561d470 100644
|
|
--- a/arch/riscv/kernel/setup.c
|
|
+++ b/arch/riscv/kernel/setup.c
|
|
@@ -147,7 +147,8 @@ static void __init init_resources(void)
|
|
bss_res.end = __pa_symbol(__bss_stop) - 1;
|
|
bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
|
|
|
|
- mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt) * sizeof(*mem_res);
|
|
+ /* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
|
|
+ mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt + 1) * sizeof(*mem_res);
|
|
mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES);
|
|
if (!mem_res)
|
|
panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz);
|
|
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
|
|
index 212628932ddc1..a75d94a9bcb2f 100644
|
|
--- a/arch/s390/include/asm/pci.h
|
|
+++ b/arch/s390/include/asm/pci.h
|
|
@@ -201,8 +201,8 @@ extern unsigned int s390_pci_no_rid;
|
|
Prototypes
|
|
----------------------------------------------------------------------------- */
|
|
/* Base stuff */
|
|
-int zpci_create_device(struct zpci_dev *);
|
|
-void zpci_remove_device(struct zpci_dev *zdev);
|
|
+int zpci_create_device(u32 fid, u32 fh, enum zpci_state state);
|
|
+void zpci_remove_device(struct zpci_dev *zdev, bool set_error);
|
|
int zpci_enable_device(struct zpci_dev *);
|
|
int zpci_disable_device(struct zpci_dev *);
|
|
int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
|
|
@@ -212,7 +212,7 @@ void zpci_remove_reserved_devices(void);
|
|
/* CLP */
|
|
int clp_setup_writeback_mio(void);
|
|
int clp_scan_pci_devices(void);
|
|
-int clp_add_pci_device(u32, u32, int);
|
|
+int clp_query_pci_fn(struct zpci_dev *zdev);
|
|
int clp_enable_fh(struct zpci_dev *, u8);
|
|
int clp_disable_fh(struct zpci_dev *);
|
|
int clp_get_state(u32 fid, enum zpci_state *state);
|
|
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
|
|
index 978a35ea6081f..9b3c5978b6683 100644
|
|
--- a/arch/s390/kernel/vtime.c
|
|
+++ b/arch/s390/kernel/vtime.c
|
|
@@ -217,7 +217,7 @@ void vtime_flush(struct task_struct *tsk)
|
|
avg_steal = S390_lowcore.avg_steal_timer / 2;
|
|
if ((s64) steal > 0) {
|
|
S390_lowcore.steal_timer = 0;
|
|
- account_steal_time(steal);
|
|
+ account_steal_time(cputime_to_nsecs(steal));
|
|
avg_steal += steal;
|
|
}
|
|
S390_lowcore.avg_steal_timer = avg_steal;
|
|
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
|
|
index 41df8fcfddde2..91064077526df 100644
|
|
--- a/arch/s390/pci/pci.c
|
|
+++ b/arch/s390/pci/pci.c
|
|
@@ -682,56 +682,101 @@ int zpci_disable_device(struct zpci_dev *zdev)
|
|
}
|
|
EXPORT_SYMBOL_GPL(zpci_disable_device);
|
|
|
|
-void zpci_remove_device(struct zpci_dev *zdev)
|
|
+/* zpci_remove_device - Removes the given zdev from the PCI core
|
|
+ * @zdev: the zdev to be removed from the PCI core
|
|
+ * @set_error: if true the device's error state is set to permanent failure
|
|
+ *
|
|
+ * Sets a zPCI device to a configured but offline state; the zPCI
|
|
+ * device is still accessible through its hotplug slot and the zPCI
|
|
+ * API but is removed from the common code PCI bus, making it
|
|
+ * no longer available to drivers.
|
|
+ */
|
|
+void zpci_remove_device(struct zpci_dev *zdev, bool set_error)
|
|
{
|
|
struct zpci_bus *zbus = zdev->zbus;
|
|
struct pci_dev *pdev;
|
|
|
|
+ if (!zdev->zbus->bus)
|
|
+ return;
|
|
+
|
|
pdev = pci_get_slot(zbus->bus, zdev->devfn);
|
|
if (pdev) {
|
|
- if (pdev->is_virtfn)
|
|
- return zpci_iov_remove_virtfn(pdev, zdev->vfn);
|
|
+ if (set_error)
|
|
+ pdev->error_state = pci_channel_io_perm_failure;
|
|
+ if (pdev->is_virtfn) {
|
|
+ zpci_iov_remove_virtfn(pdev, zdev->vfn);
|
|
+ /* balance pci_get_slot */
|
|
+ pci_dev_put(pdev);
|
|
+ return;
|
|
+ }
|
|
pci_stop_and_remove_bus_device_locked(pdev);
|
|
+ /* balance pci_get_slot */
|
|
+ pci_dev_put(pdev);
|
|
}
|
|
}
|
|
|
|
-int zpci_create_device(struct zpci_dev *zdev)
|
|
+/**
|
|
+ * zpci_create_device() - Create a new zpci_dev and add it to the zbus
|
|
+ * @fid: Function ID of the device to be created
|
|
+ * @fh: Current Function Handle of the device to be created
|
|
+ * @state: Initial state after creation either Standby or Configured
|
|
+ *
|
|
+ * Creates a new zpci device and adds it to its, possibly newly created, zbus
|
|
+ * as well as zpci_list.
|
|
+ *
|
|
+ * Returns: 0 on success, an error value otherwise
|
|
+ */
|
|
+int zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
|
|
{
|
|
+ struct zpci_dev *zdev;
|
|
int rc;
|
|
|
|
- kref_init(&zdev->kref);
|
|
+ zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
|
|
+ zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
|
|
+ if (!zdev)
|
|
+ return -ENOMEM;
|
|
|
|
- spin_lock(&zpci_list_lock);
|
|
- list_add_tail(&zdev->entry, &zpci_list);
|
|
- spin_unlock(&zpci_list_lock);
|
|
+ /* FID and Function Handle are the static/dynamic identifiers */
|
|
+ zdev->fid = fid;
|
|
+ zdev->fh = fh;
|
|
|
|
- rc = zpci_init_iommu(zdev);
|
|
+ /* Query function properties and update zdev */
|
|
+ rc = clp_query_pci_fn(zdev);
|
|
if (rc)
|
|
- goto out;
|
|
+ goto error;
|
|
+ zdev->state = state;
|
|
|
|
+ kref_init(&zdev->kref);
|
|
mutex_init(&zdev->lock);
|
|
+
|
|
+ rc = zpci_init_iommu(zdev);
|
|
+ if (rc)
|
|
+ goto error;
|
|
+
|
|
if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
|
|
rc = zpci_enable_device(zdev);
|
|
if (rc)
|
|
- goto out_destroy_iommu;
|
|
+ goto error_destroy_iommu;
|
|
}
|
|
|
|
rc = zpci_bus_device_register(zdev, &pci_root_ops);
|
|
if (rc)
|
|
- goto out_disable;
|
|
+ goto error_disable;
|
|
+
|
|
+ spin_lock(&zpci_list_lock);
|
|
+ list_add_tail(&zdev->entry, &zpci_list);
|
|
+ spin_unlock(&zpci_list_lock);
|
|
|
|
return 0;
|
|
|
|
-out_disable:
|
|
+error_disable:
|
|
if (zdev->state == ZPCI_FN_STATE_ONLINE)
|
|
zpci_disable_device(zdev);
|
|
-
|
|
-out_destroy_iommu:
|
|
+error_destroy_iommu:
|
|
zpci_destroy_iommu(zdev);
|
|
-out:
|
|
- spin_lock(&zpci_list_lock);
|
|
- list_del(&zdev->entry);
|
|
- spin_unlock(&zpci_list_lock);
|
|
+error:
|
|
+ zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
|
|
+ kfree(zdev);
|
|
return rc;
|
|
}
|
|
|
|
@@ -740,7 +785,7 @@ void zpci_release_device(struct kref *kref)
|
|
struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
|
|
|
|
if (zdev->zbus->bus)
|
|
- zpci_remove_device(zdev);
|
|
+ zpci_remove_device(zdev, false);
|
|
|
|
switch (zdev->state) {
|
|
case ZPCI_FN_STATE_ONLINE:
|
|
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
|
|
index 153720d21ae7f..d3331596ddbe1 100644
|
|
--- a/arch/s390/pci/pci_clp.c
|
|
+++ b/arch/s390/pci/pci_clp.c
|
|
@@ -181,7 +181,7 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
|
|
return 0;
|
|
}
|
|
|
|
-static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
|
|
+int clp_query_pci_fn(struct zpci_dev *zdev)
|
|
{
|
|
struct clp_req_rsp_query_pci *rrb;
|
|
int rc;
|
|
@@ -194,7 +194,7 @@ static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
|
|
rrb->request.hdr.len = sizeof(rrb->request);
|
|
rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
|
|
rrb->response.hdr.len = sizeof(rrb->response);
|
|
- rrb->request.fh = fh;
|
|
+ rrb->request.fh = zdev->fh;
|
|
|
|
rc = clp_req(rrb, CLP_LPS_PCI);
|
|
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
|
|
@@ -212,40 +212,6 @@ out:
|
|
return rc;
|
|
}
|
|
|
|
-int clp_add_pci_device(u32 fid, u32 fh, int configured)
|
|
-{
|
|
- struct zpci_dev *zdev;
|
|
- int rc = -ENOMEM;
|
|
-
|
|
- zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
|
|
- zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
|
|
- if (!zdev)
|
|
- goto error;
|
|
-
|
|
- zdev->fh = fh;
|
|
- zdev->fid = fid;
|
|
-
|
|
- /* Query function properties and update zdev */
|
|
- rc = clp_query_pci_fn(zdev, fh);
|
|
- if (rc)
|
|
- goto error;
|
|
-
|
|
- if (configured)
|
|
- zdev->state = ZPCI_FN_STATE_CONFIGURED;
|
|
- else
|
|
- zdev->state = ZPCI_FN_STATE_STANDBY;
|
|
-
|
|
- rc = zpci_create_device(zdev);
|
|
- if (rc)
|
|
- goto error;
|
|
- return 0;
|
|
-
|
|
-error:
|
|
- zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
|
|
- kfree(zdev);
|
|
- return rc;
|
|
-}
|
|
-
|
|
static int clp_refresh_fh(u32 fid);
|
|
/*
|
|
* Enable/Disable a given PCI function and update its function handle if
|
|
@@ -408,7 +374,7 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
|
|
|
|
zdev = get_zdev_by_fid(entry->fid);
|
|
if (!zdev)
|
|
- clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
|
|
+ zpci_create_device(entry->fid, entry->fh, entry->config_state);
|
|
}
|
|
|
|
int clp_scan_pci_devices(void)
|
|
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
|
|
index 9a6bae503fe61..ac0c65cdd69d9 100644
|
|
--- a/arch/s390/pci/pci_event.c
|
|
+++ b/arch/s390/pci/pci_event.c
|
|
@@ -76,20 +76,17 @@ void zpci_event_error(void *data)
|
|
static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
|
|
{
|
|
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
|
|
- struct pci_dev *pdev = NULL;
|
|
enum zpci_state state;
|
|
+ struct pci_dev *pdev;
|
|
int ret;
|
|
|
|
- if (zdev && zdev->zbus && zdev->zbus->bus)
|
|
- pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
|
|
-
|
|
zpci_err("avail CCDF:\n");
|
|
zpci_err_hex(ccdf, sizeof(*ccdf));
|
|
|
|
switch (ccdf->pec) {
|
|
case 0x0301: /* Reserved|Standby -> Configured */
|
|
if (!zdev) {
|
|
- ret = clp_add_pci_device(ccdf->fid, ccdf->fh, 1);
|
|
+ zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_CONFIGURED);
|
|
break;
|
|
}
|
|
/* the configuration request may be stale */
|
|
@@ -116,7 +113,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
|
|
break;
|
|
case 0x0302: /* Reserved -> Standby */
|
|
if (!zdev) {
|
|
- clp_add_pci_device(ccdf->fid, ccdf->fh, 0);
|
|
+ zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_STANDBY);
|
|
break;
|
|
}
|
|
zdev->fh = ccdf->fh;
|
|
@@ -124,8 +121,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
|
|
case 0x0303: /* Deconfiguration requested */
|
|
if (!zdev)
|
|
break;
|
|
- if (pdev)
|
|
- zpci_remove_device(zdev);
|
|
+ zpci_remove_device(zdev, false);
|
|
|
|
ret = zpci_disable_device(zdev);
|
|
if (ret)
|
|
@@ -140,12 +136,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
|
|
case 0x0304: /* Configured -> Standby|Reserved */
|
|
if (!zdev)
|
|
break;
|
|
- if (pdev) {
|
|
- /* Give the driver a hint that the function is
|
|
- * already unusable. */
|
|
- pdev->error_state = pci_channel_io_perm_failure;
|
|
- zpci_remove_device(zdev);
|
|
- }
|
|
+ /* Give the driver a hint that the function is
|
|
+ * already unusable.
|
|
+ */
|
|
+ zpci_remove_device(zdev, true);
|
|
|
|
zdev->fh = ccdf->fh;
|
|
zpci_disable_device(zdev);
|
|
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
|
|
index d3f5cf70c1a09..bfd42e0853ed6 100644
|
|
--- a/arch/x86/events/intel/core.c
|
|
+++ b/arch/x86/events/intel/core.c
|
|
@@ -3575,6 +3575,9 @@ static int intel_pmu_hw_config(struct perf_event *event)
|
|
return ret;
|
|
|
|
if (event->attr.precise_ip) {
|
|
+ if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
|
|
+ return -EINVAL;
|
|
+
|
|
if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
|
|
event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
|
|
if (!(event->attr.sample_type &
|
|
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
|
|
index 67dbc91bccfee..6e84e79bea720 100644
|
|
--- a/arch/x86/events/intel/ds.c
|
|
+++ b/arch/x86/events/intel/ds.c
|
|
@@ -1899,7 +1899,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
|
|
*/
|
|
if (!pebs_status && cpuc->pebs_enabled &&
|
|
!(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
|
|
- pebs_status = cpuc->pebs_enabled;
|
|
+ pebs_status = p->status = cpuc->pebs_enabled;
|
|
|
|
bit = find_first_bit((unsigned long *)&pebs_status,
|
|
x86_pmu.max_pebs_events);
|
|
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
|
|
index c20a52b5534b4..c66df6368909f 100644
|
|
--- a/arch/x86/include/asm/processor.h
|
|
+++ b/arch/x86/include/asm/processor.h
|
|
@@ -552,15 +552,6 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
|
|
*size = fpu_kernel_xstate_size;
|
|
}
|
|
|
|
-/*
|
|
- * Thread-synchronous status.
|
|
- *
|
|
- * This is different from the flags in that nobody else
|
|
- * ever touches our thread-synchronous status, so we don't
|
|
- * have to worry about atomic accesses.
|
|
- */
|
|
-#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
|
|
-
|
|
static inline void
|
|
native_load_sp0(unsigned long sp0)
|
|
{
|
|
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
|
|
index 0d751d5da702e..30d1d187019f8 100644
|
|
--- a/arch/x86/include/asm/thread_info.h
|
|
+++ b/arch/x86/include/asm/thread_info.h
|
|
@@ -205,10 +205,31 @@ static inline int arch_within_stack_frames(const void * const stack,
|
|
|
|
#endif
|
|
|
|
+/*
|
|
+ * Thread-synchronous status.
|
|
+ *
|
|
+ * This is different from the flags in that nobody else
|
|
+ * ever touches our thread-synchronous status, so we don't
|
|
+ * have to worry about atomic accesses.
|
|
+ */
|
|
+#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
|
|
+
|
|
+#ifndef __ASSEMBLY__
|
|
#ifdef CONFIG_COMPAT
|
|
#define TS_I386_REGS_POKED 0x0004 /* regs poked by 32-bit ptracer */
|
|
+#define TS_COMPAT_RESTART 0x0008
|
|
+
|
|
+#define arch_set_restart_data arch_set_restart_data
|
|
+
|
|
+static inline void arch_set_restart_data(struct restart_block *restart)
|
|
+{
|
|
+ struct thread_info *ti = current_thread_info();
|
|
+ if (ti->status & TS_COMPAT)
|
|
+ ti->status |= TS_COMPAT_RESTART;
|
|
+ else
|
|
+ ti->status &= ~TS_COMPAT_RESTART;
|
|
+}
|
|
#endif
|
|
-#ifndef __ASSEMBLY__
|
|
|
|
#ifdef CONFIG_X86_32
|
|
#define in_ia32_syscall() true
|
|
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
|
|
index 7f4c081f59f0c..2745c24453f2b 100644
|
|
--- a/arch/x86/kernel/apic/apic.c
|
|
+++ b/arch/x86/kernel/apic/apic.c
|
|
@@ -2334,6 +2334,11 @@ static int cpuid_to_apicid[] = {
|
|
[0 ... NR_CPUS - 1] = -1,
|
|
};
|
|
|
|
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
|
|
+{
|
|
+ return phys_id == cpuid_to_apicid[cpu];
|
|
+}
|
|
+
|
|
#ifdef CONFIG_SMP
|
|
/**
|
|
* apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
|
|
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
|
|
index e4ab4804b20df..04ef995d1200a 100644
|
|
--- a/arch/x86/kernel/apic/io_apic.c
|
|
+++ b/arch/x86/kernel/apic/io_apic.c
|
|
@@ -1032,6 +1032,16 @@ static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin,
|
|
if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) {
|
|
irq = mp_irqs[idx].srcbusirq;
|
|
legacy = mp_is_legacy_irq(irq);
|
|
+ /*
|
|
+ * IRQ2 is unusable for historical reasons on systems which
|
|
+ * have a legacy PIC. See the comment vs. IRQ2 further down.
|
|
+ *
|
|
+ * If this gets removed at some point then the related code
|
|
+ * in lapic_assign_system_vectors() needs to be adjusted as
|
|
+ * well.
|
|
+ */
|
|
+ if (legacy && irq == PIC_CASCADE_IR)
|
|
+ return -EINVAL;
|
|
}
|
|
|
|
mutex_lock(&ioapic_mutex);
|
|
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
|
|
index ea794a083c44e..6c26d2c3a2e4c 100644
|
|
--- a/arch/x86/kernel/signal.c
|
|
+++ b/arch/x86/kernel/signal.c
|
|
@@ -766,30 +766,8 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
|
|
|
|
static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
|
|
{
|
|
- /*
|
|
- * This function is fundamentally broken as currently
|
|
- * implemented.
|
|
- *
|
|
- * The idea is that we want to trigger a call to the
|
|
- * restart_block() syscall and that we want in_ia32_syscall(),
|
|
- * in_x32_syscall(), etc. to match whatever they were in the
|
|
- * syscall being restarted. We assume that the syscall
|
|
- * instruction at (regs->ip - 2) matches whatever syscall
|
|
- * instruction we used to enter in the first place.
|
|
- *
|
|
- * The problem is that we can get here when ptrace pokes
|
|
- * syscall-like values into regs even if we're not in a syscall
|
|
- * at all.
|
|
- *
|
|
- * For now, we maintain historical behavior and guess based on
|
|
- * stored state. We could do better by saving the actual
|
|
- * syscall arch in restart_block or (with caveats on x32) by
|
|
- * checking if regs->ip points to 'int $0x80'. The current
|
|
- * behavior is incorrect if a tracer has a different bitness
|
|
- * than the tracee.
|
|
- */
|
|
#ifdef CONFIG_IA32_EMULATION
|
|
- if (current_thread_info()->status & (TS_COMPAT|TS_I386_REGS_POKED))
|
|
+ if (current_thread_info()->status & TS_COMPAT_RESTART)
|
|
return __NR_ia32_restart_syscall;
|
|
#endif
|
|
#ifdef CONFIG_X86_X32_ABI
|
|
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
|
|
index 87682dcb64ec3..bfda153b1a41d 100644
|
|
--- a/drivers/base/power/runtime.c
|
|
+++ b/drivers/base/power/runtime.c
|
|
@@ -325,22 +325,22 @@ static void rpm_put_suppliers(struct device *dev)
|
|
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
|
|
__releases(&dev->power.lock) __acquires(&dev->power.lock)
|
|
{
|
|
- bool use_links = dev->power.links_count > 0;
|
|
- bool get = false;
|
|
int retval, idx;
|
|
- bool put;
|
|
+ bool use_links = dev->power.links_count > 0;
|
|
|
|
if (dev->power.irq_safe) {
|
|
spin_unlock(&dev->power.lock);
|
|
- } else if (!use_links) {
|
|
- spin_unlock_irq(&dev->power.lock);
|
|
} else {
|
|
- get = dev->power.runtime_status == RPM_RESUMING;
|
|
-
|
|
spin_unlock_irq(&dev->power.lock);
|
|
|
|
- /* Resume suppliers if necessary. */
|
|
- if (get) {
|
|
+ /*
|
|
+ * Resume suppliers if necessary.
|
|
+ *
|
|
+ * The device's runtime PM status cannot change until this
|
|
+ * routine returns, so it is safe to read the status outside of
|
|
+ * the lock.
|
|
+ */
|
|
+ if (use_links && dev->power.runtime_status == RPM_RESUMING) {
|
|
idx = device_links_read_lock();
|
|
|
|
retval = rpm_get_suppliers(dev);
|
|
@@ -355,36 +355,24 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
|
|
|
|
if (dev->power.irq_safe) {
|
|
spin_lock(&dev->power.lock);
|
|
- return retval;
|
|
- }
|
|
-
|
|
- spin_lock_irq(&dev->power.lock);
|
|
-
|
|
- if (!use_links)
|
|
- return retval;
|
|
-
|
|
- /*
|
|
- * If the device is suspending and the callback has returned success,
|
|
- * drop the usage counters of the suppliers that have been reference
|
|
- * counted on its resume.
|
|
- *
|
|
- * Do that if the resume fails too.
|
|
- */
|
|
- put = dev->power.runtime_status == RPM_SUSPENDING && !retval;
|
|
- if (put)
|
|
- __update_runtime_status(dev, RPM_SUSPENDED);
|
|
- else
|
|
- put = get && retval;
|
|
-
|
|
- if (put) {
|
|
- spin_unlock_irq(&dev->power.lock);
|
|
-
|
|
- idx = device_links_read_lock();
|
|
+ } else {
|
|
+ /*
|
|
+ * If the device is suspending and the callback has returned
|
|
+ * success, drop the usage counters of the suppliers that have
|
|
+ * been reference counted on its resume.
|
|
+ *
|
|
+ * Do that if resume fails too.
|
|
+ */
|
|
+ if (use_links
|
|
+ && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
|
|
+ || (dev->power.runtime_status == RPM_RESUMING && retval))) {
|
|
+ idx = device_links_read_lock();
|
|
|
|
-fail:
|
|
- rpm_put_suppliers(dev);
|
|
+ fail:
|
|
+ rpm_put_suppliers(dev);
|
|
|
|
- device_links_read_unlock(idx);
|
|
+ device_links_read_unlock(idx);
|
|
+ }
|
|
|
|
spin_lock_irq(&dev->power.lock);
|
|
}
|
|
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
|
|
index ef2a974a2f105..75bc401fdd189 100644
|
|
--- a/drivers/counter/stm32-timer-cnt.c
|
|
+++ b/drivers/counter/stm32-timer-cnt.c
|
|
@@ -31,7 +31,7 @@ struct stm32_timer_cnt {
|
|
struct counter_device counter;
|
|
struct regmap *regmap;
|
|
struct clk *clk;
|
|
- u32 ceiling;
|
|
+ u32 max_arr;
|
|
bool enabled;
|
|
struct stm32_timer_regs bak;
|
|
};
|
|
@@ -44,13 +44,14 @@ struct stm32_timer_cnt {
|
|
* @STM32_COUNT_ENCODER_MODE_3: counts on both TI1FP1 and TI2FP2 edges
|
|
*/
|
|
enum stm32_count_function {
|
|
- STM32_COUNT_SLAVE_MODE_DISABLED = -1,
|
|
+ STM32_COUNT_SLAVE_MODE_DISABLED,
|
|
STM32_COUNT_ENCODER_MODE_1,
|
|
STM32_COUNT_ENCODER_MODE_2,
|
|
STM32_COUNT_ENCODER_MODE_3,
|
|
};
|
|
|
|
static enum counter_count_function stm32_count_functions[] = {
|
|
+ [STM32_COUNT_SLAVE_MODE_DISABLED] = COUNTER_COUNT_FUNCTION_INCREASE,
|
|
[STM32_COUNT_ENCODER_MODE_1] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A,
|
|
[STM32_COUNT_ENCODER_MODE_2] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B,
|
|
[STM32_COUNT_ENCODER_MODE_3] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
|
|
@@ -73,8 +74,10 @@ static int stm32_count_write(struct counter_device *counter,
|
|
const unsigned long val)
|
|
{
|
|
struct stm32_timer_cnt *const priv = counter->priv;
|
|
+ u32 ceiling;
|
|
|
|
- if (val > priv->ceiling)
|
|
+ regmap_read(priv->regmap, TIM_ARR, &ceiling);
|
|
+ if (val > ceiling)
|
|
return -EINVAL;
|
|
|
|
return regmap_write(priv->regmap, TIM_CNT, val);
|
|
@@ -90,6 +93,9 @@ static int stm32_count_function_get(struct counter_device *counter,
|
|
regmap_read(priv->regmap, TIM_SMCR, &smcr);
|
|
|
|
switch (smcr & TIM_SMCR_SMS) {
|
|
+ case 0:
|
|
+ *function = STM32_COUNT_SLAVE_MODE_DISABLED;
|
|
+ return 0;
|
|
case 1:
|
|
*function = STM32_COUNT_ENCODER_MODE_1;
|
|
return 0;
|
|
@@ -99,9 +105,9 @@ static int stm32_count_function_get(struct counter_device *counter,
|
|
case 3:
|
|
*function = STM32_COUNT_ENCODER_MODE_3;
|
|
return 0;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
}
|
|
-
|
|
- return -EINVAL;
|
|
}
|
|
|
|
static int stm32_count_function_set(struct counter_device *counter,
|
|
@@ -112,6 +118,9 @@ static int stm32_count_function_set(struct counter_device *counter,
|
|
u32 cr1, sms;
|
|
|
|
switch (function) {
|
|
+ case STM32_COUNT_SLAVE_MODE_DISABLED:
|
|
+ sms = 0;
|
|
+ break;
|
|
case STM32_COUNT_ENCODER_MODE_1:
|
|
sms = 1;
|
|
break;
|
|
@@ -122,8 +131,7 @@ static int stm32_count_function_set(struct counter_device *counter,
|
|
sms = 3;
|
|
break;
|
|
default:
|
|
- sms = 0;
|
|
- break;
|
|
+ return -EINVAL;
|
|
}
|
|
|
|
/* Store enable status */
|
|
@@ -131,10 +139,6 @@ static int stm32_count_function_set(struct counter_device *counter,
|
|
|
|
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
|
|
|
|
- /* TIMx_ARR register shouldn't be buffered (ARPE=0) */
|
|
- regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
|
|
- regmap_write(priv->regmap, TIM_ARR, priv->ceiling);
|
|
-
|
|
regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms);
|
|
|
|
/* Make sure that registers are updated */
|
|
@@ -185,11 +189,13 @@ static ssize_t stm32_count_ceiling_write(struct counter_device *counter,
|
|
if (ret)
|
|
return ret;
|
|
|
|
+ if (ceiling > priv->max_arr)
|
|
+ return -ERANGE;
|
|
+
|
|
/* TIMx_ARR register shouldn't be buffered (ARPE=0) */
|
|
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
|
|
regmap_write(priv->regmap, TIM_ARR, ceiling);
|
|
|
|
- priv->ceiling = ceiling;
|
|
return len;
|
|
}
|
|
|
|
@@ -274,31 +280,36 @@ static int stm32_action_get(struct counter_device *counter,
|
|
size_t function;
|
|
int err;
|
|
|
|
- /* Default action mode (e.g. STM32_COUNT_SLAVE_MODE_DISABLED) */
|
|
- *action = STM32_SYNAPSE_ACTION_NONE;
|
|
-
|
|
err = stm32_count_function_get(counter, count, &function);
|
|
if (err)
|
|
- return 0;
|
|
+ return err;
|
|
|
|
switch (function) {
|
|
+ case STM32_COUNT_SLAVE_MODE_DISABLED:
|
|
+ /* counts on internal clock when CEN=1 */
|
|
+ *action = STM32_SYNAPSE_ACTION_NONE;
|
|
+ return 0;
|
|
case STM32_COUNT_ENCODER_MODE_1:
|
|
/* counts up/down on TI1FP1 edge depending on TI2FP2 level */
|
|
if (synapse->signal->id == count->synapses[0].signal->id)
|
|
*action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
|
|
- break;
|
|
+ else
|
|
+ *action = STM32_SYNAPSE_ACTION_NONE;
|
|
+ return 0;
|
|
case STM32_COUNT_ENCODER_MODE_2:
|
|
/* counts up/down on TI2FP2 edge depending on TI1FP1 level */
|
|
if (synapse->signal->id == count->synapses[1].signal->id)
|
|
*action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
|
|
- break;
|
|
+ else
|
|
+ *action = STM32_SYNAPSE_ACTION_NONE;
|
|
+ return 0;
|
|
case STM32_COUNT_ENCODER_MODE_3:
|
|
/* counts up/down on both TI1FP1 and TI2FP2 edges */
|
|
*action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
|
|
- break;
|
|
+ return 0;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
}
|
|
-
|
|
- return 0;
|
|
}
|
|
|
|
static const struct counter_ops stm32_timer_cnt_ops = {
|
|
@@ -359,7 +370,7 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev)
|
|
|
|
priv->regmap = ddata->regmap;
|
|
priv->clk = ddata->clk;
|
|
- priv->ceiling = ddata->max_arr;
|
|
+ priv->max_arr = ddata->max_arr;
|
|
|
|
priv->counter.name = dev_name(dev);
|
|
priv->counter.parent = dev;
|
|
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
|
|
index df3f9bcab581c..4b7ee3fa9224f 100644
|
|
--- a/drivers/firmware/efi/efi.c
|
|
+++ b/drivers/firmware/efi/efi.c
|
|
@@ -927,7 +927,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
|
|
}
|
|
|
|
/* first try to find a slot in an existing linked list entry */
|
|
- for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
|
|
+ for (prsv = efi_memreserve_root->next; prsv; ) {
|
|
rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
|
|
index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
|
|
if (index < rsv->size) {
|
|
@@ -937,6 +937,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
|
|
memunmap(rsv);
|
|
return efi_mem_reserve_iomem(addr, size);
|
|
}
|
|
+ prsv = rsv->next;
|
|
memunmap(rsv);
|
|
}
|
|
|
|
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
|
|
index 41c1d00bf933c..abdc8a6a39631 100644
|
|
--- a/drivers/firmware/efi/vars.c
|
|
+++ b/drivers/firmware/efi/vars.c
|
|
@@ -484,6 +484,10 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
|
|
}
|
|
}
|
|
|
|
+ break;
|
|
+ case EFI_UNSUPPORTED:
|
|
+ err = -EOPNOTSUPP;
|
|
+ status = EFI_NOT_FOUND;
|
|
break;
|
|
case EFI_NOT_FOUND:
|
|
break;
|
|
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
|
|
index e4cfa27f6893d..a4a47305574cb 100644
|
|
--- a/drivers/gpio/gpiolib.c
|
|
+++ b/drivers/gpio/gpiolib.c
|
|
@@ -573,6 +573,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
|
|
struct lock_class_key *lock_key,
|
|
struct lock_class_key *request_key)
|
|
{
|
|
+ struct fwnode_handle *fwnode = gc->parent ? dev_fwnode(gc->parent) : NULL;
|
|
unsigned long flags;
|
|
int ret = 0;
|
|
unsigned i;
|
|
@@ -602,6 +603,12 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
|
|
gc->of_node = gdev->dev.of_node;
|
|
#endif
|
|
|
|
+ /*
|
|
+ * Assign fwnode depending on the result of the previous calls,
|
|
+ * if none of them succeed, assign it to the parent's one.
|
|
+ */
|
|
+ gdev->dev.fwnode = dev_fwnode(&gdev->dev) ?: fwnode;
|
|
+
|
|
gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL);
|
|
if (gdev->id < 0) {
|
|
ret = gdev->id;
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
|
|
index 480d928cb1ca6..09b9732424e15 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
|
|
@@ -1501,38 +1501,8 @@ static void dcn20_update_dchubp_dpp(
|
|
if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
|
|
|| pipe_ctx->stream->update_flags.bits.gamut_remap
|
|
|| pipe_ctx->stream->update_flags.bits.out_csc) {
|
|
- struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
|
|
-
|
|
- if (mpc->funcs->set_gamut_remap) {
|
|
- int i;
|
|
- int mpcc_id = hubp->inst;
|
|
- struct mpc_grph_gamut_adjustment adjust;
|
|
- bool enable_remap_dpp = false;
|
|
-
|
|
- memset(&adjust, 0, sizeof(adjust));
|
|
- adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
|
|
-
|
|
- /* save the enablement of gamut remap for dpp */
|
|
- enable_remap_dpp = pipe_ctx->stream->gamut_remap_matrix.enable_remap;
|
|
-
|
|
- /* force bypass gamut remap for dpp/cm */
|
|
- pipe_ctx->stream->gamut_remap_matrix.enable_remap = false;
|
|
- dc->hwss.program_gamut_remap(pipe_ctx);
|
|
-
|
|
- /* restore gamut remap flag and use this remap into mpc */
|
|
- pipe_ctx->stream->gamut_remap_matrix.enable_remap = enable_remap_dpp;
|
|
-
|
|
- /* build remap matrix for top plane if enabled */
|
|
- if (enable_remap_dpp && pipe_ctx->top_pipe == NULL) {
|
|
- adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
|
|
- for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
|
|
- adjust.temperature_matrix[i] =
|
|
- pipe_ctx->stream->gamut_remap_matrix.matrix[i];
|
|
- }
|
|
- mpc->funcs->set_gamut_remap(mpc, mpcc_id, &adjust);
|
|
- } else
|
|
- /* dpp/cm gamut remap*/
|
|
- dc->hwss.program_gamut_remap(pipe_ctx);
|
|
+ /* dpp/cm gamut remap*/
|
|
+ dc->hwss.program_gamut_remap(pipe_ctx);
|
|
|
|
/*call the dcn2 method which uses mpc csc*/
|
|
dc->hwss.program_output_csc(dc,
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
|
|
index 94ee2cab26b7c..4caeab6a09b3d 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
|
|
@@ -1595,6 +1595,11 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
|
|
dcn2_1_soc.num_chans = bw_params->num_channels;
|
|
|
|
ASSERT(clk_table->num_entries);
|
|
+ /* Copy dcn2_1_soc.clock_limits to clock_limits to avoid copying over null states later */
|
|
+ for (i = 0; i < dcn2_1_soc.num_states + 1; i++) {
|
|
+ clock_limits[i] = dcn2_1_soc.clock_limits[i];
|
|
+ }
|
|
+
|
|
for (i = 0; i < clk_table->num_entries; i++) {
|
|
/* loop backwards*/
|
|
for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) {
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
|
|
index 41a1d0e9b7e20..e0df9b0065f9c 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
|
|
@@ -113,6 +113,7 @@ bool cm3_helper_translate_curve_to_hw_format(
|
|
struct pwl_result_data *rgb_resulted;
|
|
struct pwl_result_data *rgb;
|
|
struct pwl_result_data *rgb_plus_1;
|
|
+ struct pwl_result_data *rgb_minus_1;
|
|
struct fixed31_32 end_value;
|
|
|
|
int32_t region_start, region_end;
|
|
@@ -140,7 +141,7 @@ bool cm3_helper_translate_curve_to_hw_format(
|
|
region_start = -MAX_LOW_POINT;
|
|
region_end = NUMBER_REGIONS - MAX_LOW_POINT;
|
|
} else {
|
|
- /* 10 segments
|
|
+ /* 11 segments
|
|
* segment is from 2^-10 to 2^0
|
|
* There are less than 256 points, for optimization
|
|
*/
|
|
@@ -154,9 +155,10 @@ bool cm3_helper_translate_curve_to_hw_format(
|
|
seg_distr[7] = 4;
|
|
seg_distr[8] = 4;
|
|
seg_distr[9] = 4;
|
|
+ seg_distr[10] = 1;
|
|
|
|
region_start = -10;
|
|
- region_end = 0;
|
|
+ region_end = 1;
|
|
}
|
|
|
|
for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
|
|
@@ -189,6 +191,10 @@ bool cm3_helper_translate_curve_to_hw_format(
|
|
rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
|
|
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
|
|
|
|
+ rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
|
|
+ rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
|
|
+ rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
|
|
+
|
|
// All 3 color channels have same x
|
|
corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
|
|
dc_fixpt_from_int(region_start));
|
|
@@ -259,15 +265,18 @@ bool cm3_helper_translate_curve_to_hw_format(
|
|
|
|
rgb = rgb_resulted;
|
|
rgb_plus_1 = rgb_resulted + 1;
|
|
+ rgb_minus_1 = rgb;
|
|
|
|
i = 1;
|
|
while (i != hw_points + 1) {
|
|
- if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
|
|
- rgb_plus_1->red = rgb->red;
|
|
- if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
|
|
- rgb_plus_1->green = rgb->green;
|
|
- if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
|
|
- rgb_plus_1->blue = rgb->blue;
|
|
+ if (i >= hw_points - 1) {
|
|
+ if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
|
|
+ rgb_plus_1->red = dc_fixpt_add(rgb->red, rgb_minus_1->delta_red);
|
|
+ if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
|
|
+ rgb_plus_1->green = dc_fixpt_add(rgb->green, rgb_minus_1->delta_green);
|
|
+ if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
|
|
+ rgb_plus_1->blue = dc_fixpt_add(rgb->blue, rgb_minus_1->delta_blue);
|
|
+ }
|
|
|
|
rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
|
|
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
|
|
@@ -283,6 +292,7 @@ bool cm3_helper_translate_curve_to_hw_format(
|
|
}
|
|
|
|
++rgb_plus_1;
|
|
+ rgb_minus_1 = rgb;
|
|
++rgb;
|
|
++i;
|
|
}
|
|
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
|
|
index 649c26518d26d..8d9ab4a91544b 100644
|
|
--- a/drivers/gpu/drm/i915/i915_perf.c
|
|
+++ b/drivers/gpu/drm/i915/i915_perf.c
|
|
@@ -600,7 +600,6 @@ static int append_oa_sample(struct i915_perf_stream *stream,
|
|
{
|
|
int report_size = stream->oa_buffer.format_size;
|
|
struct drm_i915_perf_record_header header;
|
|
- u32 sample_flags = stream->sample_flags;
|
|
|
|
header.type = DRM_I915_PERF_RECORD_SAMPLE;
|
|
header.pad = 0;
|
|
@@ -614,10 +613,8 @@ static int append_oa_sample(struct i915_perf_stream *stream,
|
|
return -EFAULT;
|
|
buf += sizeof(header);
|
|
|
|
- if (sample_flags & SAMPLE_OA_REPORT) {
|
|
- if (copy_to_user(buf, report, report_size))
|
|
- return -EFAULT;
|
|
- }
|
|
+ if (copy_to_user(buf, report, report_size))
|
|
+ return -EFAULT;
|
|
|
|
(*offset) += header.size;
|
|
|
|
@@ -2678,7 +2675,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
|
|
|
|
stream->perf->ops.oa_enable(stream);
|
|
|
|
- if (stream->periodic)
|
|
+ if (stream->sample_flags & SAMPLE_OA_REPORT)
|
|
hrtimer_start(&stream->poll_check_timer,
|
|
ns_to_ktime(stream->poll_oa_period),
|
|
HRTIMER_MODE_REL_PINNED);
|
|
@@ -2741,7 +2738,7 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream)
|
|
{
|
|
stream->perf->ops.oa_disable(stream);
|
|
|
|
- if (stream->periodic)
|
|
+ if (stream->sample_flags & SAMPLE_OA_REPORT)
|
|
hrtimer_cancel(&stream->poll_check_timer);
|
|
}
|
|
|
|
@@ -3024,7 +3021,7 @@ static ssize_t i915_perf_read(struct file *file,
|
|
* disabled stream as an error. In particular it might otherwise lead
|
|
* to a deadlock for blocking file descriptors...
|
|
*/
|
|
- if (!stream->enabled)
|
|
+ if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT))
|
|
return -EIO;
|
|
|
|
if (!(file->f_flags & O_NONBLOCK)) {
|
|
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
|
|
index 22073e77fdf9a..a76eb2c14e8c5 100644
|
|
--- a/drivers/gpu/drm/ttm/ttm_bo.c
|
|
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
|
|
@@ -514,7 +514,7 @@ static void ttm_bo_release(struct kref *kref)
|
|
* shrinkers, now that they are queued for
|
|
* destruction.
|
|
*/
|
|
- if (bo->pin_count) {
|
|
+ if (WARN_ON(bo->pin_count)) {
|
|
bo->pin_count = 0;
|
|
ttm_bo_del_from_lru(bo);
|
|
ttm_bo_add_mem_to_lru(bo, &bo->mem);
|
|
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
|
|
index 15587a1bc80d0..be1f73166a32b 100644
|
|
--- a/drivers/iio/adc/Kconfig
|
|
+++ b/drivers/iio/adc/Kconfig
|
|
@@ -266,6 +266,8 @@ config ADI_AXI_ADC
|
|
select IIO_BUFFER
|
|
select IIO_BUFFER_HW_CONSUMER
|
|
select IIO_BUFFER_DMAENGINE
|
|
+ depends on HAS_IOMEM
|
|
+ depends on OF
|
|
help
|
|
Say yes here to build support for Analog Devices Generic
|
|
AXI ADC IP core. The IP core is used for interfacing with
|
|
@@ -923,6 +925,7 @@ config STM32_ADC_CORE
|
|
depends on ARCH_STM32 || COMPILE_TEST
|
|
depends on OF
|
|
depends on REGULATOR
|
|
+ depends on HAS_IOMEM
|
|
select IIO_BUFFER
|
|
select MFD_STM32_TIMERS
|
|
select IIO_STM32_TIMER_TRIGGER
|
|
diff --git a/drivers/iio/adc/ab8500-gpadc.c b/drivers/iio/adc/ab8500-gpadc.c
|
|
index 1bb987a4acbab..8d81505282dd3 100644
|
|
--- a/drivers/iio/adc/ab8500-gpadc.c
|
|
+++ b/drivers/iio/adc/ab8500-gpadc.c
|
|
@@ -918,7 +918,7 @@ static int ab8500_gpadc_read_raw(struct iio_dev *indio_dev,
|
|
return processed;
|
|
|
|
/* Return millivolt or milliamps or millicentigrades */
|
|
- *val = processed * 1000;
|
|
+ *val = processed;
|
|
return IIO_VAL_INT;
|
|
}
|
|
|
|
diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c
|
|
index 5d597e5050f68..1b4b3203e4285 100644
|
|
--- a/drivers/iio/adc/ad7949.c
|
|
+++ b/drivers/iio/adc/ad7949.c
|
|
@@ -91,7 +91,7 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val,
|
|
int ret;
|
|
int i;
|
|
int bits_per_word = ad7949_adc->resolution;
|
|
- int mask = GENMASK(ad7949_adc->resolution, 0);
|
|
+ int mask = GENMASK(ad7949_adc->resolution - 1, 0);
|
|
struct spi_message msg;
|
|
struct spi_transfer tx[] = {
|
|
{
|
|
diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
|
|
index b0388f8a69f42..7e7d408452eca 100644
|
|
--- a/drivers/iio/adc/qcom-spmi-vadc.c
|
|
+++ b/drivers/iio/adc/qcom-spmi-vadc.c
|
|
@@ -598,7 +598,7 @@ static const struct vadc_channels vadc_chans[] = {
|
|
VADC_CHAN_NO_SCALE(P_MUX16_1_3, 1)
|
|
|
|
VADC_CHAN_NO_SCALE(LR_MUX1_BAT_THERM, 0)
|
|
- VADC_CHAN_NO_SCALE(LR_MUX2_BAT_ID, 0)
|
|
+ VADC_CHAN_VOLT(LR_MUX2_BAT_ID, 0, SCALE_DEFAULT)
|
|
VADC_CHAN_NO_SCALE(LR_MUX3_XO_THERM, 0)
|
|
VADC_CHAN_NO_SCALE(LR_MUX4_AMUX_THM1, 0)
|
|
VADC_CHAN_NO_SCALE(LR_MUX5_AMUX_THM2, 0)
|
|
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
|
|
index dfa31a23500f0..ac90be03332af 100644
|
|
--- a/drivers/iio/gyro/mpu3050-core.c
|
|
+++ b/drivers/iio/gyro/mpu3050-core.c
|
|
@@ -551,6 +551,8 @@ static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
|
|
MPU3050_FIFO_R,
|
|
&fifo_values[offset],
|
|
toread);
|
|
+ if (ret)
|
|
+ goto out_trigger_unlock;
|
|
|
|
dev_dbg(mpu3050->dev,
|
|
"%04x %04x %04x %04x %04x\n",
|
|
diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c
|
|
index 52f605114ef77..d62705448ae25 100644
|
|
--- a/drivers/iio/humidity/hid-sensor-humidity.c
|
|
+++ b/drivers/iio/humidity/hid-sensor-humidity.c
|
|
@@ -15,7 +15,10 @@
|
|
struct hid_humidity_state {
|
|
struct hid_sensor_common common_attributes;
|
|
struct hid_sensor_hub_attribute_info humidity_attr;
|
|
- s32 humidity_data;
|
|
+ struct {
|
|
+ s32 humidity_data;
|
|
+ u64 timestamp __aligned(8);
|
|
+ } scan;
|
|
int scale_pre_decml;
|
|
int scale_post_decml;
|
|
int scale_precision;
|
|
@@ -125,9 +128,8 @@ static int humidity_proc_event(struct hid_sensor_hub_device *hsdev,
|
|
struct hid_humidity_state *humid_st = iio_priv(indio_dev);
|
|
|
|
if (atomic_read(&humid_st->common_attributes.data_ready))
|
|
- iio_push_to_buffers_with_timestamp(indio_dev,
|
|
- &humid_st->humidity_data,
|
|
- iio_get_time_ns(indio_dev));
|
|
+ iio_push_to_buffers_with_timestamp(indio_dev, &humid_st->scan,
|
|
+ iio_get_time_ns(indio_dev));
|
|
|
|
return 0;
|
|
}
|
|
@@ -142,7 +144,7 @@ static int humidity_capture_sample(struct hid_sensor_hub_device *hsdev,
|
|
|
|
switch (usage_id) {
|
|
case HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY:
|
|
- humid_st->humidity_data = *(s32 *)raw_data;
|
|
+ humid_st->scan.humidity_data = *(s32 *)raw_data;
|
|
|
|
return 0;
|
|
default:
|
|
diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
|
|
index 54af2ed664f6f..785a4ce606d89 100644
|
|
--- a/drivers/iio/imu/adis16400.c
|
|
+++ b/drivers/iio/imu/adis16400.c
|
|
@@ -462,8 +462,7 @@ static int adis16400_initial_setup(struct iio_dev *indio_dev)
|
|
if (ret)
|
|
goto err_ret;
|
|
|
|
- ret = sscanf(indio_dev->name, "adis%u\n", &device_id);
|
|
- if (ret != 1) {
|
|
+ if (sscanf(indio_dev->name, "adis%u\n", &device_id) != 1) {
|
|
ret = -EINVAL;
|
|
goto err_ret;
|
|
}
|
|
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
|
|
index 330cf359e0b81..e9e00ce0c6d4d 100644
|
|
--- a/drivers/iio/light/hid-sensor-prox.c
|
|
+++ b/drivers/iio/light/hid-sensor-prox.c
|
|
@@ -23,6 +23,9 @@ struct prox_state {
|
|
struct hid_sensor_common common_attributes;
|
|
struct hid_sensor_hub_attribute_info prox_attr;
|
|
u32 human_presence;
|
|
+ int scale_pre_decml;
|
|
+ int scale_post_decml;
|
|
+ int scale_precision;
|
|
};
|
|
|
|
/* Channel definitions */
|
|
@@ -93,8 +96,9 @@ static int prox_read_raw(struct iio_dev *indio_dev,
|
|
ret_type = IIO_VAL_INT;
|
|
break;
|
|
case IIO_CHAN_INFO_SCALE:
|
|
- *val = prox_state->prox_attr.units;
|
|
- ret_type = IIO_VAL_INT;
|
|
+ *val = prox_state->scale_pre_decml;
|
|
+ *val2 = prox_state->scale_post_decml;
|
|
+ ret_type = prox_state->scale_precision;
|
|
break;
|
|
case IIO_CHAN_INFO_OFFSET:
|
|
*val = hid_sensor_convert_exponent(
|
|
@@ -234,6 +238,11 @@ static int prox_parse_report(struct platform_device *pdev,
|
|
HID_USAGE_SENSOR_HUMAN_PRESENCE,
|
|
&st->common_attributes.sensitivity);
|
|
|
|
+ st->scale_precision = hid_sensor_format_scale(
|
|
+ hsdev->usage,
|
|
+ &st->prox_attr,
|
|
+ &st->scale_pre_decml, &st->scale_post_decml);
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c
|
|
index 81688f1b932f1..da9a247097fa2 100644
|
|
--- a/drivers/iio/temperature/hid-sensor-temperature.c
|
|
+++ b/drivers/iio/temperature/hid-sensor-temperature.c
|
|
@@ -15,7 +15,10 @@
|
|
struct temperature_state {
|
|
struct hid_sensor_common common_attributes;
|
|
struct hid_sensor_hub_attribute_info temperature_attr;
|
|
- s32 temperature_data;
|
|
+ struct {
|
|
+ s32 temperature_data;
|
|
+ u64 timestamp __aligned(8);
|
|
+ } scan;
|
|
int scale_pre_decml;
|
|
int scale_post_decml;
|
|
int scale_precision;
|
|
@@ -32,7 +35,7 @@ static const struct iio_chan_spec temperature_channels[] = {
|
|
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
|
|
BIT(IIO_CHAN_INFO_HYSTERESIS),
|
|
},
|
|
- IIO_CHAN_SOFT_TIMESTAMP(3),
|
|
+ IIO_CHAN_SOFT_TIMESTAMP(1),
|
|
};
|
|
|
|
/* Adjust channel real bits based on report descriptor */
|
|
@@ -123,9 +126,8 @@ static int temperature_proc_event(struct hid_sensor_hub_device *hsdev,
|
|
struct temperature_state *temp_st = iio_priv(indio_dev);
|
|
|
|
if (atomic_read(&temp_st->common_attributes.data_ready))
|
|
- iio_push_to_buffers_with_timestamp(indio_dev,
|
|
- &temp_st->temperature_data,
|
|
- iio_get_time_ns(indio_dev));
|
|
+ iio_push_to_buffers_with_timestamp(indio_dev, &temp_st->scan,
|
|
+ iio_get_time_ns(indio_dev));
|
|
|
|
return 0;
|
|
}
|
|
@@ -140,7 +142,7 @@ static int temperature_capture_sample(struct hid_sensor_hub_device *hsdev,
|
|
|
|
switch (usage_id) {
|
|
case HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE:
|
|
- temp_st->temperature_data = *(s32 *)raw_data;
|
|
+ temp_st->scan.temperature_data = *(s32 *)raw_data;
|
|
return 0;
|
|
default:
|
|
return -EINVAL;
|
|
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
|
|
index 01da76dc1caa8..78339b0bb8e58 100644
|
|
--- a/drivers/iommu/amd/init.c
|
|
+++ b/drivers/iommu/amd/init.c
|
|
@@ -2712,7 +2712,6 @@ static int __init early_amd_iommu_init(void)
|
|
struct acpi_table_header *ivrs_base;
|
|
acpi_status status;
|
|
int i, remap_cache_sz, ret = 0;
|
|
- u32 pci_id;
|
|
|
|
if (!amd_iommu_detected)
|
|
return -ENODEV;
|
|
@@ -2802,16 +2801,6 @@ static int __init early_amd_iommu_init(void)
|
|
if (ret)
|
|
goto out;
|
|
|
|
- /* Disable IOMMU if there's Stoney Ridge graphics */
|
|
- for (i = 0; i < 32; i++) {
|
|
- pci_id = read_pci_config(0, i, 0, 0);
|
|
- if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
|
|
- pr_info("Disable IOMMU on Stoney Ridge\n");
|
|
- amd_iommu_disabled = true;
|
|
- break;
|
|
- }
|
|
- }
|
|
-
|
|
/* Disable any previously enabled IOMMUs */
|
|
if (!is_kdump_kernel() || amd_iommu_disabled)
|
|
disable_iommus();
|
|
@@ -2879,6 +2868,7 @@ static bool detect_ivrs(void)
|
|
{
|
|
struct acpi_table_header *ivrs_base;
|
|
acpi_status status;
|
|
+ int i;
|
|
|
|
status = acpi_get_table("IVRS", 0, &ivrs_base);
|
|
if (status == AE_NOT_FOUND)
|
|
@@ -2891,6 +2881,17 @@ static bool detect_ivrs(void)
|
|
|
|
acpi_put_table(ivrs_base);
|
|
|
|
+ /* Don't use IOMMU if there is Stoney Ridge graphics */
|
|
+ for (i = 0; i < 32; i++) {
|
|
+ u32 pci_id;
|
|
+
|
|
+ pci_id = read_pci_config(0, i, 0, 0);
|
|
+ if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
|
|
+ pr_info("Disable IOMMU on Stoney Ridge\n");
|
|
+ return false;
|
|
+ }
|
|
+ }
|
|
+
|
|
/* Make sure ACS will be enabled during PCI probe */
|
|
pci_request_acs();
|
|
|
|
@@ -2917,12 +2918,12 @@ static int __init state_next(void)
|
|
}
|
|
break;
|
|
case IOMMU_IVRS_DETECTED:
|
|
- ret = early_amd_iommu_init();
|
|
- init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
|
|
- if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
|
|
- pr_info("AMD IOMMU disabled\n");
|
|
+ if (amd_iommu_disabled) {
|
|
init_state = IOMMU_CMDLINE_DISABLED;
|
|
ret = -EINVAL;
|
|
+ } else {
|
|
+ ret = early_amd_iommu_init();
|
|
+ init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
|
|
}
|
|
break;
|
|
case IOMMU_ACPI_FINISHED:
|
|
@@ -3000,8 +3001,11 @@ int __init amd_iommu_prepare(void)
|
|
amd_iommu_irq_remap = true;
|
|
|
|
ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
+ amd_iommu_irq_remap = false;
|
|
return ret;
|
|
+ }
|
|
+
|
|
return amd_iommu_irq_remap ? 0 : -ENODEV;
|
|
}
|
|
|
|
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
|
|
index 97eb62f667d22..602aab98c0794 100644
|
|
--- a/drivers/iommu/tegra-smmu.c
|
|
+++ b/drivers/iommu/tegra-smmu.c
|
|
@@ -849,12 +849,11 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
|
|
smmu = tegra_smmu_find(args.np);
|
|
if (smmu) {
|
|
err = tegra_smmu_configure(smmu, dev, &args);
|
|
- of_node_put(args.np);
|
|
|
|
- if (err < 0)
|
|
+ if (err < 0) {
|
|
+ of_node_put(args.np);
|
|
return ERR_PTR(err);
|
|
-
|
|
- break;
|
|
+ }
|
|
}
|
|
|
|
of_node_put(args.np);
|
|
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
|
|
index 129e2b6bd6d3f..f848ba16427eb 100644
|
|
--- a/drivers/nvme/host/core.c
|
|
+++ b/drivers/nvme/host/core.c
|
|
@@ -1948,30 +1948,18 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
|
|
blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
|
|
}
|
|
|
|
-static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
|
|
+/*
|
|
+ * Even though NVMe spec explicitly states that MDTS is not applicable to the
|
|
+ * write-zeroes, we are cautious and limit the size to the controllers
|
|
+ * max_hw_sectors value, which is based on the MDTS field and possibly other
|
|
+ * limiting factors.
|
|
+ */
|
|
+static void nvme_config_write_zeroes(struct request_queue *q,
|
|
+ struct nvme_ctrl *ctrl)
|
|
{
|
|
- u64 max_blocks;
|
|
-
|
|
- if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
|
|
- (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
|
|
- return;
|
|
- /*
|
|
- * Even though NVMe spec explicitly states that MDTS is not
|
|
- * applicable to the write-zeroes:- "The restriction does not apply to
|
|
- * commands that do not transfer data between the host and the
|
|
- * controller (e.g., Write Uncorrectable ro Write Zeroes command).".
|
|
- * In order to be more cautious use controller's max_hw_sectors value
|
|
- * to configure the maximum sectors for the write-zeroes which is
|
|
- * configured based on the controller's MDTS field in the
|
|
- * nvme_init_identify() if available.
|
|
- */
|
|
- if (ns->ctrl->max_hw_sectors == UINT_MAX)
|
|
- max_blocks = (u64)USHRT_MAX + 1;
|
|
- else
|
|
- max_blocks = ns->ctrl->max_hw_sectors + 1;
|
|
-
|
|
- blk_queue_max_write_zeroes_sectors(disk->queue,
|
|
- nvme_lba_to_sect(ns, max_blocks));
|
|
+ if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
|
|
+ !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
|
|
+ blk_queue_max_write_zeroes_sectors(q, ctrl->max_hw_sectors);
|
|
}
|
|
|
|
static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
|
|
@@ -2143,7 +2131,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
|
|
set_capacity_and_notify(disk, capacity);
|
|
|
|
nvme_config_discard(disk, ns);
|
|
- nvme_config_write_zeroes(disk, ns);
|
|
+ nvme_config_write_zeroes(disk->queue, ns->ctrl);
|
|
|
|
if ((id->nsattr & NVME_NS_ATTR_RO) ||
|
|
test_bit(NVME_NS_FORCE_RO, &ns->flags))
|
|
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
|
|
index 746392eade455..0c3da10c1f29c 100644
|
|
--- a/drivers/nvme/host/rdma.c
|
|
+++ b/drivers/nvme/host/rdma.c
|
|
@@ -736,8 +736,11 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
|
|
return ret;
|
|
|
|
ctrl->ctrl.queue_count = nr_io_queues + 1;
|
|
- if (ctrl->ctrl.queue_count < 2)
|
|
- return 0;
|
|
+ if (ctrl->ctrl.queue_count < 2) {
|
|
+ dev_err(ctrl->ctrl.device,
|
|
+ "unable to set any I/O queues\n");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
|
|
dev_info(ctrl->ctrl.device,
|
|
"creating %d I/O queues.\n", nr_io_queues);
|
|
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
|
|
index 30d24a5a5b826..c6958e5bc91d5 100644
|
|
--- a/drivers/nvme/host/tcp.c
|
|
+++ b/drivers/nvme/host/tcp.c
|
|
@@ -287,7 +287,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
|
|
* directly, otherwise queue io_work. Also, only do that if we
|
|
* are on the same cpu, so we don't introduce contention.
|
|
*/
|
|
- if (queue->io_cpu == __smp_processor_id() &&
|
|
+ if (queue->io_cpu == raw_smp_processor_id() &&
|
|
sync && empty && mutex_trylock(&queue->send_mutex)) {
|
|
queue->more_requests = !last;
|
|
nvme_tcp_send_all(queue);
|
|
@@ -568,6 +568,13 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
|
|
req->pdu_len = le32_to_cpu(pdu->r2t_length);
|
|
req->pdu_sent = 0;
|
|
|
|
+ if (unlikely(!req->pdu_len)) {
|
|
+ dev_err(queue->ctrl->ctrl.device,
|
|
+ "req %d r2t len is %u, probably a bug...\n",
|
|
+ rq->tag, req->pdu_len);
|
|
+ return -EPROTO;
|
|
+ }
|
|
+
|
|
if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
|
|
dev_err(queue->ctrl->ctrl.device,
|
|
"req %d r2t len %u exceeded data len %u (%zu sent)\n",
|
|
@@ -1748,8 +1755,11 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
|
|
return ret;
|
|
|
|
ctrl->queue_count = nr_io_queues + 1;
|
|
- if (ctrl->queue_count < 2)
|
|
- return 0;
|
|
+ if (ctrl->queue_count < 2) {
|
|
+ dev_err(ctrl->device,
|
|
+ "unable to set any I/O queues\n");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
|
|
dev_info(ctrl->device,
|
|
"creating %d I/O queues.\n", nr_io_queues);
|
|
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
|
|
index 8ce4d59cc9e75..870d06cfd815a 100644
|
|
--- a/drivers/nvme/target/core.c
|
|
+++ b/drivers/nvme/target/core.c
|
|
@@ -1107,9 +1107,20 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
|
|
{
|
|
lockdep_assert_held(&ctrl->lock);
|
|
|
|
- if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
|
|
- nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
|
|
- nvmet_cc_mps(ctrl->cc) != 0 ||
|
|
+ /*
|
|
+ * Only I/O controllers should verify iosqes,iocqes.
|
|
+ * Strictly speaking, the spec says a discovery controller
|
|
+ * should verify iosqes,iocqes are zeroed, however that
|
|
+ * would break backwards compatibility, so don't enforce it.
|
|
+ */
|
|
+ if (ctrl->subsys->type != NVME_NQN_DISC &&
|
|
+ (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
|
|
+ nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
|
|
+ ctrl->csts = NVME_CSTS_CFS;
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (nvmet_cc_mps(ctrl->cc) != 0 ||
|
|
nvmet_cc_ams(ctrl->cc) != 0 ||
|
|
nvmet_cc_css(ctrl->cc) != 0) {
|
|
ctrl->csts = NVME_CSTS_CFS;
|
|
diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c
|
|
index cdbfa5df3a51f..dbfa0b55d31a5 100644
|
|
--- a/drivers/pci/hotplug/rpadlpar_sysfs.c
|
|
+++ b/drivers/pci/hotplug/rpadlpar_sysfs.c
|
|
@@ -34,12 +34,11 @@ static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr,
|
|
if (nbytes >= MAX_DRC_NAME_LEN)
|
|
return 0;
|
|
|
|
- memcpy(drc_name, buf, nbytes);
|
|
+ strscpy(drc_name, buf, nbytes + 1);
|
|
|
|
end = strchr(drc_name, '\n');
|
|
- if (!end)
|
|
- end = &drc_name[nbytes];
|
|
- *end = '\0';
|
|
+ if (end)
|
|
+ *end = '\0';
|
|
|
|
rc = dlpar_add_slot(drc_name);
|
|
if (rc)
|
|
@@ -65,12 +64,11 @@ static ssize_t remove_slot_store(struct kobject *kobj,
|
|
if (nbytes >= MAX_DRC_NAME_LEN)
|
|
return 0;
|
|
|
|
- memcpy(drc_name, buf, nbytes);
|
|
+ strscpy(drc_name, buf, nbytes + 1);
|
|
|
|
end = strchr(drc_name, '\n');
|
|
- if (!end)
|
|
- end = &drc_name[nbytes];
|
|
- *end = '\0';
|
|
+ if (end)
|
|
+ *end = '\0';
|
|
|
|
rc = dlpar_remove_slot(drc_name);
|
|
if (rc)
|
|
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
|
|
index c9e790c74051f..a047c421debe2 100644
|
|
--- a/drivers/pci/hotplug/s390_pci_hpc.c
|
|
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
|
|
@@ -93,8 +93,9 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
|
|
pci_dev_put(pdev);
|
|
return -EBUSY;
|
|
}
|
|
+ pci_dev_put(pdev);
|
|
|
|
- zpci_remove_device(zdev);
|
|
+ zpci_remove_device(zdev, false);
|
|
|
|
rc = zpci_disable_device(zdev);
|
|
if (rc)
|
|
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
|
|
index bc79a017e1a21..46a8f2d1d2b83 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
|
|
@@ -2421,7 +2421,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
|
|
memset(dstbuf, 0, 33);
|
|
size = (nbytes < 32) ? nbytes : 32;
|
|
if (copy_from_user(dstbuf, buf, size))
|
|
- return 0;
|
|
+ return -EFAULT;
|
|
|
|
if (dent == phba->debug_InjErrLBA) {
|
|
if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') &&
|
|
@@ -2430,7 +2430,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
|
|
}
|
|
|
|
if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp)))
|
|
- return 0;
|
|
+ return -EINVAL;
|
|
|
|
if (dent == phba->debug_writeGuard)
|
|
phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp;
|
|
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
|
|
index c8b09a81834d6..72439d6aa0578 100644
|
|
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
|
|
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
|
|
@@ -407,7 +407,7 @@ mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
|
|
* And add this object to port_table_list.
|
|
*/
|
|
if (!ioc->multipath_on_hba) {
|
|
- port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
|
|
+ port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
|
|
if (!port)
|
|
return NULL;
|
|
|
|
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
|
|
index 4adf9ded296aa..329fd025c7189 100644
|
|
--- a/drivers/scsi/myrs.c
|
|
+++ b/drivers/scsi/myrs.c
|
|
@@ -2273,12 +2273,12 @@ static void myrs_cleanup(struct myrs_hba *cs)
|
|
if (cs->mmio_base) {
|
|
cs->disable_intr(cs);
|
|
iounmap(cs->mmio_base);
|
|
+ cs->mmio_base = NULL;
|
|
}
|
|
if (cs->irq)
|
|
free_irq(cs->irq, cs);
|
|
if (cs->io_addr)
|
|
release_region(cs->io_addr, 0x80);
|
|
- iounmap(cs->mmio_base);
|
|
pci_set_drvdata(pdev, NULL);
|
|
pci_disable_device(pdev);
|
|
scsi_host_put(cs->host);
|
|
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
|
|
index c55202b92a43a..a981f261b3043 100644
|
|
--- a/drivers/scsi/ufs/ufs-mediatek.c
|
|
+++ b/drivers/scsi/ufs/ufs-mediatek.c
|
|
@@ -911,7 +911,7 @@ static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
|
|
if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
|
|
return;
|
|
|
|
- if (lpm & !hba->vreg_info.vcc->enabled)
|
|
+ if (lpm && !hba->vreg_info.vcc->enabled)
|
|
regulator_set_mode(hba->vreg_info.vccq2->reg,
|
|
REGULATOR_MODE_IDLE);
|
|
else if (!lpm)
|
|
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
|
|
index 826b01f346246..2e1255bf1b429 100644
|
|
--- a/drivers/spi/spi-cadence-quadspi.c
|
|
+++ b/drivers/spi/spi-cadence-quadspi.c
|
|
@@ -1198,6 +1198,7 @@ static int cqspi_probe(struct platform_device *pdev)
|
|
cqspi = spi_master_get_devdata(master);
|
|
|
|
cqspi->pdev = pdev;
|
|
+ platform_set_drvdata(pdev, cqspi);
|
|
|
|
/* Obtain configuration from OF. */
|
|
ret = cqspi_of_get_pdata(cqspi);
|
|
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
|
|
index a8572f49d3adc..0fc2dae329e54 100644
|
|
--- a/drivers/thunderbolt/switch.c
|
|
+++ b/drivers/thunderbolt/switch.c
|
|
@@ -762,12 +762,6 @@ static int tb_init_port(struct tb_port *port)
|
|
|
|
tb_dump_port(port->sw->tb, &port->config);
|
|
|
|
- /* Control port does not need HopID allocation */
|
|
- if (port->port) {
|
|
- ida_init(&port->in_hopids);
|
|
- ida_init(&port->out_hopids);
|
|
- }
|
|
-
|
|
INIT_LIST_HEAD(&port->list);
|
|
return 0;
|
|
|
|
@@ -1789,10 +1783,8 @@ static void tb_switch_release(struct device *dev)
|
|
dma_port_free(sw->dma_port);
|
|
|
|
tb_switch_for_each_port(sw, port) {
|
|
- if (!port->disabled) {
|
|
- ida_destroy(&port->in_hopids);
|
|
- ida_destroy(&port->out_hopids);
|
|
- }
|
|
+ ida_destroy(&port->in_hopids);
|
|
+ ida_destroy(&port->out_hopids);
|
|
}
|
|
|
|
kfree(sw->uuid);
|
|
@@ -1972,6 +1964,12 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
|
|
/* minimum setup for tb_find_cap and tb_drom_read to work */
|
|
sw->ports[i].sw = sw;
|
|
sw->ports[i].port = i;
|
|
+
|
|
+ /* Control port does not need HopID allocation */
|
|
+ if (i) {
|
|
+ ida_init(&sw->ports[i].in_hopids);
|
|
+ ida_init(&sw->ports[i].out_hopids);
|
|
+ }
|
|
}
|
|
|
|
ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
|
|
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
|
|
index 51d5b031cada5..9932b1870e56f 100644
|
|
--- a/drivers/thunderbolt/tb.c
|
|
+++ b/drivers/thunderbolt/tb.c
|
|
@@ -138,6 +138,10 @@ static void tb_discover_tunnels(struct tb_switch *sw)
|
|
parent->boot = true;
|
|
parent = tb_switch_parent(parent);
|
|
}
|
|
+ } else if (tb_tunnel_is_dp(tunnel)) {
|
|
+ /* Keep the domain from powering down */
|
|
+ pm_runtime_get_sync(&tunnel->src_port->sw->dev);
|
|
+ pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
|
|
}
|
|
|
|
list_add_tail(&tunnel->list, &tcm->tunnel_list);
|
|
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
|
|
index 56f7235bc068c..2a86ad4b12b34 100644
|
|
--- a/drivers/usb/dwc3/gadget.c
|
|
+++ b/drivers/usb/dwc3/gadget.c
|
|
@@ -783,8 +783,6 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
|
|
|
|
trace_dwc3_gadget_ep_disable(dep);
|
|
|
|
- dwc3_remove_requests(dwc, dep);
|
|
-
|
|
/* make sure HW endpoint isn't stalled */
|
|
if (dep->flags & DWC3_EP_STALL)
|
|
__dwc3_gadget_ep_set_halt(dep, 0, false);
|
|
@@ -803,6 +801,8 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
|
|
dep->endpoint.desc = NULL;
|
|
}
|
|
|
|
+ dwc3_remove_requests(dwc, dep);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -1617,7 +1617,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
|
|
{
|
|
struct dwc3 *dwc = dep->dwc;
|
|
|
|
- if (!dep->endpoint.desc || !dwc->pullups_connected) {
|
|
+ if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
|
|
dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
|
|
dep->name);
|
|
return -ESHUTDOWN;
|
|
@@ -2125,6 +2125,17 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
|
|
}
|
|
}
|
|
|
|
+ /*
|
|
+ * Check the return value for successful resume, or error. For a
|
|
+ * successful resume, the DWC3 runtime PM resume routine will handle
|
|
+ * the run stop sequence, so avoid duplicate operations here.
|
|
+ */
|
|
+ ret = pm_runtime_get_sync(dwc->dev);
|
|
+ if (!ret || ret < 0) {
|
|
+ pm_runtime_put(dwc->dev);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
/*
|
|
* Synchronize any pending event handling before executing the controller
|
|
* halt routine.
|
|
@@ -2139,6 +2150,7 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
|
|
if (!is_on) {
|
|
u32 count;
|
|
|
|
+ dwc->connected = false;
|
|
/*
|
|
* In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
|
|
* Section 4.1.8 Table 4-7, it states that for a device-initiated
|
|
@@ -2169,6 +2181,7 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
|
|
|
|
ret = dwc3_gadget_run_stop(dwc, is_on, false);
|
|
spin_unlock_irqrestore(&dwc->lock, flags);
|
|
+ pm_runtime_put(dwc->dev);
|
|
|
|
return ret;
|
|
}
|
|
@@ -3254,8 +3267,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
|
|
{
|
|
u32 reg;
|
|
|
|
- dwc->connected = true;
|
|
-
|
|
/*
|
|
* WORKAROUND: DWC3 revisions <1.88a have an issue which
|
|
* would cause a missing Disconnect Event if there's a
|
|
@@ -3295,6 +3306,7 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
|
|
* transfers."
|
|
*/
|
|
dwc3_stop_active_transfers(dwc);
|
|
+ dwc->connected = true;
|
|
|
|
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
|
|
reg &= ~DWC3_DCTL_TSTCTRL_MASK;
|
|
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
|
|
index 36ffb43f9c1a0..9b7fa53d6642b 100644
|
|
--- a/drivers/usb/gadget/configfs.c
|
|
+++ b/drivers/usb/gadget/configfs.c
|
|
@@ -97,6 +97,8 @@ struct gadget_config_name {
|
|
struct list_head list;
|
|
};
|
|
|
|
+#define USB_MAX_STRING_WITH_NULL_LEN (USB_MAX_STRING_LEN+1)
|
|
+
|
|
static int usb_string_copy(const char *s, char **s_copy)
|
|
{
|
|
int ret;
|
|
@@ -106,12 +108,16 @@ static int usb_string_copy(const char *s, char **s_copy)
|
|
if (ret > USB_MAX_STRING_LEN)
|
|
return -EOVERFLOW;
|
|
|
|
- str = kstrdup(s, GFP_KERNEL);
|
|
- if (!str)
|
|
- return -ENOMEM;
|
|
+ if (copy) {
|
|
+ str = copy;
|
|
+ } else {
|
|
+ str = kmalloc(USB_MAX_STRING_WITH_NULL_LEN, GFP_KERNEL);
|
|
+ if (!str)
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ strcpy(str, s);
|
|
if (str[ret - 1] == '\n')
|
|
str[ret - 1] = '\0';
|
|
- kfree(copy);
|
|
*s_copy = str;
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
|
|
index 5eb895b19c558..f4304ce69350e 100644
|
|
--- a/drivers/usb/storage/transport.c
|
|
+++ b/drivers/usb/storage/transport.c
|
|
@@ -656,6 +656,13 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
|
|
need_auto_sense = 1;
|
|
}
|
|
|
|
+ /* Some devices (Kindle) require another command after SYNC CACHE */
|
|
+ if ((us->fflags & US_FL_SENSE_AFTER_SYNC) &&
|
|
+ srb->cmnd[0] == SYNCHRONIZE_CACHE) {
|
|
+ usb_stor_dbg(us, "-- sense after SYNC CACHE\n");
|
|
+ need_auto_sense = 1;
|
|
+ }
|
|
+
|
|
/*
|
|
* If we have a failure, we're going to do a REQUEST_SENSE
|
|
* automatically. Note that we differentiate between a command
|
|
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
|
|
index 5732e9691f08f..efa972be2ee34 100644
|
|
--- a/drivers/usb/storage/unusual_devs.h
|
|
+++ b/drivers/usb/storage/unusual_devs.h
|
|
@@ -2211,6 +2211,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
|
|
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
|
|
US_FL_NO_READ_DISC_INFO ),
|
|
|
|
+/*
|
|
+ * Reported by Matthias Schwarzott <zzam@gentoo.org>
|
|
+ * The Amazon Kindle treats SYNCHRONIZE CACHE as an indication that
|
|
+ * the host may be finished with it, and automatically ejects its
|
|
+ * emulated media unless it receives another command within one second.
|
|
+ */
|
|
+UNUSUAL_DEV( 0x1949, 0x0004, 0x0000, 0x9999,
|
|
+ "Amazon",
|
|
+ "Kindle",
|
|
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
|
|
+ US_FL_SENSE_AFTER_SYNC ),
|
|
+
|
|
/*
|
|
* Reported by Oliver Neukum <oneukum@suse.com>
|
|
* This device morphes spontaneously into another device if the access
|
|
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
|
|
index 22a85b396f698..3cd4859ffab58 100644
|
|
--- a/drivers/usb/typec/tcpm/tcpm.c
|
|
+++ b/drivers/usb/typec/tcpm/tcpm.c
|
|
@@ -797,6 +797,7 @@ static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
|
|
|
|
port->supply_voltage = mv;
|
|
port->current_limit = max_ma;
|
|
+ power_supply_changed(port->psy);
|
|
|
|
if (port->tcpc->set_current_limit)
|
|
ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
|
|
@@ -2345,6 +2346,7 @@ static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
|
|
|
|
port->pps_data.supported = false;
|
|
port->usb_type = POWER_SUPPLY_USB_TYPE_PD;
|
|
+ power_supply_changed(port->psy);
|
|
|
|
/*
|
|
* Select the source PDO providing the most power which has a
|
|
@@ -2369,6 +2371,7 @@ static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
|
|
port->pps_data.supported = true;
|
|
port->usb_type =
|
|
POWER_SUPPLY_USB_TYPE_PD_PPS;
|
|
+ power_supply_changed(port->psy);
|
|
}
|
|
continue;
|
|
default:
|
|
@@ -2526,6 +2529,7 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
|
|
port->pps_data.out_volt));
|
|
port->pps_data.op_curr = min(port->pps_data.max_curr,
|
|
port->pps_data.op_curr);
|
|
+ power_supply_changed(port->psy);
|
|
}
|
|
|
|
return src_pdo;
|
|
@@ -2761,6 +2765,7 @@ static int tcpm_set_charge(struct tcpm_port *port, bool charge)
|
|
return ret;
|
|
}
|
|
port->vbus_charge = charge;
|
|
+ power_supply_changed(port->psy);
|
|
return 0;
|
|
}
|
|
|
|
@@ -2935,6 +2940,7 @@ static void tcpm_reset_port(struct tcpm_port *port)
|
|
port->try_src_count = 0;
|
|
port->try_snk_count = 0;
|
|
port->usb_type = POWER_SUPPLY_USB_TYPE_C;
|
|
+ power_supply_changed(port->psy);
|
|
port->nr_sink_caps = 0;
|
|
port->sink_cap_done = false;
|
|
if (port->tcpc->enable_frs)
|
|
@@ -5129,7 +5135,7 @@ static int tcpm_psy_set_prop(struct power_supply *psy,
|
|
ret = -EINVAL;
|
|
break;
|
|
}
|
|
-
|
|
+ power_supply_changed(port->psy);
|
|
return ret;
|
|
}
|
|
|
|
@@ -5281,6 +5287,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
|
|
err = devm_tcpm_psy_register(port);
|
|
if (err)
|
|
goto out_role_sw_put;
|
|
+ power_supply_changed(port->psy);
|
|
|
|
port->typec_port = typec_register_port(port->dev, &port->typec_caps);
|
|
if (IS_ERR(port->typec_port)) {
|
|
diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
|
|
index 6e6ef63175237..29bd1c5a283cd 100644
|
|
--- a/drivers/usb/typec/tps6598x.c
|
|
+++ b/drivers/usb/typec/tps6598x.c
|
|
@@ -64,7 +64,6 @@ enum {
|
|
struct tps6598x_rx_identity_reg {
|
|
u8 status;
|
|
struct usb_pd_identity identity;
|
|
- u32 vdo[3];
|
|
} __packed;
|
|
|
|
/* Standard Task return codes */
|
|
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
|
|
index a3ec39fc61778..7383a543c6d12 100644
|
|
--- a/drivers/usb/usbip/vudc_sysfs.c
|
|
+++ b/drivers/usb/usbip/vudc_sysfs.c
|
|
@@ -174,7 +174,7 @@ static ssize_t usbip_sockfd_store(struct device *dev,
|
|
|
|
udc->ud.tcp_socket = socket;
|
|
udc->ud.tcp_rx = tcp_rx;
|
|
- udc->ud.tcp_rx = tcp_tx;
|
|
+ udc->ud.tcp_tx = tcp_tx;
|
|
udc->ud.status = SDEV_ST_USED;
|
|
|
|
spin_unlock_irq(&udc->ud.lock);
|
|
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
|
|
index 5533df91b257d..90c0525b1e0cf 100644
|
|
--- a/drivers/vfio/Kconfig
|
|
+++ b/drivers/vfio/Kconfig
|
|
@@ -21,7 +21,7 @@ config VFIO_VIRQFD
|
|
|
|
menuconfig VFIO
|
|
tristate "VFIO Non-Privileged userspace driver framework"
|
|
- depends on IOMMU_API
|
|
+ select IOMMU_API
|
|
select VFIO_IOMMU_TYPE1 if (X86 || S390 || ARM || ARM64)
|
|
help
|
|
VFIO provides a framework for secure userspace device drivers.
|
|
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
|
|
index ef688c8c0e0e6..e0a27e3362935 100644
|
|
--- a/drivers/vhost/vdpa.c
|
|
+++ b/drivers/vhost/vdpa.c
|
|
@@ -308,8 +308,10 @@ static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
|
|
|
|
static void vhost_vdpa_config_put(struct vhost_vdpa *v)
|
|
{
|
|
- if (v->config_ctx)
|
|
+ if (v->config_ctx) {
|
|
eventfd_ctx_put(v->config_ctx);
|
|
+ v->config_ctx = NULL;
|
|
+ }
|
|
}
|
|
|
|
static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
|
|
@@ -329,8 +331,12 @@ static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
|
|
if (!IS_ERR_OR_NULL(ctx))
|
|
eventfd_ctx_put(ctx);
|
|
|
|
- if (IS_ERR(v->config_ctx))
|
|
- return PTR_ERR(v->config_ctx);
|
|
+ if (IS_ERR(v->config_ctx)) {
|
|
+ long ret = PTR_ERR(v->config_ctx);
|
|
+
|
|
+ v->config_ctx = NULL;
|
|
+ return ret;
|
|
+ }
|
|
|
|
v->vdpa->config->set_config_cb(v->vdpa, &cb);
|
|
|
|
@@ -900,14 +906,10 @@ err:
|
|
|
|
static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
|
|
{
|
|
- struct vhost_virtqueue *vq;
|
|
int i;
|
|
|
|
- for (i = 0; i < v->nvqs; i++) {
|
|
- vq = &v->vqs[i];
|
|
- if (vq->call_ctx.producer.irq)
|
|
- irq_bypass_unregister_producer(&vq->call_ctx.producer);
|
|
- }
|
|
+ for (i = 0; i < v->nvqs; i++)
|
|
+ vhost_vdpa_unsetup_vq_irq(v, i);
|
|
}
|
|
|
|
static int vhost_vdpa_release(struct inode *inode, struct file *filep)
|
|
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
|
|
index 7bd659ad959ec..7cb0604e2841f 100644
|
|
--- a/fs/afs/dir.c
|
|
+++ b/fs/afs/dir.c
|
|
@@ -69,7 +69,6 @@ const struct inode_operations afs_dir_inode_operations = {
|
|
.permission = afs_permission,
|
|
.getattr = afs_getattr,
|
|
.setattr = afs_setattr,
|
|
- .listxattr = afs_listxattr,
|
|
};
|
|
|
|
const struct address_space_operations afs_dir_aops = {
|
|
diff --git a/fs/afs/file.c b/fs/afs/file.c
|
|
index 85f5adf21aa08..960b64268623e 100644
|
|
--- a/fs/afs/file.c
|
|
+++ b/fs/afs/file.c
|
|
@@ -43,7 +43,6 @@ const struct inode_operations afs_file_inode_operations = {
|
|
.getattr = afs_getattr,
|
|
.setattr = afs_setattr,
|
|
.permission = afs_permission,
|
|
- .listxattr = afs_listxattr,
|
|
};
|
|
|
|
const struct address_space_operations afs_fs_aops = {
|
|
diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
|
|
index 97cab12b0a6c2..71c58723763d2 100644
|
|
--- a/fs/afs/fs_operation.c
|
|
+++ b/fs/afs/fs_operation.c
|
|
@@ -181,10 +181,13 @@ void afs_wait_for_operation(struct afs_operation *op)
|
|
if (test_bit(AFS_SERVER_FL_IS_YFS, &op->server->flags) &&
|
|
op->ops->issue_yfs_rpc)
|
|
op->ops->issue_yfs_rpc(op);
|
|
- else
|
|
+ else if (op->ops->issue_afs_rpc)
|
|
op->ops->issue_afs_rpc(op);
|
|
+ else
|
|
+ op->ac.error = -ENOTSUPP;
|
|
|
|
- op->error = afs_wait_for_call_to_complete(op->call, &op->ac);
|
|
+ if (op->call)
|
|
+ op->error = afs_wait_for_call_to_complete(op->call, &op->ac);
|
|
}
|
|
|
|
switch (op->error) {
|
|
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
|
|
index b0d7b892090da..1d03eb1920ec0 100644
|
|
--- a/fs/afs/inode.c
|
|
+++ b/fs/afs/inode.c
|
|
@@ -27,7 +27,6 @@
|
|
|
|
static const struct inode_operations afs_symlink_inode_operations = {
|
|
.get_link = page_get_link,
|
|
- .listxattr = afs_listxattr,
|
|
};
|
|
|
|
static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *parent_vnode)
|
|
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
|
|
index 0d150a29e39ec..525ef075fcd90 100644
|
|
--- a/fs/afs/internal.h
|
|
+++ b/fs/afs/internal.h
|
|
@@ -1508,7 +1508,6 @@ extern int afs_launder_page(struct page *);
|
|
* xattr.c
|
|
*/
|
|
extern const struct xattr_handler *afs_xattr_handlers[];
|
|
-extern ssize_t afs_listxattr(struct dentry *, char *, size_t);
|
|
|
|
/*
|
|
* yfsclient.c
|
|
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
|
|
index 052dab2f5c03a..bbb2c210d139d 100644
|
|
--- a/fs/afs/mntpt.c
|
|
+++ b/fs/afs/mntpt.c
|
|
@@ -32,7 +32,6 @@ const struct inode_operations afs_mntpt_inode_operations = {
|
|
.lookup = afs_mntpt_lookup,
|
|
.readlink = page_readlink,
|
|
.getattr = afs_getattr,
|
|
- .listxattr = afs_listxattr,
|
|
};
|
|
|
|
const struct inode_operations afs_autocell_inode_operations = {
|
|
diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c
|
|
index 95c573dcda116..6a29337bd562f 100644
|
|
--- a/fs/afs/xattr.c
|
|
+++ b/fs/afs/xattr.c
|
|
@@ -11,29 +11,6 @@
|
|
#include <linux/xattr.h>
|
|
#include "internal.h"
|
|
|
|
-static const char afs_xattr_list[] =
|
|
- "afs.acl\0"
|
|
- "afs.cell\0"
|
|
- "afs.fid\0"
|
|
- "afs.volume\0"
|
|
- "afs.yfs.acl\0"
|
|
- "afs.yfs.acl_inherited\0"
|
|
- "afs.yfs.acl_num_cleaned\0"
|
|
- "afs.yfs.vol_acl";
|
|
-
|
|
-/*
|
|
- * Retrieve a list of the supported xattrs.
|
|
- */
|
|
-ssize_t afs_listxattr(struct dentry *dentry, char *buffer, size_t size)
|
|
-{
|
|
- if (size == 0)
|
|
- return sizeof(afs_xattr_list);
|
|
- if (size < sizeof(afs_xattr_list))
|
|
- return -ERANGE;
|
|
- memcpy(buffer, afs_xattr_list, sizeof(afs_xattr_list));
|
|
- return sizeof(afs_xattr_list);
|
|
-}
|
|
-
|
|
/*
|
|
* Deal with the result of a successful fetch ACL operation.
|
|
*/
|
|
@@ -230,6 +207,8 @@ static int afs_xattr_get_yfs(const struct xattr_handler *handler,
|
|
else
|
|
ret = -ERANGE;
|
|
}
|
|
+ } else if (ret == -ENOTSUPP) {
|
|
+ ret = -ENODATA;
|
|
}
|
|
|
|
error_yacl:
|
|
@@ -254,6 +233,7 @@ static int afs_xattr_set_yfs(const struct xattr_handler *handler,
|
|
{
|
|
struct afs_operation *op;
|
|
struct afs_vnode *vnode = AFS_FS_I(inode);
|
|
+ int ret;
|
|
|
|
if (flags == XATTR_CREATE ||
|
|
strcmp(name, "acl") != 0)
|
|
@@ -268,7 +248,10 @@ static int afs_xattr_set_yfs(const struct xattr_handler *handler,
|
|
return afs_put_operation(op);
|
|
|
|
op->ops = &yfs_store_opaque_acl2_operation;
|
|
- return afs_do_sync_operation(op);
|
|
+ ret = afs_do_sync_operation(op);
|
|
+ if (ret == -ENOTSUPP)
|
|
+ ret = -ENODATA;
|
|
+ return ret;
|
|
}
|
|
|
|
static const struct xattr_handler afs_xattr_yfs_handler = {
|
|
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
|
|
index 40bf27a65c5d5..33fe5d839c110 100644
|
|
--- a/fs/btrfs/ctree.c
|
|
+++ b/fs/btrfs/ctree.c
|
|
@@ -1365,7 +1365,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
|
|
"failed to read tree block %llu from get_old_root",
|
|
logical);
|
|
} else {
|
|
+ btrfs_tree_read_lock(old);
|
|
eb = btrfs_clone_extent_buffer(old);
|
|
+ btrfs_tree_read_unlock(old);
|
|
free_extent_buffer(old);
|
|
}
|
|
} else if (old_root) {
|
|
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
|
|
index 9b4f75568261e..df25d3e300f07 100644
|
|
--- a/fs/btrfs/inode.c
|
|
+++ b/fs/btrfs/inode.c
|
|
@@ -8806,7 +8806,7 @@ int __init btrfs_init_cachep(void)
|
|
|
|
btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
|
|
PAGE_SIZE, PAGE_SIZE,
|
|
- SLAB_RED_ZONE, NULL);
|
|
+ SLAB_MEM_SPREAD, NULL);
|
|
if (!btrfs_free_space_bitmap_cachep)
|
|
goto fail;
|
|
|
|
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
|
|
index 798c32cab146f..3a26ad47b220c 100644
|
|
--- a/fs/cifs/fs_context.c
|
|
+++ b/fs/cifs/fs_context.c
|
|
@@ -1175,9 +1175,11 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
|
|
pr_warn_once("Witness protocol support is experimental\n");
|
|
break;
|
|
case Opt_rootfs:
|
|
-#ifdef CONFIG_CIFS_ROOT
|
|
- ctx->rootfs = true;
|
|
+#ifndef CONFIG_CIFS_ROOT
|
|
+ cifs_dbg(VFS, "rootfs support requires CONFIG_CIFS_ROOT config option\n");
|
|
+ goto cifs_parse_mount_err;
|
|
#endif
|
|
+ ctx->rootfs = true;
|
|
break;
|
|
case Opt_posixpaths:
|
|
if (result.negated)
|
|
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
|
|
index a83b3a8ffaacc..cbff8a7e36a99 100644
|
|
--- a/fs/cifs/inode.c
|
|
+++ b/fs/cifs/inode.c
|
|
@@ -2383,7 +2383,7 @@ int cifs_getattr(const struct path *path, struct kstat *stat,
|
|
* We need to be sure that all dirty pages are written and the server
|
|
* has actual ctime, mtime and file length.
|
|
*/
|
|
- if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_SIZE)) &&
|
|
+ if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_SIZE | STATX_BLOCKS)) &&
|
|
!CIFS_CACHE_READ(CIFS_I(inode)) &&
|
|
inode->i_mapping && inode->i_mapping->nrpages != 0) {
|
|
rc = filemap_fdatawait(inode->i_mapping);
|
|
@@ -2573,6 +2573,14 @@ set_size_out:
|
|
if (rc == 0) {
|
|
cifsInode->server_eof = attrs->ia_size;
|
|
cifs_setsize(inode, attrs->ia_size);
|
|
+ /*
|
|
+ * i_blocks is not related to (i_size / i_blksize), but instead
|
|
+ * 512 byte (2**9) size is required for calculating num blocks.
|
|
+ * Until we can query the server for actual allocation size,
|
|
+ * this is best estimate we have for blocks allocated for a file
|
|
+ * Number of blocks must be rounded up so size 1 is not 0 blocks
|
|
+ */
|
|
+ inode->i_blocks = (512 - 1 + attrs->ia_size) >> 9;
|
|
|
|
/*
|
|
* The man page of truncate says if the size changed,
|
|
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
|
|
index 14ecf1a9f11a3..64fccb8809ecb 100644
|
|
--- a/fs/cifs/transport.c
|
|
+++ b/fs/cifs/transport.c
|
|
@@ -1171,9 +1171,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
|
|
/*
|
|
* Compounding is never used during session establish.
|
|
*/
|
|
- if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
|
|
+ if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
|
|
+ mutex_lock(&server->srv_mutex);
|
|
smb311_update_preauth_hash(ses, rqst[0].rq_iov,
|
|
rqst[0].rq_nvec);
|
|
+ mutex_unlock(&server->srv_mutex);
|
|
+ }
|
|
|
|
for (i = 0; i < num_rqst; i++) {
|
|
rc = wait_for_response(server, midQ[i]);
|
|
@@ -1241,7 +1244,9 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
|
|
.iov_base = resp_iov[0].iov_base,
|
|
.iov_len = resp_iov[0].iov_len
|
|
};
|
|
+ mutex_lock(&server->srv_mutex);
|
|
smb311_update_preauth_hash(ses, &iov, 1);
|
|
+ mutex_unlock(&server->srv_mutex);
|
|
}
|
|
|
|
out:
|
|
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
|
|
index 2866d249f3d26..e5c81593d972c 100644
|
|
--- a/fs/ext4/ext4.h
|
|
+++ b/fs/ext4/ext4.h
|
|
@@ -2792,6 +2792,8 @@ void __ext4_fc_track_link(handle_t *handle, struct inode *inode,
|
|
struct dentry *dentry);
|
|
void ext4_fc_track_unlink(handle_t *handle, struct dentry *dentry);
|
|
void ext4_fc_track_link(handle_t *handle, struct dentry *dentry);
|
|
+void __ext4_fc_track_create(handle_t *handle, struct inode *inode,
|
|
+ struct dentry *dentry);
|
|
void ext4_fc_track_create(handle_t *handle, struct dentry *dentry);
|
|
void ext4_fc_track_inode(handle_t *handle, struct inode *inode);
|
|
void ext4_fc_mark_ineligible(struct super_block *sb, int reason);
|
|
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
|
|
index 0a14a7c87bf82..62e9e5535fa76 100644
|
|
--- a/fs/ext4/fast_commit.c
|
|
+++ b/fs/ext4/fast_commit.c
|
|
@@ -513,10 +513,10 @@ void ext4_fc_track_link(handle_t *handle, struct dentry *dentry)
|
|
__ext4_fc_track_link(handle, d_inode(dentry), dentry);
|
|
}
|
|
|
|
-void ext4_fc_track_create(handle_t *handle, struct dentry *dentry)
|
|
+void __ext4_fc_track_create(handle_t *handle, struct inode *inode,
|
|
+ struct dentry *dentry)
|
|
{
|
|
struct __track_dentry_update_args args;
|
|
- struct inode *inode = d_inode(dentry);
|
|
int ret;
|
|
|
|
args.dentry = dentry;
|
|
@@ -527,6 +527,11 @@ void ext4_fc_track_create(handle_t *handle, struct dentry *dentry)
|
|
trace_ext4_fc_track_create(inode, dentry, ret);
|
|
}
|
|
|
|
+void ext4_fc_track_create(handle_t *handle, struct dentry *dentry)
|
|
+{
|
|
+ __ext4_fc_track_create(handle, d_inode(dentry), dentry);
|
|
+}
|
|
+
|
|
/* __track_fn for inode tracking */
|
|
static int __track_inode(struct inode *inode, void *arg, bool update)
|
|
{
|
|
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
|
|
index c173c84058561..ed498538a7499 100644
|
|
--- a/fs/ext4/inode.c
|
|
+++ b/fs/ext4/inode.c
|
|
@@ -5029,7 +5029,7 @@ static int ext4_do_update_inode(handle_t *handle,
|
|
struct ext4_inode_info *ei = EXT4_I(inode);
|
|
struct buffer_head *bh = iloc->bh;
|
|
struct super_block *sb = inode->i_sb;
|
|
- int err = 0, rc, block;
|
|
+ int err = 0, block;
|
|
int need_datasync = 0, set_large_file = 0;
|
|
uid_t i_uid;
|
|
gid_t i_gid;
|
|
@@ -5141,9 +5141,9 @@ static int ext4_do_update_inode(handle_t *handle,
|
|
bh->b_data);
|
|
|
|
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
|
|
- rc = ext4_handle_dirty_metadata(handle, NULL, bh);
|
|
- if (!err)
|
|
- err = rc;
|
|
+ err = ext4_handle_dirty_metadata(handle, NULL, bh);
|
|
+ if (err)
|
|
+ goto out_brelse;
|
|
ext4_clear_inode_state(inode, EXT4_STATE_NEW);
|
|
if (set_large_file) {
|
|
BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
|
|
@@ -5389,8 +5389,10 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
|
|
inode->i_gid = attr->ia_gid;
|
|
error = ext4_mark_inode_dirty(handle, inode);
|
|
ext4_journal_stop(handle);
|
|
- if (unlikely(error))
|
|
+ if (unlikely(error)) {
|
|
+ ext4_fc_stop_update(inode);
|
|
return error;
|
|
+ }
|
|
}
|
|
|
|
if (attr->ia_valid & ATTR_SIZE) {
|
|
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
|
|
index df0368d578b16..078f26f4b56e3 100644
|
|
--- a/fs/ext4/namei.c
|
|
+++ b/fs/ext4/namei.c
|
|
@@ -3601,6 +3601,31 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
|
|
return retval;
|
|
}
|
|
|
|
+static void ext4_resetent(handle_t *handle, struct ext4_renament *ent,
|
|
+ unsigned ino, unsigned file_type)
|
|
+{
|
|
+ struct ext4_renament old = *ent;
|
|
+ int retval = 0;
|
|
+
|
|
+ /*
|
|
+ * old->de could have moved from under us during make indexed dir,
|
|
+ * so the old->de may no longer valid and need to find it again
|
|
+ * before reset old inode info.
|
|
+ */
|
|
+ old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
|
|
+ if (IS_ERR(old.bh))
|
|
+ retval = PTR_ERR(old.bh);
|
|
+ if (!old.bh)
|
|
+ retval = -ENOENT;
|
|
+ if (retval) {
|
|
+ ext4_std_error(old.dir->i_sb, retval);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ ext4_setent(handle, &old, ino, file_type);
|
|
+ brelse(old.bh);
|
|
+}
|
|
+
|
|
static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,
|
|
const struct qstr *d_name)
|
|
{
|
|
@@ -3836,6 +3861,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
|
|
retval = ext4_mark_inode_dirty(handle, whiteout);
|
|
if (unlikely(retval))
|
|
goto end_rename;
|
|
+
|
|
}
|
|
if (!new.bh) {
|
|
retval = ext4_add_entry(handle, new.dentry, old.inode);
|
|
@@ -3909,6 +3935,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
|
|
ext4_fc_track_unlink(handle, new.dentry);
|
|
__ext4_fc_track_link(handle, old.inode, new.dentry);
|
|
__ext4_fc_track_unlink(handle, old.inode, old.dentry);
|
|
+ if (whiteout)
|
|
+ __ext4_fc_track_create(handle, whiteout, old.dentry);
|
|
}
|
|
|
|
if (new.inode) {
|
|
@@ -3923,8 +3951,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
|
|
end_rename:
|
|
if (whiteout) {
|
|
if (retval) {
|
|
- ext4_setent(handle, &old,
|
|
- old.inode->i_ino, old_file_type);
|
|
+ ext4_resetent(handle, &old,
|
|
+ old.inode->i_ino, old_file_type);
|
|
drop_nlink(whiteout);
|
|
}
|
|
unlock_new_inode(whiteout);
|
|
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
|
|
index 2ae0af1c88c78..a1353b0825ea3 100644
|
|
--- a/fs/ext4/super.c
|
|
+++ b/fs/ext4/super.c
|
|
@@ -5149,8 +5149,8 @@ failed_mount_wq:
|
|
failed_mount3a:
|
|
ext4_es_unregister_shrinker(sbi);
|
|
failed_mount3:
|
|
- del_timer_sync(&sbi->s_err_report);
|
|
flush_work(&sbi->s_error_work);
|
|
+ del_timer_sync(&sbi->s_err_report);
|
|
if (sbi->s_mmp_tsk)
|
|
kthread_stop(sbi->s_mmp_tsk);
|
|
failed_mount2:
|
|
diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c
|
|
index 5b7ba8f711538..00e3cbde472e4 100644
|
|
--- a/fs/ext4/verity.c
|
|
+++ b/fs/ext4/verity.c
|
|
@@ -201,55 +201,76 @@ static int ext4_end_enable_verity(struct file *filp, const void *desc,
|
|
struct inode *inode = file_inode(filp);
|
|
const int credits = 2; /* superblock and inode for ext4_orphan_del() */
|
|
handle_t *handle;
|
|
+ struct ext4_iloc iloc;
|
|
int err = 0;
|
|
- int err2;
|
|
|
|
- if (desc != NULL) {
|
|
- /* Succeeded; write the verity descriptor. */
|
|
- err = ext4_write_verity_descriptor(inode, desc, desc_size,
|
|
- merkle_tree_size);
|
|
-
|
|
- /* Write all pages before clearing VERITY_IN_PROGRESS. */
|
|
- if (!err)
|
|
- err = filemap_write_and_wait(inode->i_mapping);
|
|
- }
|
|
+ /*
|
|
+ * If an error already occurred (which fs/verity/ signals by passing
|
|
+ * desc == NULL), then only clean-up is needed.
|
|
+ */
|
|
+ if (desc == NULL)
|
|
+ goto cleanup;
|
|
|
|
- /* If we failed, truncate anything we wrote past i_size. */
|
|
- if (desc == NULL || err)
|
|
- ext4_truncate(inode);
|
|
+ /* Append the verity descriptor. */
|
|
+ err = ext4_write_verity_descriptor(inode, desc, desc_size,
|
|
+ merkle_tree_size);
|
|
+ if (err)
|
|
+ goto cleanup;
|
|
|
|
/*
|
|
- * We must always clean up by clearing EXT4_STATE_VERITY_IN_PROGRESS and
|
|
- * deleting the inode from the orphan list, even if something failed.
|
|
- * If everything succeeded, we'll also set the verity bit in the same
|
|
- * transaction.
|
|
+ * Write all pages (both data and verity metadata). Note that this must
|
|
+ * happen before clearing EXT4_STATE_VERITY_IN_PROGRESS; otherwise pages
|
|
+ * beyond i_size won't be written properly. For crash consistency, this
|
|
+ * also must happen before the verity inode flag gets persisted.
|
|
*/
|
|
+ err = filemap_write_and_wait(inode->i_mapping);
|
|
+ if (err)
|
|
+ goto cleanup;
|
|
|
|
- ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);
|
|
+ /*
|
|
+ * Finally, set the verity inode flag and remove the inode from the
|
|
+ * orphan list (in a single transaction).
|
|
+ */
|
|
|
|
handle = ext4_journal_start(inode, EXT4_HT_INODE, credits);
|
|
if (IS_ERR(handle)) {
|
|
- ext4_orphan_del(NULL, inode);
|
|
- return PTR_ERR(handle);
|
|
+ err = PTR_ERR(handle);
|
|
+ goto cleanup;
|
|
}
|
|
|
|
- err2 = ext4_orphan_del(handle, inode);
|
|
- if (err2)
|
|
- goto out_stop;
|
|
+ err = ext4_orphan_del(handle, inode);
|
|
+ if (err)
|
|
+ goto stop_and_cleanup;
|
|
|
|
- if (desc != NULL && !err) {
|
|
- struct ext4_iloc iloc;
|
|
+ err = ext4_reserve_inode_write(handle, inode, &iloc);
|
|
+ if (err)
|
|
+ goto stop_and_cleanup;
|
|
|
|
- err = ext4_reserve_inode_write(handle, inode, &iloc);
|
|
- if (err)
|
|
- goto out_stop;
|
|
- ext4_set_inode_flag(inode, EXT4_INODE_VERITY);
|
|
- ext4_set_inode_flags(inode, false);
|
|
- err = ext4_mark_iloc_dirty(handle, inode, &iloc);
|
|
- }
|
|
-out_stop:
|
|
+ ext4_set_inode_flag(inode, EXT4_INODE_VERITY);
|
|
+ ext4_set_inode_flags(inode, false);
|
|
+ err = ext4_mark_iloc_dirty(handle, inode, &iloc);
|
|
+ if (err)
|
|
+ goto stop_and_cleanup;
|
|
+
|
|
+ ext4_journal_stop(handle);
|
|
+
|
|
+ ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);
|
|
+ return 0;
|
|
+
|
|
+stop_and_cleanup:
|
|
ext4_journal_stop(handle);
|
|
- return err ?: err2;
|
|
+cleanup:
|
|
+ /*
|
|
+ * Verity failed to be enabled, so clean up by truncating any verity
|
|
+ * metadata that was written beyond i_size (both from cache and from
|
|
+ * disk), removing the inode from the orphan list (if it wasn't done
|
|
+ * already), and clearing EXT4_STATE_VERITY_IN_PROGRESS.
|
|
+ */
|
|
+ truncate_inode_pages(inode->i_mapping, inode->i_size);
|
|
+ ext4_truncate(inode);
|
|
+ ext4_orphan_del(NULL, inode);
|
|
+ ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);
|
|
+ return err;
|
|
}
|
|
|
|
static int ext4_get_verity_descriptor_location(struct inode *inode,
|
|
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
|
|
index 372208500f4e7..6aef74f7c9eea 100644
|
|
--- a/fs/ext4/xattr.c
|
|
+++ b/fs/ext4/xattr.c
|
|
@@ -2400,7 +2400,7 @@ retry_inode:
|
|
* external inode if possible.
|
|
*/
|
|
if (ext4_has_feature_ea_inode(inode->i_sb) &&
|
|
- !i.in_inode) {
|
|
+ i.value_len && !i.in_inode) {
|
|
i.in_inode = 1;
|
|
goto retry_inode;
|
|
}
|
|
diff --git a/fs/io_uring.c b/fs/io_uring.c
|
|
index 262fd4cfd3ad5..ef078182e7ca4 100644
|
|
--- a/fs/io_uring.c
|
|
+++ b/fs/io_uring.c
|
|
@@ -2221,6 +2221,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
|
|
__io_req_task_cancel(req, -EFAULT);
|
|
mutex_unlock(&ctx->uring_lock);
|
|
|
|
+ ctx->flags &= ~IORING_SETUP_R_DISABLED;
|
|
if (ctx->flags & IORING_SETUP_SQPOLL)
|
|
io_sq_thread_drop_mm_files();
|
|
}
|
|
@@ -8965,6 +8966,8 @@ static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
|
|
{
|
|
mutex_lock(&ctx->uring_lock);
|
|
ctx->sqo_dead = 1;
|
|
+ if (ctx->flags & IORING_SETUP_R_DISABLED)
|
|
+ io_sq_offload_start(ctx);
|
|
mutex_unlock(&ctx->uring_lock);
|
|
|
|
/* make sure callers enter the ring to get error */
|
|
@@ -9980,10 +9983,7 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
|
|
if (ctx->restrictions.registered)
|
|
ctx->restricted = 1;
|
|
|
|
- ctx->flags &= ~IORING_SETUP_R_DISABLED;
|
|
-
|
|
io_sq_offload_start(ctx);
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
|
|
index 53fcbf79bdca3..7629248fdd532 100644
|
|
--- a/fs/nfsd/filecache.c
|
|
+++ b/fs/nfsd/filecache.c
|
|
@@ -898,6 +898,8 @@ nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
|
|
continue;
|
|
if (!nfsd_match_cred(nf->nf_cred, current_cred()))
|
|
continue;
|
|
+ if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags))
|
|
+ continue;
|
|
if (nfsd_file_get(nf) != NULL)
|
|
return nf;
|
|
}
|
|
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
|
|
index 8d6d2678abade..3581ce737e853 100644
|
|
--- a/fs/nfsd/nfs4proc.c
|
|
+++ b/fs/nfsd/nfs4proc.c
|
|
@@ -1304,7 +1304,7 @@ nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src,
|
|
struct nfsd_file *dst)
|
|
{
|
|
nfs42_ssc_close(src->nf_file);
|
|
- /* 'src' is freed by nfsd4_do_async_copy */
|
|
+ fput(src->nf_file);
|
|
nfsd_file_put(dst);
|
|
mntput(ss_mnt);
|
|
}
|
|
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
|
|
index cf8b91b1ed373..a501bb9a2fac1 100644
|
|
--- a/fs/nfsd/nfs4state.c
|
|
+++ b/fs/nfsd/nfs4state.c
|
|
@@ -5372,7 +5372,7 @@ nfs4_laundromat(struct nfsd_net *nn)
|
|
idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
|
|
cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
|
|
if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&
|
|
- cps->cpntf_time > cutoff)
|
|
+ cps->cpntf_time < cutoff)
|
|
_free_cpntf_state_locked(nn, cps);
|
|
}
|
|
spin_unlock(&nn->s2s_cp_lock);
|
|
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
|
|
index 93a217e4f5630..14658b009f1bb 100644
|
|
--- a/fs/pstore/inode.c
|
|
+++ b/fs/pstore/inode.c
|
|
@@ -467,7 +467,7 @@ static struct dentry *pstore_mount(struct file_system_type *fs_type,
|
|
static void pstore_kill_sb(struct super_block *sb)
|
|
{
|
|
mutex_lock(&pstore_sb_lock);
|
|
- WARN_ON(pstore_sb != sb);
|
|
+ WARN_ON(pstore_sb && pstore_sb != sb);
|
|
|
|
kill_litter_super(sb);
|
|
pstore_sb = NULL;
|
|
diff --git a/fs/select.c b/fs/select.c
|
|
index 37aaa8317f3ae..945896d0ac9e7 100644
|
|
--- a/fs/select.c
|
|
+++ b/fs/select.c
|
|
@@ -1055,10 +1055,9 @@ static long do_restart_poll(struct restart_block *restart_block)
|
|
|
|
ret = do_sys_poll(ufds, nfds, to);
|
|
|
|
- if (ret == -ERESTARTNOHAND) {
|
|
- restart_block->fn = do_restart_poll;
|
|
- ret = -ERESTART_RESTARTBLOCK;
|
|
- }
|
|
+ if (ret == -ERESTARTNOHAND)
|
|
+ ret = set_restart_fn(restart_block, do_restart_poll);
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
@@ -1080,7 +1079,6 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
|
|
struct restart_block *restart_block;
|
|
|
|
restart_block = ¤t->restart_block;
|
|
- restart_block->fn = do_restart_poll;
|
|
restart_block->poll.ufds = ufds;
|
|
restart_block->poll.nfds = nfds;
|
|
|
|
@@ -1091,7 +1089,7 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
|
|
} else
|
|
restart_block->poll.has_timeout = 0;
|
|
|
|
- ret = -ERESTART_RESTARTBLOCK;
|
|
+ ret = set_restart_fn(restart_block, do_restart_poll);
|
|
}
|
|
return ret;
|
|
}
|
|
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
|
|
index 3fe933b1010c3..2243dc1fb48fe 100644
|
|
--- a/fs/zonefs/super.c
|
|
+++ b/fs/zonefs/super.c
|
|
@@ -159,6 +159,21 @@ static int zonefs_writepages(struct address_space *mapping,
|
|
return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
|
|
}
|
|
|
|
+static int zonefs_swap_activate(struct swap_info_struct *sis,
|
|
+ struct file *swap_file, sector_t *span)
|
|
+{
|
|
+ struct inode *inode = file_inode(swap_file);
|
|
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
|
|
+
|
|
+ if (zi->i_ztype != ZONEFS_ZTYPE_CNV) {
|
|
+ zonefs_err(inode->i_sb,
|
|
+ "swap file: not a conventional zone file\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return iomap_swapfile_activate(sis, swap_file, span, &zonefs_iomap_ops);
|
|
+}
|
|
+
|
|
static const struct address_space_operations zonefs_file_aops = {
|
|
.readpage = zonefs_readpage,
|
|
.readahead = zonefs_readahead,
|
|
@@ -171,6 +186,7 @@ static const struct address_space_operations zonefs_file_aops = {
|
|
.is_partially_uptodate = iomap_is_partially_uptodate,
|
|
.error_remove_page = generic_error_remove_page,
|
|
.direct_IO = noop_direct_IO,
|
|
+ .swap_activate = zonefs_swap_activate,
|
|
};
|
|
|
|
static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
|
|
@@ -719,6 +735,68 @@ out_release:
|
|
return ret;
|
|
}
|
|
|
|
+/*
|
|
+ * Do not exceed the LFS limits nor the file zone size. If pos is under the
|
|
+ * limit it becomes a short access. If it exceeds the limit, return -EFBIG.
|
|
+ */
|
|
+static loff_t zonefs_write_check_limits(struct file *file, loff_t pos,
|
|
+ loff_t count)
|
|
+{
|
|
+ struct inode *inode = file_inode(file);
|
|
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
|
|
+ loff_t limit = rlimit(RLIMIT_FSIZE);
|
|
+ loff_t max_size = zi->i_max_size;
|
|
+
|
|
+ if (limit != RLIM_INFINITY) {
|
|
+ if (pos >= limit) {
|
|
+ send_sig(SIGXFSZ, current, 0);
|
|
+ return -EFBIG;
|
|
+ }
|
|
+ count = min(count, limit - pos);
|
|
+ }
|
|
+
|
|
+ if (!(file->f_flags & O_LARGEFILE))
|
|
+ max_size = min_t(loff_t, MAX_NON_LFS, max_size);
|
|
+
|
|
+ if (unlikely(pos >= max_size))
|
|
+ return -EFBIG;
|
|
+
|
|
+ return min(count, max_size - pos);
|
|
+}
|
|
+
|
|
+static ssize_t zonefs_write_checks(struct kiocb *iocb, struct iov_iter *from)
|
|
+{
|
|
+ struct file *file = iocb->ki_filp;
|
|
+ struct inode *inode = file_inode(file);
|
|
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
|
|
+ loff_t count;
|
|
+
|
|
+ if (IS_SWAPFILE(inode))
|
|
+ return -ETXTBSY;
|
|
+
|
|
+ if (!iov_iter_count(from))
|
|
+ return 0;
|
|
+
|
|
+ if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (iocb->ki_flags & IOCB_APPEND) {
|
|
+ if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
|
|
+ return -EINVAL;
|
|
+ mutex_lock(&zi->i_truncate_mutex);
|
|
+ iocb->ki_pos = zi->i_wpoffset;
|
|
+ mutex_unlock(&zi->i_truncate_mutex);
|
|
+ }
|
|
+
|
|
+ count = zonefs_write_check_limits(file, iocb->ki_pos,
|
|
+ iov_iter_count(from));
|
|
+ if (count < 0)
|
|
+ return count;
|
|
+
|
|
+ iov_iter_truncate(from, count);
|
|
+ return iov_iter_count(from);
|
|
+}
|
|
+
|
|
/*
|
|
* Handle direct writes. For sequential zone files, this is the only possible
|
|
* write path. For these files, check that the user is issuing writes
|
|
@@ -736,8 +814,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
|
|
struct super_block *sb = inode->i_sb;
|
|
bool sync = is_sync_kiocb(iocb);
|
|
bool append = false;
|
|
- size_t count;
|
|
- ssize_t ret;
|
|
+ ssize_t ret, count;
|
|
|
|
/*
|
|
* For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
|
|
@@ -755,12 +832,11 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
|
|
inode_lock(inode);
|
|
}
|
|
|
|
- ret = generic_write_checks(iocb, from);
|
|
- if (ret <= 0)
|
|
+ count = zonefs_write_checks(iocb, from);
|
|
+ if (count <= 0) {
|
|
+ ret = count;
|
|
goto inode_unlock;
|
|
-
|
|
- iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
|
|
- count = iov_iter_count(from);
|
|
+ }
|
|
|
|
if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
|
|
ret = -EINVAL;
|
|
@@ -820,12 +896,10 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
|
|
inode_lock(inode);
|
|
}
|
|
|
|
- ret = generic_write_checks(iocb, from);
|
|
+ ret = zonefs_write_checks(iocb, from);
|
|
if (ret <= 0)
|
|
goto inode_unlock;
|
|
|
|
- iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
|
|
-
|
|
ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops);
|
|
if (ret > 0)
|
|
iocb->ki_pos += ret;
|
|
@@ -958,9 +1032,7 @@ static int zonefs_open_zone(struct inode *inode)
|
|
|
|
mutex_lock(&zi->i_truncate_mutex);
|
|
|
|
- zi->i_wr_refcnt++;
|
|
- if (zi->i_wr_refcnt == 1) {
|
|
-
|
|
+ if (!zi->i_wr_refcnt) {
|
|
if (atomic_inc_return(&sbi->s_open_zones) > sbi->s_max_open_zones) {
|
|
atomic_dec(&sbi->s_open_zones);
|
|
ret = -EBUSY;
|
|
@@ -970,7 +1042,6 @@ static int zonefs_open_zone(struct inode *inode)
|
|
if (i_size_read(inode) < zi->i_max_size) {
|
|
ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
|
|
if (ret) {
|
|
- zi->i_wr_refcnt--;
|
|
atomic_dec(&sbi->s_open_zones);
|
|
goto unlock;
|
|
}
|
|
@@ -978,6 +1049,8 @@ static int zonefs_open_zone(struct inode *inode)
|
|
}
|
|
}
|
|
|
|
+ zi->i_wr_refcnt++;
|
|
+
|
|
unlock:
|
|
mutex_unlock(&zi->i_truncate_mutex);
|
|
|
|
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
|
|
index 2564e66e67d74..b5bef31991967 100644
|
|
--- a/include/drm/ttm/ttm_bo_api.h
|
|
+++ b/include/drm/ttm/ttm_bo_api.h
|
|
@@ -600,6 +600,7 @@ static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo)
|
|
static inline void ttm_bo_pin(struct ttm_buffer_object *bo)
|
|
{
|
|
dma_resv_assert_held(bo->base.resv);
|
|
+ WARN_ON_ONCE(!kref_read(&bo->kref));
|
|
++bo->pin_count;
|
|
}
|
|
|
|
@@ -612,8 +613,11 @@ static inline void ttm_bo_pin(struct ttm_buffer_object *bo)
|
|
static inline void ttm_bo_unpin(struct ttm_buffer_object *bo)
|
|
{
|
|
dma_resv_assert_held(bo->base.resv);
|
|
- WARN_ON_ONCE(!bo->pin_count);
|
|
- --bo->pin_count;
|
|
+ WARN_ON_ONCE(!kref_read(&bo->kref));
|
|
+ if (bo->pin_count)
|
|
+ --bo->pin_count;
|
|
+ else
|
|
+ WARN_ON_ONCE(true);
|
|
}
|
|
|
|
int ttm_mem_evict_first(struct ttm_bo_device *bdev,
|
|
diff --git a/include/linux/efi.h b/include/linux/efi.h
|
|
index 763b816ba19ca..119262585e9b3 100644
|
|
--- a/include/linux/efi.h
|
|
+++ b/include/linux/efi.h
|
|
@@ -72,8 +72,10 @@ typedef void *efi_handle_t;
|
|
*/
|
|
typedef guid_t efi_guid_t __aligned(__alignof__(u32));
|
|
|
|
-#define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \
|
|
- GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)
|
|
+#define EFI_GUID(a, b, c, d...) (efi_guid_t){ { \
|
|
+ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
|
|
+ (b) & 0xff, ((b) >> 8) & 0xff, \
|
|
+ (c) & 0xff, ((c) >> 8) & 0xff, d } }
|
|
|
|
/*
|
|
* Generic EFI table header
|
|
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
|
|
index 9b2158c69275e..157762db9d4bf 100644
|
|
--- a/include/linux/thread_info.h
|
|
+++ b/include/linux/thread_info.h
|
|
@@ -11,6 +11,7 @@
|
|
#include <linux/types.h>
|
|
#include <linux/bug.h>
|
|
#include <linux/restart_block.h>
|
|
+#include <linux/errno.h>
|
|
|
|
#ifdef CONFIG_THREAD_INFO_IN_TASK
|
|
/*
|
|
@@ -59,6 +60,18 @@ enum syscall_work_bit {
|
|
|
|
#ifdef __KERNEL__
|
|
|
|
+#ifndef arch_set_restart_data
|
|
+#define arch_set_restart_data(restart) do { } while (0)
|
|
+#endif
|
|
+
|
|
+static inline long set_restart_fn(struct restart_block *restart,
|
|
+ long (*fn)(struct restart_block *))
|
|
+{
|
|
+ restart->fn = fn;
|
|
+ arch_set_restart_data(restart);
|
|
+ return -ERESTART_RESTARTBLOCK;
|
|
+}
|
|
+
|
|
#ifndef THREAD_ALIGN
|
|
#define THREAD_ALIGN THREAD_SIZE
|
|
#endif
|
|
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
|
|
index 6b03fdd69d274..712363c7a2e8e 100644
|
|
--- a/include/linux/usb_usual.h
|
|
+++ b/include/linux/usb_usual.h
|
|
@@ -86,6 +86,8 @@
|
|
/* lies about caching, so always sync */ \
|
|
US_FLAG(NO_SAME, 0x40000000) \
|
|
/* Cannot handle WRITE_SAME */ \
|
|
+ US_FLAG(SENSE_AFTER_SYNC, 0x80000000) \
|
|
+ /* Do REQUEST_SENSE after SYNCHRONIZE_CACHE */ \
|
|
|
|
#define US_FLAG(name, value) US_FL_##name = value ,
|
|
enum { US_DO_ALL_FLAGS };
|
|
diff --git a/kernel/futex.c b/kernel/futex.c
|
|
index 45a13eb8894e5..ab3df9e86a1fc 100644
|
|
--- a/kernel/futex.c
|
|
+++ b/kernel/futex.c
|
|
@@ -2728,14 +2728,13 @@ retry:
|
|
goto out;
|
|
|
|
restart = ¤t->restart_block;
|
|
- restart->fn = futex_wait_restart;
|
|
restart->futex.uaddr = uaddr;
|
|
restart->futex.val = val;
|
|
restart->futex.time = *abs_time;
|
|
restart->futex.bitset = bitset;
|
|
restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
|
|
|
|
- ret = -ERESTART_RESTARTBLOCK;
|
|
+ ret = set_restart_fn(restart, futex_wait_restart);
|
|
|
|
out:
|
|
if (to) {
|
|
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
|
|
index dec3f73e8db92..21ea370fccda7 100644
|
|
--- a/kernel/irq/manage.c
|
|
+++ b/kernel/irq/manage.c
|
|
@@ -1142,11 +1142,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
|
|
irqreturn_t ret;
|
|
|
|
local_bh_disable();
|
|
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
|
|
+ local_irq_disable();
|
|
ret = action->thread_fn(action->irq, action->dev_id);
|
|
if (ret == IRQ_HANDLED)
|
|
atomic_inc(&desc->threads_handled);
|
|
|
|
irq_finalize_oneshot(desc, action);
|
|
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
|
|
+ local_irq_enable();
|
|
local_bh_enable();
|
|
return ret;
|
|
}
|
|
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
|
|
index c6a39d662935e..ba39fbb1f8e73 100644
|
|
--- a/kernel/jump_label.c
|
|
+++ b/kernel/jump_label.c
|
|
@@ -407,6 +407,14 @@ static bool jump_label_can_update(struct jump_entry *entry, bool init)
|
|
return false;
|
|
|
|
if (!kernel_text_address(jump_entry_code(entry))) {
|
|
+ /*
|
|
+ * This skips patching built-in __exit, which
|
|
+ * is part of init_section_contains() but is
|
|
+ * not part of kernel_text_address().
|
|
+ *
|
|
+ * Skipping built-in __exit is fine since it
|
|
+ * will never be executed.
|
|
+ */
|
|
WARN_ONCE(!jump_entry_is_init(entry),
|
|
"can't patch jump_label at %pS",
|
|
(void *)jump_entry_code(entry));
|
|
diff --git a/kernel/static_call.c b/kernel/static_call.c
|
|
index 84565c2a41b8f..db914da6e7854 100644
|
|
--- a/kernel/static_call.c
|
|
+++ b/kernel/static_call.c
|
|
@@ -182,7 +182,16 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
|
|
}
|
|
|
|
if (!kernel_text_address((unsigned long)site_addr)) {
|
|
- WARN_ONCE(1, "can't patch static call site at %pS",
|
|
+ /*
|
|
+ * This skips patching built-in __exit, which
|
|
+ * is part of init_section_contains() but is
|
|
+ * not part of kernel_text_address().
|
|
+ *
|
|
+ * Skipping built-in __exit is fine since it
|
|
+ * will never be executed.
|
|
+ */
|
|
+ WARN_ONCE(!static_call_is_init(site),
|
|
+ "can't patch static call site at %pS",
|
|
site_addr);
|
|
continue;
|
|
}
|
|
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
|
|
index f4ace1bf83828..daeaa7140d0aa 100644
|
|
--- a/kernel/time/alarmtimer.c
|
|
+++ b/kernel/time/alarmtimer.c
|
|
@@ -848,9 +848,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
|
|
if (flags == TIMER_ABSTIME)
|
|
return -ERESTARTNOHAND;
|
|
|
|
- restart->fn = alarm_timer_nsleep_restart;
|
|
restart->nanosleep.clockid = type;
|
|
restart->nanosleep.expires = exp;
|
|
+ set_restart_fn(restart, alarm_timer_nsleep_restart);
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
|
|
index 788b9d137de4c..5c9d968187ae8 100644
|
|
--- a/kernel/time/hrtimer.c
|
|
+++ b/kernel/time/hrtimer.c
|
|
@@ -1957,9 +1957,9 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
|
|
}
|
|
|
|
restart = ¤t->restart_block;
|
|
- restart->fn = hrtimer_nanosleep_restart;
|
|
restart->nanosleep.clockid = t.timer.base->clockid;
|
|
restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
|
|
+ set_restart_fn(restart, hrtimer_nanosleep_restart);
|
|
out:
|
|
destroy_hrtimer_on_stack(&t.timer);
|
|
return ret;
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index a71758e34e456..9abe15255bc4e 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -1480,8 +1480,8 @@ static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
 		if (flags & TIMER_ABSTIME)
 			return -ERESTARTNOHAND;
 
-		restart_block->fn = posix_cpu_nsleep_restart;
 		restart_block->nanosleep.clockid = which_clock;
+		set_restart_fn(restart_block, posix_cpu_nsleep_restart);
 	}
 	return error;
 }
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index ac2a4a7711da4..edb6ac17cecab 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -439,7 +439,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 	if (len == 0 || len & 3)
 		return -EINVAL;
 
-	skb = netdev_alloc_skb(NULL, len);
+	skb = __netdev_alloc_skb(NULL, len, GFP_ATOMIC | __GFP_NOWARN);
 	if (!skb)
 		return -ENOMEM;
 
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 4187745887f0f..7034b4755fa18 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1413,7 +1413,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
 
 sendit:
 	if (svc_authorise(rqstp))
-		goto close;
+		goto close_xprt;
 	return 1;		/* Caller can now send it */
 
 release_dropit:
@@ -1425,6 +1425,8 @@ release_dropit:
 	return 0;
 
 close:
+	svc_authorise(rqstp);
+close_xprt:
 	if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
 		svc_close_xprt(rqstp->rq_xprt);
 	dprintk("svc: svc_process close\n");
@@ -1433,7 +1435,7 @@ release_dropit:
 err_short_len:
 	svc_printk(rqstp, "short len %zd, dropping request\n",
 		   argv->iov_len);
-	goto close;
+	goto close_xprt;
 
 err_bad_rpc:
 	serv->sv_stats->rpcbadfmt++;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index dcc50ae545506..3cdd71a8df1e7 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -1060,7 +1060,7 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st
 	struct svc_xprt *xprt;
 	int ret = 0;
 
-	spin_lock(&serv->sv_lock);
+	spin_lock_bh(&serv->sv_lock);
 	list_for_each_entry(xprt, xprt_list, xpt_list) {
 		if (xprt->xpt_net != net)
 			continue;
@@ -1068,7 +1068,7 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st
 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
 		svc_xprt_enqueue(xprt);
 	}
-	spin_unlock(&serv->sv_lock);
+	spin_unlock_bh(&serv->sv_lock);
 	return ret;
 }
 
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index 63f8be974df20..8186ab6f99f19 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -252,9 +252,9 @@ xprt_setup_rdma_bc(struct xprt_create *args)
 	xprt->timeout = &xprt_rdma_bc_timeout;
 	xprt_set_bound(xprt);
 	xprt_set_connected(xprt);
-	xprt->bind_timeout = RPCRDMA_BIND_TO;
-	xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
-	xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;
+	xprt->bind_timeout = 0;
+	xprt->reestablish_timeout = 0;
+	xprt->idle_timeout = 0;
 
 	xprt->prot = XPRT_TRANSPORT_BC_RDMA;
 	xprt->ops = &xprt_rdma_bc_procs;
diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
index 8e0c0380b4c4b..1a14c083e8cea 100644
--- a/sound/firewire/dice/dice-stream.c
+++ b/sound/firewire/dice/dice-stream.c
@@ -493,11 +493,10 @@ void snd_dice_stream_stop_duplex(struct snd_dice *dice)
 	struct reg_params tx_params, rx_params;
 
 	if (dice->substreams_counter == 0) {
-		if (get_register_params(dice, &tx_params, &rx_params) >= 0) {
-			amdtp_domain_stop(&dice->domain);
+		if (get_register_params(dice, &tx_params, &rx_params) >= 0)
 			finish_session(dice, &tx_params, &rx_params);
-		}
 
+		amdtp_domain_stop(&dice->domain);
 		release_resources(dice);
 	}
 }
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 8060cc86dfea3..96903295a9677 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -4065,7 +4065,7 @@ static int add_micmute_led_hook(struct hda_codec *codec)
 
 	spec->micmute_led.led_mode = MICMUTE_LED_FOLLOW_MUTE;
 	spec->micmute_led.capture = 0;
-	spec->micmute_led.led_value = 0;
+	spec->micmute_led.led_value = -1;
 	spec->micmute_led.old_hook = spec->cap_sync_hook;
 	spec->cap_sync_hook = update_micmute_led;
 	if (!snd_hda_gen_add_kctl(spec, NULL, &micmute_led_mode_ctl))
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b47504fa8dfd0..316b9b4ccb32d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4225,6 +4225,12 @@ static void alc_fixup_hp_gpio_led(struct hda_codec *codec,
 	}
 }
 
+static void alc236_fixup_hp_gpio_led(struct hda_codec *codec,
+				const struct hda_fixup *fix, int action)
+{
+	alc_fixup_hp_gpio_led(codec, action, 0x02, 0x01);
+}
+
 static void alc269_fixup_hp_gpio_led(struct hda_codec *codec,
 				const struct hda_fixup *fix, int action)
 {
@@ -6381,6 +6387,7 @@ enum {
 	ALC294_FIXUP_ASUS_GX502_VERBS,
 	ALC285_FIXUP_HP_GPIO_LED,
 	ALC285_FIXUP_HP_MUTE_LED,
+	ALC236_FIXUP_HP_GPIO_LED,
 	ALC236_FIXUP_HP_MUTE_LED,
 	ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
 	ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
@@ -7616,6 +7623,10 @@ static const struct hda_fixup alc269_fixups[] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc285_fixup_hp_mute_led,
 	},
+	[ALC236_FIXUP_HP_GPIO_LED] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc236_fixup_hp_gpio_led,
+	},
 	[ALC236_FIXUP_HP_MUTE_LED] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc236_fixup_hp_mute_led,
@@ -8045,9 +8056,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x8783, "HP ZBook Fury 15 G7 Mobile Workstation",
 		      ALC285_FIXUP_HP_GPIO_AMP_INIT),
 	SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
+	SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
 	SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
 	SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
 	SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+	SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+	SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -8242,7 +8256,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
 	SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
 	SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+	SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+	SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
 	SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
 	SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
diff --git a/sound/soc/codecs/ak4458.c b/sound/soc/codecs/ak4458.c
index 472caad17012e..85a1d00894a9c 100644
--- a/sound/soc/codecs/ak4458.c
+++ b/sound/soc/codecs/ak4458.c
@@ -812,6 +812,7 @@ static const struct of_device_id ak4458_of_match[] = {
 	{ .compatible = "asahi-kasei,ak4497", .data = &ak4497_drvdata},
 	{ },
 };
+MODULE_DEVICE_TABLE(of, ak4458_of_match);
 
 static struct i2c_driver ak4458_i2c_driver = {
 	.driver = {
diff --git a/sound/soc/codecs/ak5558.c b/sound/soc/codecs/ak5558.c
index 8a32b0139cb0c..85bdd05341803 100644
--- a/sound/soc/codecs/ak5558.c
+++ b/sound/soc/codecs/ak5558.c
@@ -419,6 +419,7 @@ static const struct of_device_id ak5558_i2c_dt_ids[] __maybe_unused = {
 	{ .compatible = "asahi-kasei,ak5558"},
 	{ }
 };
+MODULE_DEVICE_TABLE(of, ak5558_i2c_dt_ids);
 
 static struct i2c_driver ak5558_i2c_driver = {
 	.driver = {
diff --git a/sound/soc/codecs/lpass-va-macro.c b/sound/soc/codecs/lpass-va-macro.c
index 91e6890d6efcb..3d6976a3d9e42 100644
--- a/sound/soc/codecs/lpass-va-macro.c
+++ b/sound/soc/codecs/lpass-va-macro.c
@@ -189,7 +189,6 @@ struct va_macro {
 	struct device *dev;
 	unsigned long active_ch_mask[VA_MACRO_MAX_DAIS];
 	unsigned long active_ch_cnt[VA_MACRO_MAX_DAIS];
-	unsigned long active_decimator[VA_MACRO_MAX_DAIS];
 	u16 dmic_clk_div;
 
 	int dec_mode[VA_MACRO_NUM_DECIMATORS];
@@ -549,11 +548,9 @@ static int va_macro_tx_mixer_put(struct snd_kcontrol *kcontrol,
 	if (enable) {
 		set_bit(dec_id, &va->active_ch_mask[dai_id]);
 		va->active_ch_cnt[dai_id]++;
-		va->active_decimator[dai_id] = dec_id;
 	} else {
 		clear_bit(dec_id, &va->active_ch_mask[dai_id]);
 		va->active_ch_cnt[dai_id]--;
-		va->active_decimator[dai_id] = -1;
 	}
 
 	snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, enable, update);
@@ -880,18 +877,19 @@ static int va_macro_digital_mute(struct snd_soc_dai *dai, int mute, int stream)
 	struct va_macro *va = snd_soc_component_get_drvdata(component);
 	u16 tx_vol_ctl_reg, decimator;
 
-	decimator = va->active_decimator[dai->id];
-
-	tx_vol_ctl_reg = CDC_VA_TX0_TX_PATH_CTL +
-				VA_MACRO_TX_PATH_OFFSET * decimator;
-	if (mute)
-		snd_soc_component_update_bits(component, tx_vol_ctl_reg,
-				CDC_VA_TX_PATH_PGA_MUTE_EN_MASK,
-				CDC_VA_TX_PATH_PGA_MUTE_EN);
-	else
-		snd_soc_component_update_bits(component, tx_vol_ctl_reg,
-				CDC_VA_TX_PATH_PGA_MUTE_EN_MASK,
-				CDC_VA_TX_PATH_PGA_MUTE_DISABLE);
+	for_each_set_bit(decimator, &va->active_ch_mask[dai->id],
+			 VA_MACRO_DEC_MAX) {
+		tx_vol_ctl_reg = CDC_VA_TX0_TX_PATH_CTL +
+					VA_MACRO_TX_PATH_OFFSET * decimator;
+		if (mute)
+			snd_soc_component_update_bits(component, tx_vol_ctl_reg,
+					CDC_VA_TX_PATH_PGA_MUTE_EN_MASK,
+					CDC_VA_TX_PATH_PGA_MUTE_EN);
+		else
+			snd_soc_component_update_bits(component, tx_vol_ctl_reg,
+					CDC_VA_TX_PATH_PGA_MUTE_EN_MASK,
+					CDC_VA_TX_PATH_PGA_MUTE_DISABLE);
+	}
 
 	return 0;
 }
diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
index 25f1df214ca5d..cd59aa4393738 100644
--- a/sound/soc/codecs/lpass-wsa-macro.c
+++ b/sound/soc/codecs/lpass-wsa-macro.c
@@ -1214,14 +1214,16 @@ static int wsa_macro_enable_mix_path(struct snd_soc_dapm_widget *w,
 				     struct snd_kcontrol *kcontrol, int event)
 {
 	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
-	u16 gain_reg;
+	u16 path_reg, gain_reg;
 	int val;
 
-	switch (w->reg) {
-	case CDC_WSA_RX0_RX_PATH_MIX_CTL:
+	switch (w->shift) {
+	case WSA_MACRO_RX_MIX0:
+		path_reg = CDC_WSA_RX0_RX_PATH_MIX_CTL;
 		gain_reg = CDC_WSA_RX0_RX_VOL_MIX_CTL;
 		break;
-	case CDC_WSA_RX1_RX_PATH_MIX_CTL:
+	case WSA_MACRO_RX_MIX1:
+		path_reg = CDC_WSA_RX1_RX_PATH_MIX_CTL;
 		gain_reg = CDC_WSA_RX1_RX_VOL_MIX_CTL;
 		break;
 	default:
@@ -1234,7 +1236,7 @@ static int wsa_macro_enable_mix_path(struct snd_soc_dapm_widget *w,
 		snd_soc_component_write(component, gain_reg, val);
 		break;
 	case SND_SOC_DAPM_POST_PMD:
-		snd_soc_component_update_bits(component, w->reg,
+		snd_soc_component_update_bits(component, path_reg,
 					      CDC_WSA_RX_PATH_MIX_CLK_EN_MASK,
 					      CDC_WSA_RX_PATH_MIX_CLK_DISABLE);
 		break;
@@ -2071,14 +2073,14 @@ static const struct snd_soc_dapm_widget wsa_macro_dapm_widgets[] = {
 	SND_SOC_DAPM_MUX("WSA_RX0 INP0", SND_SOC_NOPM, 0, 0, &rx0_prim_inp0_mux),
 	SND_SOC_DAPM_MUX("WSA_RX0 INP1", SND_SOC_NOPM, 0, 0, &rx0_prim_inp1_mux),
 	SND_SOC_DAPM_MUX("WSA_RX0 INP2", SND_SOC_NOPM, 0, 0, &rx0_prim_inp2_mux),
-	SND_SOC_DAPM_MUX_E("WSA_RX0 MIX INP", CDC_WSA_RX0_RX_PATH_MIX_CTL,
-			   0, 0, &rx0_mix_mux, wsa_macro_enable_mix_path,
+	SND_SOC_DAPM_MUX_E("WSA_RX0 MIX INP", SND_SOC_NOPM, WSA_MACRO_RX_MIX0,
+			   0, &rx0_mix_mux, wsa_macro_enable_mix_path,
 			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 	SND_SOC_DAPM_MUX("WSA_RX1 INP0", SND_SOC_NOPM, 0, 0, &rx1_prim_inp0_mux),
 	SND_SOC_DAPM_MUX("WSA_RX1 INP1", SND_SOC_NOPM, 0, 0, &rx1_prim_inp1_mux),
 	SND_SOC_DAPM_MUX("WSA_RX1 INP2", SND_SOC_NOPM, 0, 0, &rx1_prim_inp2_mux),
-	SND_SOC_DAPM_MUX_E("WSA_RX1 MIX INP", CDC_WSA_RX1_RX_PATH_MIX_CTL,
-			   0, 0, &rx1_mix_mux, wsa_macro_enable_mix_path,
+	SND_SOC_DAPM_MUX_E("WSA_RX1 MIX INP", SND_SOC_NOPM, WSA_MACRO_RX_MIX1,
+			   0, &rx1_mix_mux, wsa_macro_enable_mix_path,
 			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 
 	SND_SOC_DAPM_MIXER_E("WSA_RX INT0 MIX", SND_SOC_NOPM, 0, 0, NULL, 0,
diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
index 40f682f5dab8b..d18ae5e3ee809 100644
--- a/sound/soc/codecs/wcd934x.c
+++ b/sound/soc/codecs/wcd934x.c
@@ -1873,6 +1873,12 @@ static int wcd934x_set_channel_map(struct snd_soc_dai *dai,
 
 	wcd = snd_soc_component_get_drvdata(dai->component);
 
+	if (tx_num > WCD934X_TX_MAX || rx_num > WCD934X_RX_MAX) {
+		dev_err(wcd->dev, "Invalid tx %d or rx %d channel count\n",
+			tx_num, rx_num);
+		return -EINVAL;
+	}
+
 	if (!tx_slot || !rx_slot) {
 		dev_err(wcd->dev, "Invalid tx_slot=%p, rx_slot=%p\n",
 			tx_slot, rx_slot);
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 404be27c15fed..1d774c876c52e 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -878,6 +878,7 @@ static int fsl_ssi_hw_free(struct snd_pcm_substream *substream,
 static int _fsl_ssi_set_dai_fmt(struct fsl_ssi *ssi, unsigned int fmt)
 {
 	u32 strcr = 0, scr = 0, stcr, srcr, mask;
+	unsigned int slots;
 
 	ssi->dai_fmt = fmt;
 
@@ -909,10 +910,11 @@ static int _fsl_ssi_set_dai_fmt(struct fsl_ssi *ssi, unsigned int fmt)
 		return -EINVAL;
 	}
 
+	slots = ssi->slots ? : 2;
 	regmap_update_bits(ssi->regs, REG_SSI_STCCR,
-			   SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2));
+			   SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots));
 	regmap_update_bits(ssi->regs, REG_SSI_SRCCR,
-			   SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2));
+			   SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots));
 
 	/* Data on rising edge of bclk, frame low, 1clk before data */
 	strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP | SSI_STCR_TEFS;
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index ab31045cfc952..6cada4c1e283b 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -172,15 +172,16 @@ int asoc_simple_parse_clk(struct device *dev,
 	 * or device's module clock.
 	 */
 	clk = devm_get_clk_from_child(dev, node, NULL);
-	if (IS_ERR(clk))
-		clk = devm_get_clk_from_child(dev, dlc->of_node, NULL);
-
 	if (!IS_ERR(clk)) {
-		simple_dai->clk = clk;
 		simple_dai->sysclk = clk_get_rate(clk);
-	} else if (!of_property_read_u32(node, "system-clock-frequency",
-					 &val)) {
+
+		simple_dai->clk = clk;
+	} else if (!of_property_read_u32(node, "system-clock-frequency", &val)) {
 		simple_dai->sysclk = val;
+	} else {
+		clk = devm_get_clk_from_child(dev, dlc->of_node, NULL);
+		if (!IS_ERR(clk))
+			simple_dai->sysclk = clk_get_rate(clk);
 	}
 
 	if (of_property_read_bool(node, "system-clock-direction-out"))
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index f00d4e417b6cf..21d2e1cba3803 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -577,7 +577,7 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
 		},
 		.driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
 					BYT_RT5640_JD_SRC_JD1_IN4P |
-					BYT_RT5640_OVCD_TH_1500UA |
+					BYT_RT5640_OVCD_TH_2000UA |
 					BYT_RT5640_OVCD_SF_0P75 |
 					BYT_RT5640_MCLK_EN),
 	},
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index d55851d2049e2..cd4fb77e9d519 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -737,7 +737,7 @@ static void of_lpass_cpu_parse_dai_data(struct device *dev,
 
 	for_each_child_of_node(dev->of_node, node) {
 		ret = of_property_read_u32(node, "reg", &id);
-		if (ret || id < 0 || id >= data->variant->num_dai) {
+		if (ret || id < 0) {
 			dev_err(dev, "valid dai id not found: %d\n", ret);
 			continue;
 		}
diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
index 6c2760e27ea6f..153e9b2de0b53 100644
--- a/sound/soc/qcom/sdm845.c
+++ b/sound/soc/qcom/sdm845.c
@@ -27,18 +27,18 @@
 #define SPK_TDM_RX_MASK 0x03
 #define NUM_TDM_SLOTS 8
 #define SLIM_MAX_TX_PORTS 16
-#define SLIM_MAX_RX_PORTS 16
+#define SLIM_MAX_RX_PORTS 13
 #define WCD934X_DEFAULT_MCLK_RATE 9600000
 
 struct sdm845_snd_data {
 	struct snd_soc_jack jack;
 	bool jack_setup;
-	bool stream_prepared[SLIM_MAX_RX_PORTS];
+	bool stream_prepared[AFE_PORT_MAX];
 	struct snd_soc_card *card;
 	uint32_t pri_mi2s_clk_count;
 	uint32_t sec_mi2s_clk_count;
 	uint32_t quat_tdm_clk_count;
-	struct sdw_stream_runtime *sruntime[SLIM_MAX_RX_PORTS];
+	struct sdw_stream_runtime *sruntime[AFE_PORT_MAX];
 };
 
 static unsigned int tdm_slot_offset[8] = {0, 4, 8, 12, 16, 20, 24, 28};
diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
index 1799fc56a3e41..012bac41fee0a 100644
--- a/sound/soc/sof/intel/hda-dsp.c
+++ b/sound/soc/sof/intel/hda-dsp.c
@@ -207,7 +207,7 @@ int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
 
 	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
 				HDA_DSP_REG_ADSPCS, adspcs,
-				!(adspcs & HDA_DSP_ADSPCS_SPA_MASK(core_mask)),
+				!(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
 				HDA_DSP_REG_POLL_INTERVAL_US,
 				HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
 	if (ret < 0)
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index 509a9b2564230..de6bc501f1b5f 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -896,6 +896,7 @@ free_streams:
 	/* dsp_unmap: not currently used */
 	iounmap(sdev->bar[HDA_DSP_BAR]);
 hdac_bus_unmap:
+	platform_device_unregister(hdev->dmic_dev);
 	iounmap(bus->remap_addr);
 	hda_codec_i915_exit(sdev);
 err:
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 448de77f43fd8..5171b3dc1eb9e 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -2883,7 +2883,7 @@ static int snd_djm_controls_put(struct snd_kcontrol *kctl, struct snd_ctl_elem_v
 	u8 group = (private_value & SND_DJM_GROUP_MASK) >> SND_DJM_GROUP_SHIFT;
 	u16 value = elem->value.enumerated.item[0];
 
-	kctl->private_value = ((device << SND_DJM_DEVICE_SHIFT) |
+	kctl->private_value = (((unsigned long)device << SND_DJM_DEVICE_SHIFT) |
 			      (group << SND_DJM_GROUP_SHIFT) |
 			      value);
 
@@ -2921,7 +2921,7 @@ static int snd_djm_controls_create(struct usb_mixer_interface *mixer,
 		value = device->controls[i].default_value;
 		knew.name = device->controls[i].name;
 		knew.private_value = (
-			(device_idx << SND_DJM_DEVICE_SHIFT) |
+			((unsigned long)device_idx << SND_DJM_DEVICE_SHIFT) |
 			(i << SND_DJM_GROUP_SHIFT) |
 			value);
 		err = snd_djm_controls_update(mixer, device_idx, i, value);