diff --git a/Makefile b/Makefile
index 21e58bd54715..adfc88f00f07 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
-SUBLEVEL = 20
+SUBLEVEL = 21
EXTRAVERSION =
NAME = Kleptomaniac Octopus
diff --git a/arch/arm/mach-npcm/Kconfig b/arch/arm/mach-npcm/Kconfig
index 880bc2a5cada..7f7002dc2b21 100644
--- a/arch/arm/mach-npcm/Kconfig
+++ b/arch/arm/mach-npcm/Kconfig
@@ -11,7 +11,7 @@ config ARCH_NPCM7XX
depends on ARCH_MULTI_V7
select PINCTRL_NPCM7XX
select NPCM7XX_TIMER
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select CACHE_L2X0
select ARM_GIC
select HAVE_ARM_TWD if SMP
diff --git a/arch/arm64/boot/dts/arm/fvp-base-revc.dts b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
index 62ab0d54ff71..335fff762451 100644
--- a/arch/arm64/boot/dts/arm/fvp-base-revc.dts
+++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
@@ -161,10 +161,10 @@
bus-range = <0x0 0x1>;
reg = <0x0 0x40000000 0x0 0x10000000>;
ranges = <0x2000000 0x0 0x50000000 0x0 0x50000000 0x0 0x10000000>;
- interrupt-map = <0 0 0 1 &gic GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 2 &gic GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 3 &gic GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 4 &gic GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gic 0 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gic 0 0 GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gic 0 0 GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
msi-map = <0x0 &its 0x0 0x10000>;
iommu-map = <0x0 &smmu 0x0 0x10000>;
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index d54586d5b031..fab013c5ee8c 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -466,6 +466,13 @@ static void ssbs_thread_switch(struct task_struct *next)
if (unlikely(next->flags & PF_KTHREAD))
return;
+ /*
+ * If all CPUs implement the SSBS extension, then we just need to
+ * context-switch the PSTATE field.
+ */
+ if (cpu_have_feature(cpu_feature(SSBS)))
+ return;
+
/* If the mitigation is enabled, then we leave SSBS clear. */
if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
test_tsk_thread_flag(next, TIF_SSBD))
diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
index ed007f4a6444..3f501159ee9f 100644
--- a/arch/s390/boot/uv.c
+++ b/arch/s390/boot/uv.c
@@ -15,7 +15,8 @@ void uv_query_info(void)
if (!test_facility(158))
return;
- if (uv_call(0, (uint64_t)&uvcb))
+ /* rc==0x100 means that there is additional data we do not process */
+ if (uv_call(0, (uint64_t)&uvcb) && uvcb.header.rc != 0x100)
return;
if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) &&
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 2dc9eb4e1acc..b6a4ce9dafaf 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -155,7 +155,7 @@ static inline void get_tod_clock_ext(char *clk)
static inline unsigned long long get_tod_clock(void)
{
- unsigned char clk[STORE_CLOCK_EXT_SIZE];
+ char clk[STORE_CLOCK_EXT_SIZE];
get_tod_clock_ext(clk);
return *((unsigned long long *)&clk[1]);
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 64c3e70b0556..beffafd7dcc3 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -246,6 +246,7 @@ static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index ce83950036c5..e5ad97a82342 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1713,6 +1713,8 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
old = ((s64)(prev_raw_count << shift) >> shift);
local64_add(new - old + count * period, &event->count);
+ local64_set(&hwc->period_left, -new);
+
perf_event_update_userpage(event);
return 0;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index c1d7b866a03f..4e3f137ffa8c 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -33,7 +33,7 @@
#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
#define PT_HAVE_ACCESSED_DIRTY(mmu) true
#ifdef CONFIG_X86_64
- #define PT_MAX_FULL_LEVELS 4
+ #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
#define CMPXCHG cmpxchg
#else
#define CMPXCHG cmpxchg64
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index dc7c166c4335..84b57b461ad6 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2975,6 +2975,9 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
static int get_ept_level(struct kvm_vcpu *vcpu)
{
+ /* Nested EPT currently only supports 4-level walks. */
+ if (is_guest_mode(vcpu) && nested_cpu_has_ept(get_vmcs12(vcpu)))
+ return 4;
if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48))
return 5;
return 4;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index edde5ee8c6f5..95180d67d570 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -445,6 +445,14 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
* for #DB exceptions under VMX.
*/
vcpu->arch.dr6 ^= payload & DR6_RTM;
+
+ /*
+ * The #DB payload is defined as compatible with the 'pending
+ * debug exceptions' field under VMX, not DR6. While bit 12 is
+ * defined in the 'pending debug exceptions' field (enabled
+ * breakpoint), it is reserved and must be zero in DR6.
+ */
+ vcpu->arch.dr6 &= ~BIT(12);
break;
case PF_VECTOR:
vcpu->arch.cr2 = payload;
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index bcf8f7501db7..a74c1a0e892d 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -101,6 +101,8 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void);
acpi_status acpi_hw_enable_all_wakeup_gpes(void);
+u8 acpi_hw_check_all_gpes(void);
+
acpi_status
acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block,
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 04a40d563dd6..84b0b410310e 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -795,6 +795,38 @@ acpi_status acpi_enable_all_wakeup_gpes(void)
ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes)
+/******************************************************************************
+ *
+ * FUNCTION: acpi_any_gpe_status_set
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Whether or not the status bit is set for any GPE
+ *
+ * DESCRIPTION: Check the status bits of all enabled GPEs and return TRUE if any
+ * of them is set or FALSE otherwise.
+ *
+ ******************************************************************************/
+u32 acpi_any_gpe_status_set(void)
+{
+ acpi_status status;
+ u8 ret;
+
+ ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return (FALSE);
+ }
+
+ ret = acpi_hw_check_all_gpes();
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+ return (ret);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_any_gpe_status_set)
+
/*******************************************************************************
*
* FUNCTION: acpi_install_gpe_block
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 565bd3f29f31..b1d7d5f92495 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -444,6 +444,53 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
return (AE_OK);
}
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_get_gpe_block_status
+ *
+ * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
+ * gpe_block - Gpe Block info
+ *
+ * RETURN: Success
+ *
+ * DESCRIPTION: Produce a combined GPE status bits mask for the given block.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block,
+ void *ret_ptr)
+{
+ struct acpi_gpe_register_info *gpe_register_info;
+ u64 in_enable, in_status;
+ acpi_status status;
+ u8 *ret = ret_ptr;
+ u32 i;
+
+ /* Examine each GPE Register within the block */
+
+ for (i = 0; i < gpe_block->register_count; i++) {
+ gpe_register_info = &gpe_block->register_info[i];
+
+ status = acpi_hw_read(&in_enable,
+ &gpe_register_info->enable_address);
+ if (ACPI_FAILURE(status)) {
+ continue;
+ }
+
+ status = acpi_hw_read(&in_status,
+ &gpe_register_info->status_address);
+ if (ACPI_FAILURE(status)) {
+ continue;
+ }
+
+ *ret |= in_enable & in_status;
+ }
+
+ return (AE_OK);
+}
+
/******************************************************************************
*
* FUNCTION: acpi_hw_disable_all_gpes
@@ -510,4 +557,28 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
return_ACPI_STATUS(status);
}
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_check_all_gpes
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Combined status of all GPEs
+ *
+ * DESCRIPTION: Check all enabled GPEs in all GPE blocks and return TRUE if the
+ * status bit is set for at least one of them of FALSE otherwise.
+ *
+ ******************************************************************************/
+
+u8 acpi_hw_check_all_gpes(void)
+{
+ u8 ret = 0;
+
+ ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes);
+
+ (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &ret);
+
+ return (ret != 0);
+}
+
#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index bd75caff8322..ca5cdb621c2a 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -179,6 +179,7 @@ EXPORT_SYMBOL(first_ec);
static struct acpi_ec *boot_ec;
static bool boot_ec_is_ecdt = false;
+static struct workqueue_struct *ec_wq;
static struct workqueue_struct *ec_query_wq;
static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
@@ -461,7 +462,7 @@ static void acpi_ec_submit_query(struct acpi_ec *ec)
ec_dbg_evt("Command(%s) submitted/blocked",
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
ec->nr_pending_queries++;
- schedule_work(&ec->work);
+ queue_work(ec_wq, &ec->work);
}
}
@@ -527,7 +528,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
#ifdef CONFIG_PM_SLEEP
static void __acpi_ec_flush_work(void)
{
- flush_scheduled_work(); /* flush ec->work */
+ drain_workqueue(ec_wq); /* flush ec->work */
flush_workqueue(ec_query_wq); /* flush queries */
}
@@ -548,8 +549,8 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
void acpi_ec_flush_work(void)
|
|
{
|
|
- /* Without ec_query_wq there is nothing to flush. */
|
|
- if (!ec_query_wq)
|
|
+ /* Without ec_wq there is nothing to flush. */
|
|
+ if (!ec_wq)
|
|
return;
|
|
|
|
__acpi_ec_flush_work();
|
|
@@ -2032,25 +2033,33 @@ static struct acpi_driver acpi_ec_driver = {
|
|
.drv.pm = &acpi_ec_pm,
|
|
};
|
|
|
|
-static inline int acpi_ec_query_init(void)
|
|
+static void acpi_ec_destroy_workqueues(void)
|
|
{
|
|
- if (!ec_query_wq) {
|
|
- ec_query_wq = alloc_workqueue("kec_query", 0,
|
|
- ec_max_queries);
|
|
- if (!ec_query_wq)
|
|
- return -ENODEV;
|
|
+ if (ec_wq) {
|
|
+ destroy_workqueue(ec_wq);
|
|
+ ec_wq = NULL;
|
|
}
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static inline void acpi_ec_query_exit(void)
|
|
-{
|
|
if (ec_query_wq) {
|
|
destroy_workqueue(ec_query_wq);
|
|
ec_query_wq = NULL;
|
|
}
|
|
}
|
|
|
|
+static int acpi_ec_init_workqueues(void)
|
|
+{
|
|
+ if (!ec_wq)
|
|
+ ec_wq = alloc_ordered_workqueue("kec", 0);
|
|
+
|
|
+ if (!ec_query_wq)
|
|
+ ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries);
|
|
+
|
|
+ if (!ec_wq || !ec_query_wq) {
|
|
+ acpi_ec_destroy_workqueues();
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static const struct dmi_system_id acpi_ec_no_wakeup[] = {
|
|
{
|
|
.ident = "Thinkpad X1 Carbon 6th",
|
|
@@ -2081,8 +2090,7 @@ int __init acpi_ec_init(void)
|
|
int result;
|
|
int ecdt_fail, dsdt_fail;
|
|
|
|
- /* register workqueue for _Qxx evaluations */
|
|
- result = acpi_ec_query_init();
|
|
+ result = acpi_ec_init_workqueues();
|
|
if (result)
|
|
return result;
|
|
|
|
@@ -2113,6 +2121,6 @@ static void __exit acpi_ec_exit(void)
|
|
{
|
|
|
|
acpi_bus_unregister_driver(&acpi_ec_driver);
|
|
- acpi_ec_query_exit();
|
|
+ acpi_ec_destroy_workqueues();
|
|
}
|
|
#endif /* 0 */
|
|
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
|
|
index 2af937a8b1c5..62348ec2a807 100644
|
|
--- a/drivers/acpi/sleep.c
|
|
+++ b/drivers/acpi/sleep.c
|
|
@@ -977,21 +977,34 @@ static int acpi_s2idle_prepare_late(void)
|
|
return 0;
|
|
}
|
|
|
|
-static void acpi_s2idle_wake(void)
|
|
+static bool acpi_s2idle_wake(void)
|
|
{
|
|
- /*
|
|
- * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the SCI has
|
|
- * not triggered while suspended, so bail out.
|
|
- */
|
|
- if (!acpi_sci_irq_valid() ||
|
|
- irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
|
|
- return;
|
|
+ if (!acpi_sci_irq_valid())
|
|
+ return pm_wakeup_pending();
|
|
+
|
|
+ while (pm_wakeup_pending()) {
|
|
+ /*
|
|
+ * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the
|
|
+ * SCI has not triggered while suspended, so bail out (the
|
|
+ * wakeup is pending anyway and the SCI is not the source of
|
|
+ * it).
|
|
+ */
|
|
+ if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
|
|
+ return true;
|
|
+
|
|
+ /*
|
|
+ * If there are no EC events to process and at least one of the
|
|
+ * other enabled GPEs is active, the wakeup is regarded as a
|
|
+ * genuine one.
|
|
+ *
|
|
+ * Note that the checks below must be carried out in this order
|
|
+ * to avoid returning prematurely due to a change of the EC GPE
|
|
+ * status bit from unset to set between the checks with the
|
|
+ * status bits of all the other GPEs unset.
|
|
+ */
|
|
+ if (acpi_any_gpe_status_set() && !acpi_ec_dispatch_gpe())
|
|
+ return true;
|
|
|
|
- /*
|
|
- * If there are EC events to process, the wakeup may be a spurious one
|
|
- * coming from the EC.
|
|
- */
|
|
- if (acpi_ec_dispatch_gpe()) {
|
|
/*
|
|
* Cancel the wakeup and process all pending events in case
|
|
* there are any wakeup ones in there.
|
|
@@ -1009,8 +1022,19 @@ static void acpi_s2idle_wake(void)
|
|
acpi_ec_flush_work();
|
|
acpi_os_wait_events_complete(); /* synchronize Notify handling */
|
|
|
|
+ /*
|
|
+ * The SCI is in the "suspended" state now and it cannot produce
|
|
+ * new wakeup events till the rearming below, so if any of them
|
|
+ * are pending here, they must be resulting from the processing
|
|
+ * of EC events above or coming from somewhere else.
|
|
+ */
|
|
+ if (pm_wakeup_pending())
|
|
+ return true;
|
|
+
|
|
rearm_wake_irq(acpi_sci_irq);
|
|
}
|
|
+
|
|
+ return false;
|
|
}
|
|
|
|
static void acpi_s2idle_restore_early(void)
|
|
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
|
|
index 36cf13eee6b8..68413bf9cf87 100644
|
|
--- a/drivers/bus/moxtet.c
|
|
+++ b/drivers/bus/moxtet.c
|
|
@@ -466,7 +466,7 @@ static ssize_t input_read(struct file *file, char __user *buf, size_t len,
|
|
{
|
|
struct moxtet *moxtet = file->private_data;
|
|
u8 bin[TURRIS_MOX_MAX_MODULES];
|
|
- u8 hex[sizeof(buf) * 2 + 1];
|
|
+ u8 hex[sizeof(bin) * 2 + 1];
|
|
int ret, n;
|
|
|
|
ret = moxtet_spi_read(moxtet, bin);
|
|
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
|
|
index 285e0b8f9a97..09e3e25562a8 100644
|
|
--- a/drivers/char/ipmi/ipmb_dev_int.c
|
|
+++ b/drivers/char/ipmi/ipmb_dev_int.c
|
|
@@ -265,7 +265,7 @@ static int ipmb_slave_cb(struct i2c_client *client,
|
|
break;
|
|
|
|
case I2C_SLAVE_WRITE_RECEIVED:
|
|
- if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg))
|
|
+ if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg) - 1)
|
|
break;
|
|
|
|
buf[++ipmb_dev->msg_idx] = *val;
|
|
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
|
|
index e6fd079783bd..e73ca303f1a7 100644
|
|
--- a/drivers/edac/edac_mc.c
|
|
+++ b/drivers/edac/edac_mc.c
|
|
@@ -503,16 +503,10 @@ void edac_mc_free(struct mem_ctl_info *mci)
|
|
{
|
|
edac_dbg(1, "\n");
|
|
|
|
- /* If we're not yet registered with sysfs free only what was allocated
|
|
- * in edac_mc_alloc().
|
|
- */
|
|
- if (!device_is_registered(&mci->dev)) {
|
|
- _edac_mc_free(mci);
|
|
- return;
|
|
- }
|
|
+ if (device_is_registered(&mci->dev))
|
|
+ edac_unregister_sysfs(mci);
|
|
|
|
- /* the mci instance is freed here, when the sysfs object is dropped */
|
|
- edac_unregister_sysfs(mci);
|
|
+ _edac_mc_free(mci);
|
|
}
|
|
EXPORT_SYMBOL_GPL(edac_mc_free);
|
|
|
|
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
|
|
index 32d016f1ecd1..0287884ae28c 100644
|
|
--- a/drivers/edac/edac_mc_sysfs.c
|
|
+++ b/drivers/edac/edac_mc_sysfs.c
|
|
@@ -276,10 +276,7 @@ static const struct attribute_group *csrow_attr_groups[] = {
|
|
|
|
static void csrow_attr_release(struct device *dev)
|
|
{
|
|
- struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
|
|
-
|
|
- edac_dbg(1, "device %s released\n", dev_name(dev));
|
|
- kfree(csrow);
|
|
+ /* release device with _edac_mc_free() */
|
|
}
|
|
|
|
static const struct device_type csrow_attr_type = {
|
|
@@ -447,8 +444,7 @@ error:
|
|
csrow = mci->csrows[i];
|
|
if (!nr_pages_per_csrow(csrow))
|
|
continue;
|
|
-
|
|
- device_del(&mci->csrows[i]->dev);
|
|
+ device_unregister(&mci->csrows[i]->dev);
|
|
}
|
|
|
|
return err;
|
|
@@ -620,10 +616,7 @@ static const struct attribute_group *dimm_attr_groups[] = {
|
|
|
|
static void dimm_attr_release(struct device *dev)
|
|
{
|
|
- struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);
|
|
-
|
|
- edac_dbg(1, "device %s released\n", dev_name(dev));
|
|
- kfree(dimm);
|
|
+ /* release device with _edac_mc_free() */
|
|
}
|
|
|
|
static const struct device_type dimm_attr_type = {
|
|
@@ -906,10 +899,7 @@ static const struct attribute_group *mci_attr_groups[] = {
|
|
|
|
static void mci_attr_release(struct device *dev)
|
|
{
|
|
- struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
|
|
-
|
|
- edac_dbg(1, "device %s released\n", dev_name(dev));
|
|
- kfree(mci);
|
|
+ /* release device with _edac_mc_free() */
|
|
}
|
|
|
|
static const struct device_type mci_attr_type = {
|
|
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
|
|
index a9748b5198e6..67f9f82e0db0 100644
|
|
--- a/drivers/gpio/gpio-xilinx.c
|
|
+++ b/drivers/gpio/gpio-xilinx.c
|
|
@@ -147,9 +147,10 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
|
|
for (i = 0; i < gc->ngpio; i++) {
|
|
if (*mask == 0)
|
|
break;
|
|
+ /* Once finished with an index write it out to the register */
|
|
if (index != xgpio_index(chip, i)) {
|
|
xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
|
|
- xgpio_regoffset(chip, i),
|
|
+ index * XGPIO_CHANNEL_OFFSET,
|
|
chip->gpio_state[index]);
|
|
spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
|
|
index = xgpio_index(chip, i);
|
|
@@ -165,7 +166,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
|
|
}
|
|
|
|
xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
|
|
- xgpio_regoffset(chip, i), chip->gpio_state[index]);
|
|
+ index * XGPIO_CHANNEL_OFFSET, chip->gpio_state[index]);
|
|
|
|
spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
|
|
}
|
|
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
|
|
index 7ee5b7f53aeb..3ece59185d37 100644
|
|
--- a/drivers/gpio/gpiolib-of.c
|
|
+++ b/drivers/gpio/gpiolib-of.c
|
|
@@ -146,10 +146,6 @@ static void of_gpio_flags_quirks(struct device_node *np,
|
|
if (of_property_read_bool(np, "cd-inverted"))
|
|
*flags ^= OF_GPIO_ACTIVE_LOW;
|
|
}
|
|
- if (!strcmp(propname, "wp-gpios")) {
|
|
- if (of_property_read_bool(np, "wp-inverted"))
|
|
- *flags ^= OF_GPIO_ACTIVE_LOW;
|
|
- }
|
|
}
|
|
/*
|
|
* Some GPIO fixed regulator quirks.
|
|
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
|
|
index 2476306e7030..22506e4614b3 100644
|
|
--- a/drivers/gpio/gpiolib.c
|
|
+++ b/drivers/gpio/gpiolib.c
|
|
@@ -3220,6 +3220,17 @@ int gpiod_is_active_low(const struct gpio_desc *desc)
|
|
}
|
|
EXPORT_SYMBOL_GPL(gpiod_is_active_low);
|
|
|
|
+/**
|
|
+ * gpiod_toggle_active_low - toggle whether a GPIO is active-low or not
|
|
+ * @desc: the gpio descriptor to change
|
|
+ */
|
|
+void gpiod_toggle_active_low(struct gpio_desc *desc)
|
|
+{
|
|
+ VALIDATE_DESC_VOID(desc);
|
|
+ change_bit(FLAG_ACTIVE_LOW, &desc->flags);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(gpiod_toggle_active_low);
|
|
+
|
|
/* I/O calls are only valid after configuration completed; the relevant
|
|
* "is this a valid GPIO" error checks should already have been done.
|
|
*
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
|
|
index 5906c80c4b2c..f57dd195dfb8 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
|
|
@@ -166,6 +166,7 @@ panfrost_lookup_bos(struct drm_device *dev,
|
|
break;
|
|
}
|
|
|
|
+ atomic_inc(&bo->gpu_usecount);
|
|
job->mappings[i] = mapping;
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
|
|
index ca1bc9019600..b3517ff9630c 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
|
|
@@ -30,6 +30,12 @@ struct panfrost_gem_object {
|
|
struct mutex lock;
|
|
} mappings;
|
|
|
|
+ /*
|
|
+ * Count the number of jobs referencing this BO so we don't let the
|
|
+ * shrinker reclaim this object prematurely.
|
|
+ */
|
|
+ atomic_t gpu_usecount;
|
|
+
|
|
bool noexec :1;
|
|
bool is_heap :1;
|
|
};
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
|
|
index f5dd7b29bc95..288e46c40673 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
|
|
@@ -41,6 +41,9 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
|
|
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
|
|
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
|
|
|
|
+ if (atomic_read(&bo->gpu_usecount))
|
|
+ return false;
|
|
+
|
|
if (!mutex_trylock(&shmem->pages_lock))
|
|
return false;
|
|
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
|
|
index bbb0c5e3ca6f..9f770d454684 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
|
|
@@ -270,8 +270,13 @@ static void panfrost_job_cleanup(struct kref *ref)
|
|
dma_fence_put(job->render_done_fence);
|
|
|
|
if (job->mappings) {
|
|
- for (i = 0; i < job->bo_count; i++)
|
|
+ for (i = 0; i < job->bo_count; i++) {
|
|
+ if (!job->mappings[i])
|
|
+ break;
|
|
+
|
|
+ atomic_dec(&job->mappings[i]->obj->gpu_usecount);
|
|
panfrost_gem_mapping_put(job->mappings[i]);
|
|
+ }
|
|
kvfree(job->mappings);
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
|
|
index a5757b11b730..5b54eff12cc0 100644
|
|
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
|
|
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
|
|
@@ -85,7 +85,6 @@ static int sun4i_drv_bind(struct device *dev)
|
|
}
|
|
|
|
drm_mode_config_init(drm);
|
|
- drm->mode_config.allow_fb_modifiers = true;
|
|
|
|
ret = component_bind_all(drm->dev, drm);
|
|
if (ret) {
|
|
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
|
|
index 5bd60ded3d81..909eba43664a 100644
|
|
--- a/drivers/gpu/drm/vgem/vgem_drv.c
|
|
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
|
|
@@ -196,9 +196,10 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
|
|
return ERR_CAST(obj);
|
|
|
|
ret = drm_gem_handle_create(file, &obj->base, handle);
|
|
- drm_gem_object_put_unlocked(&obj->base);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
+ drm_gem_object_put_unlocked(&obj->base);
|
|
return ERR_PTR(ret);
|
|
+ }
|
|
|
|
return &obj->base;
|
|
}
|
|
@@ -221,7 +222,9 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
|
|
args->size = gem_object->size;
|
|
args->pitch = pitch;
|
|
|
|
- DRM_DEBUG("Created object of size %lld\n", size);
|
|
+ drm_gem_object_put_unlocked(gem_object);
|
|
+
|
|
+ DRM_DEBUG("Created object of size %llu\n", args->size);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
|
|
index f01f4887fb2e..a91ed01abb68 100644
|
|
--- a/drivers/hwmon/pmbus/ltc2978.c
|
|
+++ b/drivers/hwmon/pmbus/ltc2978.c
|
|
@@ -82,8 +82,8 @@ enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882,
|
|
|
|
#define LTC_POLL_TIMEOUT 100 /* in milli-seconds */
|
|
|
|
-#define LTC_NOT_BUSY BIT(5)
|
|
-#define LTC_NOT_PENDING BIT(4)
|
|
+#define LTC_NOT_BUSY BIT(6)
|
|
+#define LTC_NOT_PENDING BIT(5)
|
|
|
|
/*
|
|
* LTC2978 clears peak data whenever the CLEAR_FAULTS command is executed, which
|
|
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
|
|
index 6eb6d2717ca5..2b4d80393bd0 100644
|
|
--- a/drivers/infiniband/core/security.c
|
|
+++ b/drivers/infiniband/core/security.c
|
|
@@ -339,22 +339,16 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
|
|
if (!new_pps)
|
|
return NULL;
|
|
|
|
- if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
|
|
- if (!qp_pps) {
|
|
- new_pps->main.port_num = qp_attr->port_num;
|
|
- new_pps->main.pkey_index = qp_attr->pkey_index;
|
|
- } else {
|
|
- new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
|
|
- qp_attr->port_num :
|
|
- qp_pps->main.port_num;
|
|
-
|
|
- new_pps->main.pkey_index =
|
|
- (qp_attr_mask & IB_QP_PKEY_INDEX) ?
|
|
- qp_attr->pkey_index :
|
|
- qp_pps->main.pkey_index;
|
|
- }
|
|
+ if (qp_attr_mask & IB_QP_PORT)
|
|
+ new_pps->main.port_num =
|
|
+ (qp_pps) ? qp_pps->main.port_num : qp_attr->port_num;
|
|
+ if (qp_attr_mask & IB_QP_PKEY_INDEX)
|
|
+ new_pps->main.pkey_index = (qp_pps) ? qp_pps->main.pkey_index :
|
|
+ qp_attr->pkey_index;
|
|
+ if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
|
|
new_pps->main.state = IB_PORT_PKEY_VALID;
|
|
- } else if (qp_pps) {
|
|
+
|
|
+ if (!(qp_attr_mask & (IB_QP_PKEY_INDEX || IB_QP_PORT)) && qp_pps) {
|
|
new_pps->main.port_num = qp_pps->main.port_num;
|
|
new_pps->main.pkey_index = qp_pps->main.pkey_index;
|
|
if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
|
|
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
|
|
index d1407fa378e8..1235ffb2389b 100644
|
|
--- a/drivers/infiniband/core/user_mad.c
|
|
+++ b/drivers/infiniband/core/user_mad.c
|
|
@@ -1312,6 +1312,9 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
|
|
struct ib_umad_file *file;
|
|
int id;
|
|
|
|
+ cdev_device_del(&port->sm_cdev, &port->sm_dev);
|
|
+ cdev_device_del(&port->cdev, &port->dev);
|
|
+
|
|
mutex_lock(&port->file_mutex);
|
|
|
|
/* Mark ib_dev NULL and block ioctl or other file ops to progress
|
|
@@ -1331,8 +1334,6 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
|
|
|
|
mutex_unlock(&port->file_mutex);
|
|
|
|
- cdev_device_del(&port->sm_cdev, &port->sm_dev);
|
|
- cdev_device_del(&port->cdev, &port->dev);
|
|
ida_free(&umad_ida, port->dev_num);
|
|
|
|
/* balances device_initialize() */
|
|
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
|
|
index 14a80fd9f464..300353c1e5f1 100644
|
|
--- a/drivers/infiniband/core/uverbs_cmd.c
|
|
+++ b/drivers/infiniband/core/uverbs_cmd.c
|
|
@@ -2718,12 +2718,6 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
|
|
return 0;
|
|
}
|
|
|
|
-static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec)
|
|
-{
|
|
- /* Returns user space filter size, includes padding */
|
|
- return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
|
|
-}
|
|
-
|
|
static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
|
|
u16 ib_real_filter_sz)
|
|
{
|
|
@@ -2867,11 +2861,16 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
|
|
static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
|
|
union ib_flow_spec *ib_spec)
|
|
{
|
|
- ssize_t kern_filter_sz;
|
|
+ size_t kern_filter_sz;
|
|
void *kern_spec_mask;
|
|
void *kern_spec_val;
|
|
|
|
- kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
|
|
+ if (check_sub_overflow((size_t)kern_spec->hdr.size,
|
|
+ sizeof(struct ib_uverbs_flow_spec_hdr),
|
|
+ &kern_filter_sz))
|
|
+ return -EINVAL;
|
|
+
|
|
+ kern_filter_sz /= 2;
|
|
|
|
kern_spec_val = (void *)kern_spec +
|
|
sizeof(struct ib_uverbs_flow_spec_hdr);
|
|
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
|
|
index 347dc242fb88..d82e0589cfd2 100644
|
|
--- a/drivers/infiniband/hw/cxgb4/cm.c
|
|
+++ b/drivers/infiniband/hw/cxgb4/cm.c
|
|
@@ -3036,6 +3036,10 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
|
|
C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
|
|
}
|
|
|
|
+ /* As per draft-hilland-iwarp-verbs-v1.0, sec 6.2.3,
|
|
+ * when entering the TERM state the RNIC MUST initiate a CLOSE.
|
|
+ */
|
|
+ c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
|
|
c4iw_put_ep(&ep->com);
|
|
} else
|
|
pr_warn("TERM received tid %u no ep/qp\n", tid);
|
|
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
|
|
index bbcac539777a..89ac2f9ae6dd 100644
|
|
--- a/drivers/infiniband/hw/cxgb4/qp.c
|
|
+++ b/drivers/infiniband/hw/cxgb4/qp.c
|
|
@@ -1948,10 +1948,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
|
|
qhp->attr.layer_etype = attrs->layer_etype;
|
|
qhp->attr.ecode = attrs->ecode;
|
|
ep = qhp->ep;
|
|
- c4iw_get_ep(&ep->com);
|
|
- disconnect = 1;
|
|
if (!internal) {
|
|
+ c4iw_get_ep(&ep->com);
|
|
terminate = 1;
|
|
+ disconnect = 1;
|
|
} else {
|
|
terminate = qhp->attr.send_term;
|
|
ret = rdma_fini(rhp, qhp, ep);
|
|
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
|
|
index c142b23bb401..1aeea5d65c01 100644
|
|
--- a/drivers/infiniband/hw/hfi1/affinity.c
|
|
+++ b/drivers/infiniband/hw/hfi1/affinity.c
|
|
@@ -479,6 +479,8 @@ static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
|
|
rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
|
|
}
|
|
|
|
+ free_cpumask_var(available_cpus);
|
|
+ free_cpumask_var(non_intr_cpus);
|
|
return 0;
|
|
|
|
fail:
|
|
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
|
|
index f9a7e9d29c8b..89e1dfd07a1b 100644
|
|
--- a/drivers/infiniband/hw/hfi1/file_ops.c
|
|
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
|
|
@@ -200,23 +200,24 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
|
|
|
|
fd = kzalloc(sizeof(*fd), GFP_KERNEL);
|
|
|
|
- if (fd) {
|
|
- fd->rec_cpu_num = -1; /* no cpu affinity by default */
|
|
- fd->mm = current->mm;
|
|
- mmgrab(fd->mm);
|
|
- fd->dd = dd;
|
|
- kobject_get(&fd->dd->kobj);
|
|
- fp->private_data = fd;
|
|
- } else {
|
|
- fp->private_data = NULL;
|
|
-
|
|
- if (atomic_dec_and_test(&dd->user_refcount))
|
|
- complete(&dd->user_comp);
|
|
-
|
|
- return -ENOMEM;
|
|
- }
|
|
-
|
|
+ if (!fd || init_srcu_struct(&fd->pq_srcu))
|
|
+ goto nomem;
|
|
+ spin_lock_init(&fd->pq_rcu_lock);
|
|
+ spin_lock_init(&fd->tid_lock);
|
|
+ spin_lock_init(&fd->invalid_lock);
|
|
+ fd->rec_cpu_num = -1; /* no cpu affinity by default */
|
|
+ fd->mm = current->mm;
|
|
+ mmgrab(fd->mm);
|
|
+ fd->dd = dd;
|
|
+ kobject_get(&fd->dd->kobj);
|
|
+ fp->private_data = fd;
|
|
return 0;
|
|
+nomem:
|
|
+ kfree(fd);
|
|
+ fp->private_data = NULL;
|
|
+ if (atomic_dec_and_test(&dd->user_refcount))
|
|
+ complete(&dd->user_comp);
|
|
+ return -ENOMEM;
|
|
}
|
|
|
|
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
|
|
@@ -301,21 +302,30 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
|
|
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
|
|
{
|
|
struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
|
|
- struct hfi1_user_sdma_pkt_q *pq = fd->pq;
|
|
+ struct hfi1_user_sdma_pkt_q *pq;
|
|
struct hfi1_user_sdma_comp_q *cq = fd->cq;
|
|
int done = 0, reqs = 0;
|
|
unsigned long dim = from->nr_segs;
|
|
+ int idx;
|
|
|
|
- if (!cq || !pq)
|
|
+ idx = srcu_read_lock(&fd->pq_srcu);
|
|
+ pq = srcu_dereference(fd->pq, &fd->pq_srcu);
|
|
+ if (!cq || !pq) {
|
|
+ srcu_read_unlock(&fd->pq_srcu, idx);
|
|
return -EIO;
|
|
+ }
|
|
|
|
- if (!iter_is_iovec(from) || !dim)
|
|
+ if (!iter_is_iovec(from) || !dim) {
|
|
+ srcu_read_unlock(&fd->pq_srcu, idx);
|
|
return -EINVAL;
|
|
+ }
|
|
|
|
trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);
|
|
|
|
- if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
|
|
+ if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
|
|
+ srcu_read_unlock(&fd->pq_srcu, idx);
|
|
return -ENOSPC;
|
|
+ }
|
|
|
|
while (dim) {
|
|
int ret;
|
|
@@ -333,6 +343,7 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
|
|
reqs++;
|
|
}
|
|
|
|
+ srcu_read_unlock(&fd->pq_srcu, idx);
|
|
return reqs;
|
|
}
|
|
|
|
@@ -707,6 +718,7 @@ done:
|
|
if (atomic_dec_and_test(&dd->user_refcount))
|
|
complete(&dd->user_comp);
|
|
|
|
+ cleanup_srcu_struct(&fdata->pq_srcu);
|
|
kfree(fdata);
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
|
|
index fa45350a9a1d..1af94650bd84 100644
|
|
--- a/drivers/infiniband/hw/hfi1/hfi.h
|
|
+++ b/drivers/infiniband/hw/hfi1/hfi.h
|
|
@@ -1436,10 +1436,13 @@ struct mmu_rb_handler;
|
|
|
|
/* Private data for file operations */
|
|
struct hfi1_filedata {
|
|
+ struct srcu_struct pq_srcu;
|
|
struct hfi1_devdata *dd;
|
|
struct hfi1_ctxtdata *uctxt;
|
|
struct hfi1_user_sdma_comp_q *cq;
|
|
- struct hfi1_user_sdma_pkt_q *pq;
|
|
+ /* update side lock for SRCU */
|
|
+ spinlock_t pq_rcu_lock;
|
|
+ struct hfi1_user_sdma_pkt_q __rcu *pq;
|
|
u16 subctxt;
|
|
/* for cpu affinity; -1 if none */
|
|
int rec_cpu_num;
|
|
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
|
|
index 3592a9ec155e..4d732353379d 100644
|
|
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
|
|
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
|
|
@@ -90,9 +90,6 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
|
|
struct hfi1_devdata *dd = uctxt->dd;
|
|
int ret = 0;
|
|
|
|
- spin_lock_init(&fd->tid_lock);
|
|
- spin_lock_init(&fd->invalid_lock);
|
|
-
|
|
fd->entry_to_rb = kcalloc(uctxt->expected_count,
|
|
sizeof(struct rb_node *),
|
|
GFP_KERNEL);
|
|
@@ -165,10 +162,12 @@ void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
|
|
if (fd->handler) {
|
|
hfi1_mmu_rb_unregister(fd->handler);
|
|
} else {
|
|
+ mutex_lock(&uctxt->exp_mutex);
|
|
if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
|
|
unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
|
|
if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
|
|
unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
|
|
+ mutex_unlock(&uctxt->exp_mutex);
|
|
}
|
|
|
|
kfree(fd->invalid_tids);
|
|
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
|
|
index fd754a16475a..c2f0d9ba93de 100644
|
|
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
|
|
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
|
|
@@ -179,7 +179,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
|
|
pq = kzalloc(sizeof(*pq), GFP_KERNEL);
|
|
if (!pq)
|
|
return -ENOMEM;
|
|
-
|
|
pq->dd = dd;
|
|
pq->ctxt = uctxt->ctxt;
|
|
pq->subctxt = fd->subctxt;
|
|
@@ -236,7 +235,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
|
|
goto pq_mmu_fail;
|
|
}
|
|
|
|
- fd->pq = pq;
|
|
+ rcu_assign_pointer(fd->pq, pq);
|
|
fd->cq = cq;
|
|
|
|
return 0;
|
|
@@ -264,8 +263,14 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
|
|
|
|
trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);
|
|
|
|
- pq = fd->pq;
|
|
+ spin_lock(&fd->pq_rcu_lock);
|
|
+ pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
|
|
+ lockdep_is_held(&fd->pq_rcu_lock));
|
|
if (pq) {
|
|
+ rcu_assign_pointer(fd->pq, NULL);
|
|
+ spin_unlock(&fd->pq_rcu_lock);
|
|
+ synchronize_srcu(&fd->pq_srcu);
|
|
+ /* at this point there can be no more new requests */
|
|
if (pq->handler)
|
|
hfi1_mmu_rb_unregister(pq->handler);
|
|
iowait_sdma_drain(&pq->busy);
|
|
@@ -277,7 +282,8 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
|
|
kfree(pq->req_in_use);
|
|
kmem_cache_destroy(pq->txreq_cache);
|
|
kfree(pq);
|
|
- fd->pq = NULL;
|
|
+ } else {
|
|
+ spin_unlock(&fd->pq_rcu_lock);
|
|
}
|
|
if (fd->cq) {
|
|
vfree(fd->cq->comps);
|
|
@@ -321,7 +327,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
|
|
{
|
|
int ret = 0, i;
|
|
struct hfi1_ctxtdata *uctxt = fd->uctxt;
|
|
- struct hfi1_user_sdma_pkt_q *pq = fd->pq;
|
|
+ struct hfi1_user_sdma_pkt_q *pq =
|
|
+ srcu_dereference(fd->pq, &fd->pq_srcu);
|
|
struct hfi1_user_sdma_comp_q *cq = fd->cq;
|
|
struct hfi1_devdata *dd = pq->dd;
|
|
unsigned long idx = 0;
|
|
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
|
|
index 5fd071c05944..0865373bd12d 100644
|
|
--- a/drivers/infiniband/hw/mlx5/qp.c
|
|
+++ b/drivers/infiniband/hw/mlx5/qp.c
|
|
@@ -3391,9 +3391,6 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
|
|
struct mlx5_ib_qp_base *base;
|
|
u32 set_id;
|
|
|
|
- if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id))
|
|
- return 0;
|
|
-
|
|
if (counter)
|
|
set_id = counter->id;
|
|
else
|
|
@@ -6503,6 +6500,7 @@ void mlx5_ib_drain_rq(struct ib_qp *qp)
|
|
*/
|
|
int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
|
|
{
|
|
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
|
|
struct mlx5_ib_qp *mqp = to_mqp(qp);
|
|
int err = 0;
|
|
|
|
@@ -6512,6 +6510,11 @@ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
|
|
goto out;
|
|
}
|
|
|
|
+ if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) {
|
|
+ err = -EOPNOTSUPP;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
if (mqp->state == IB_QPS_RTS) {
|
|
err = __mlx5_ib_qp_set_counter(qp, counter);
|
|
if (!err)
|
|
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
|
|
index 0b0a241c57ff..799254a049ba 100644
|
|
--- a/drivers/infiniband/sw/rdmavt/qp.c
|
|
+++ b/drivers/infiniband/sw/rdmavt/qp.c
|
|
@@ -61,6 +61,8 @@
|
|
#define RVT_RWQ_COUNT_THRESHOLD 16
|
|
|
|
static void rvt_rc_timeout(struct timer_list *t);
|
|
+static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
|
|
+ enum ib_qp_type type);
|
|
|
|
/*
|
|
* Convert the AETH RNR timeout code into the number of microseconds.
|
|
@@ -452,40 +454,41 @@ no_qp_table:
|
|
}
|
|
|
|
/**
|
|
- * free_all_qps - check for QPs still in use
|
|
+ * rvt_free_qp_cb - callback function to reset a qp
|
|
+ * @qp: the qp to reset
|
|
+ * @v: a 64-bit value
|
|
+ *
|
|
+ * This function resets the qp and removes it from the
|
|
+ * qp hash table.
|
|
+ */
|
|
+static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
|
|
+{
|
|
+ unsigned int *qp_inuse = (unsigned int *)v;
|
|
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
|
|
+
|
|
+ /* Reset the qp and remove it from the qp hash list */
|
|
+ rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);
|
|
+
|
|
+ /* Increment the qp_inuse count */
|
|
+ (*qp_inuse)++;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * rvt_free_all_qps - check for QPs still in use
|
|
* @rdi: rvt device info structure
|
|
*
|
|
* There should not be any QPs still in use.
|
|
* Free memory for table.
|
|
+ * Return the number of QPs still in use.
|
|
*/
|
|
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
|
|
{
|
|
- unsigned long flags;
|
|
- struct rvt_qp *qp;
|
|
- unsigned n, qp_inuse = 0;
|
|
- spinlock_t *ql; /* work around too long line below */
|
|
-
|
|
- if (rdi->driver_f.free_all_qps)
|
|
- qp_inuse = rdi->driver_f.free_all_qps(rdi);
|
|
+ unsigned int qp_inuse = 0;
|
|
|
|
qp_inuse += rvt_mcast_tree_empty(rdi);
|
|
|
|
- if (!rdi->qp_dev)
|
|
- return qp_inuse;
|
|
+ rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);
|
|
|
|
- ql = &rdi->qp_dev->qpt_lock;
|
|
- spin_lock_irqsave(ql, flags);
|
|
- for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
|
|
- qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
|
|
- lockdep_is_held(ql));
|
|
- RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
|
|
-
|
|
- for (; qp; qp = rcu_dereference_protected(qp->next,
|
|
- lockdep_is_held(ql)))
|
|
- qp_inuse++;
|
|
- }
|
|
- spin_unlock_irqrestore(ql, flags);
|
|
- synchronize_rcu();
|
|
return qp_inuse;
|
|
}
|
|
|
|
@@ -902,14 +905,14 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
|
|
}
|
|
|
|
/**
|
|
- * rvt_reset_qp - initialize the QP state to the reset state
|
|
+ * _rvt_reset_qp - initialize the QP state to the reset state
|
|
* @qp: the QP to reset
|
|
* @type: the QP type
|
|
*
|
|
* r_lock, s_hlock, and s_lock are required to be held by the caller
|
|
*/
|
|
-static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
|
|
- enum ib_qp_type type)
|
|
+static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
|
|
+ enum ib_qp_type type)
|
|
__must_hold(&qp->s_lock)
|
|
__must_hold(&qp->s_hlock)
|
|
__must_hold(&qp->r_lock)
|
|
@@ -955,6 +958,27 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
|
|
lockdep_assert_held(&qp->s_lock);
|
|
}
|
|
|
|
+/**
|
|
+ * rvt_reset_qp - initialize the QP state to the reset state
|
|
+ * @rdi: the device info
|
|
+ * @qp: the QP to reset
|
|
+ * @type: the QP type
|
|
+ *
|
|
+ * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
|
|
+ * before calling _rvt_reset_qp().
|
|
+ */
|
|
+static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
|
|
+ enum ib_qp_type type)
|
|
+{
|
|
+ spin_lock_irq(&qp->r_lock);
|
|
+ spin_lock(&qp->s_hlock);
|
|
+ spin_lock(&qp->s_lock);
|
|
+ _rvt_reset_qp(rdi, qp, type);
|
|
+ spin_unlock(&qp->s_lock);
|
|
+ spin_unlock(&qp->s_hlock);
|
|
+ spin_unlock_irq(&qp->r_lock);
|
|
+}
|
|
+
|
|
/** rvt_free_qpn - Free a qpn from the bit map
|
|
* @qpt: QP table
|
|
* @qpn: queue pair number to free
|
|
@@ -1546,7 +1570,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
|
switch (new_state) {
|
|
case IB_QPS_RESET:
|
|
if (qp->state != IB_QPS_RESET)
|
|
- rvt_reset_qp(rdi, qp, ibqp->qp_type);
|
|
+ _rvt_reset_qp(rdi, qp, ibqp->qp_type);
|
|
break;
|
|
|
|
case IB_QPS_RTR:
|
|
@@ -1695,13 +1719,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
|
|
struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
|
|
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
|
|
|
|
- spin_lock_irq(&qp->r_lock);
|
|
- spin_lock(&qp->s_hlock);
|
|
- spin_lock(&qp->s_lock);
|
|
rvt_reset_qp(rdi, qp, ibqp->qp_type);
|
|
- spin_unlock(&qp->s_lock);
|
|
- spin_unlock(&qp->s_hlock);
|
|
- spin_unlock_irq(&qp->r_lock);
|
|
|
|
wait_event(qp->wait, !atomic_read(&qp->refcount));
|
|
/* qpn is now available for use again */
|
|
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
|
|
index 116cafc9afcf..4bc88708b355 100644
|
|
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
|
|
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
|
|
@@ -329,7 +329,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
|
|
qp->comp.psn = pkt->psn;
|
|
if (qp->req.wait_psn) {
|
|
qp->req.wait_psn = 0;
|
|
- rxe_run_task(&qp->req.task, 1);
|
|
+ rxe_run_task(&qp->req.task, 0);
|
|
}
|
|
}
|
|
return COMPST_ERROR_RETRY;
|
|
@@ -463,7 +463,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
|
|
*/
|
|
if (qp->req.wait_fence) {
|
|
qp->req.wait_fence = 0;
|
|
- rxe_run_task(&qp->req.task, 1);
|
|
+ rxe_run_task(&qp->req.task, 0);
|
|
}
|
|
}
|
|
|
|
@@ -479,7 +479,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
|
|
if (qp->req.need_rd_atomic) {
|
|
qp->comp.timeout_retry = 0;
|
|
qp->req.need_rd_atomic = 0;
|
|
- rxe_run_task(&qp->req.task, 1);
|
|
+ rxe_run_task(&qp->req.task, 0);
|
|
}
|
|
}
|
|
|
|
@@ -725,7 +725,7 @@ int rxe_completer(void *arg)
|
|
RXE_CNT_COMP_RETRY);
|
|
qp->req.need_retry = 1;
|
|
qp->comp.started_retry = 1;
|
|
- rxe_run_task(&qp->req.task, 1);
|
|
+ rxe_run_task(&qp->req.task, 0);
|
|
}
|
|
|
|
if (pkt) {
|
|
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
|
|
index 1ae6f8bba9ae..2c666fb34625 100644
|
|
--- a/drivers/input/mouse/synaptics.c
|
|
+++ b/drivers/input/mouse/synaptics.c
|
|
@@ -146,7 +146,6 @@ static const char * const topbuttonpad_pnp_ids[] = {
|
|
"LEN0042", /* Yoga */
|
|
"LEN0045",
|
|
"LEN0047",
|
|
- "LEN0049",
|
|
"LEN2000", /* S540 */
|
|
"LEN2001", /* Edge E431 */
|
|
"LEN2002", /* Edge E531 */
|
|
@@ -166,9 +165,11 @@ static const char * const smbus_pnp_ids[] = {
|
|
/* all of the topbuttonpad_pnp_ids are valid, we just add some extras */
|
|
"LEN0048", /* X1 Carbon 3 */
|
|
"LEN0046", /* X250 */
|
|
+ "LEN0049", /* Yoga 11e */
|
|
"LEN004a", /* W541 */
|
|
"LEN005b", /* P50 */
|
|
"LEN005e", /* T560 */
|
|
+ "LEN006c", /* T470s */
|
|
"LEN0071", /* T480 */
|
|
"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
|
|
"LEN0073", /* X1 Carbon G5 (Elantech) */
|
|
@@ -179,6 +180,7 @@ static const char * const smbus_pnp_ids[] = {
|
|
"LEN0097", /* X280 -> ALPS trackpoint */
|
|
"LEN009b", /* T580 */
|
|
"LEN200f", /* T450s */
|
|
+ "LEN2044", /* L470 */
|
|
"LEN2054", /* E480 */
|
|
"LEN2055", /* E580 */
|
|
"SYN3052", /* HP EliteBook 840 G4 */
|
|
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
|
|
index 105b7a7c0251..b3484def0a8b 100644
|
|
--- a/drivers/mmc/core/host.c
|
|
+++ b/drivers/mmc/core/host.c
|
|
@@ -176,7 +176,6 @@ int mmc_of_parse(struct mmc_host *host)
|
|
u32 bus_width, drv_type, cd_debounce_delay_ms;
|
|
int ret;
|
|
bool cd_cap_invert, cd_gpio_invert = false;
|
|
- bool ro_cap_invert, ro_gpio_invert = false;
|
|
|
|
if (!dev || !dev_fwnode(dev))
|
|
return 0;
|
|
@@ -255,9 +254,11 @@ int mmc_of_parse(struct mmc_host *host)
|
|
}
|
|
|
|
/* Parse Write Protection */
|
|
- ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
|
|
|
|
- ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert);
|
|
+ if (device_property_read_bool(dev, "wp-inverted"))
|
|
+ host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
|
|
+
|
|
+ ret = mmc_gpiod_request_ro(host, "wp", 0, 0, NULL);
|
|
if (!ret)
|
|
dev_info(host->parent, "Got WP GPIO\n");
|
|
else if (ret != -ENOENT && ret != -ENOSYS)
|
|
@@ -266,10 +267,6 @@ int mmc_of_parse(struct mmc_host *host)
|
|
if (device_property_read_bool(dev, "disable-wp"))
|
|
host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
|
|
|
|
- /* See the comment on CD inversion above */
|
|
- if (ro_cap_invert ^ ro_gpio_invert)
|
|
- host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
|
|
-
|
|
if (device_property_read_bool(dev, "cap-sd-highspeed"))
|
|
host->caps |= MMC_CAP_SD_HIGHSPEED;
|
|
if (device_property_read_bool(dev, "cap-mmc-highspeed"))
|
|
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
|
|
index da2596c5fa28..582ec3d720f6 100644
|
|
--- a/drivers/mmc/core/slot-gpio.c
|
|
+++ b/drivers/mmc/core/slot-gpio.c
|
|
@@ -241,6 +241,9 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
|
|
return ret;
|
|
}
|
|
|
|
+ if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH)
|
|
+ gpiod_toggle_active_low(desc);
|
|
+
|
|
if (gpio_invert)
|
|
*gpio_invert = !gpiod_is_active_low(desc);
|
|
|
|
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
|
|
index 024acc1b0a2e..b2bbcb09a49e 100644
|
|
--- a/drivers/mmc/host/pxamci.c
|
|
+++ b/drivers/mmc/host/pxamci.c
|
|
@@ -740,16 +740,16 @@ static int pxamci_probe(struct platform_device *pdev)
|
|
goto out;
|
|
}
|
|
|
|
+ if (!host->pdata->gpio_card_ro_invert)
|
|
+ mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
|
|
+
|
|
ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
|
|
if (ret && ret != -ENOENT) {
|
|
dev_err(dev, "Failed requesting gpio_ro\n");
|
|
goto out;
|
|
}
|
|
- if (!ret) {
|
|
+ if (!ret)
|
|
host->use_ro_gpio = true;
|
|
- mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
|
|
- 0 : MMC_CAP2_RO_ACTIVE_HIGH;
|
|
- }
|
|
|
|
if (host->pdata->init)
|
|
host->pdata->init(dev, pxamci_detect_irq, mmc);
|
|
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
|
|
index 1c988d6a2433..dccb4df46512 100644
|
|
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
|
|
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
|
|
@@ -1381,13 +1381,14 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
|
|
host->mmc->parent->platform_data);
|
|
/* write_protect */
|
|
if (boarddata->wp_type == ESDHC_WP_GPIO) {
|
|
+ host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
|
|
+
|
|
err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
|
|
if (err) {
|
|
dev_err(mmc_dev(host->mmc),
|
|
"failed to request write-protect gpio!\n");
|
|
return err;
|
|
}
|
|
- host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
|
|
}
|
|
|
|
/* card_detect */
|
|
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
|
|
index a6b7b242d516..e703827d27e9 100644
|
|
--- a/drivers/nvme/host/core.c
|
|
+++ b/drivers/nvme/host/core.c
|
|
@@ -3853,7 +3853,7 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
|
|
if (!log)
|
|
return;
|
|
|
|
- if (nvme_get_log(ctrl, NVME_NSID_ALL, 0, NVME_LOG_FW_SLOT, log,
|
|
+ if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
|
|
sizeof(*log), 0))
|
|
dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
|
|
kfree(log);
|
|
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
|
|
index e17fac20127e..5c9898e934d9 100644
|
|
--- a/drivers/s390/crypto/pkey_api.c
|
|
+++ b/drivers/s390/crypto/pkey_api.c
|
|
@@ -794,7 +794,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
|
|
return -EFAULT;
|
|
rc = cca_sec2protkey(ksp.cardnr, ksp.domain,
|
|
ksp.seckey.seckey, ksp.protkey.protkey,
|
|
- NULL, &ksp.protkey.type);
|
|
+ &ksp.protkey.len, &ksp.protkey.type);
|
|
DEBUG_DBG("%s cca_sec2protkey()=%d\n", __func__, rc);
|
|
if (rc)
|
|
break;
|
|
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
|
|
index 97acc2ba2912..de844b412110 100644
|
|
--- a/drivers/spmi/spmi-pmic-arb.c
|
|
+++ b/drivers/spmi/spmi-pmic-arb.c
|
|
@@ -731,6 +731,7 @@ static int qpnpint_irq_domain_translate(struct irq_domain *d,
|
|
return 0;
|
|
}
|
|
|
|
+static struct lock_class_key qpnpint_irq_lock_class, qpnpint_irq_request_class;
|
|
|
|
static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb,
|
|
struct irq_domain *domain, unsigned int virq,
|
|
@@ -746,6 +747,9 @@ static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb,
|
|
else
|
|
handler = handle_level_irq;
|
|
|
|
+
|
|
+ irq_set_lockdep_class(virq, &qpnpint_irq_lock_class,
|
|
+ &qpnpint_irq_request_class);
|
|
irq_domain_set_info(domain, virq, hwirq, &pmic_arb_irqchip, pmic_arb,
|
|
handler, NULL, NULL);
|
|
}
|
|
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
|
|
index 7becc5e96f92..b0ccca5d08b5 100644
|
|
--- a/fs/btrfs/disk-io.c
|
|
+++ b/fs/btrfs/disk-io.c
|
|
@@ -3167,6 +3167,7 @@ retry_root_backup:
|
|
/* do not make disk changes in broken FS or nologreplay is given */
|
|
if (btrfs_super_log_root(disk_super) != 0 &&
|
|
!btrfs_test_opt(fs_info, NOLOGREPLAY)) {
|
|
+ btrfs_info(fs_info, "start tree-log replay");
|
|
ret = btrfs_replay_log(fs_info, fs_devices);
|
|
if (ret) {
|
|
err = ret;
|
|
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
|
|
index 9d30acca55e1..043eec682ccd 100644
|
|
--- a/fs/btrfs/extent_map.c
|
|
+++ b/fs/btrfs/extent_map.c
|
|
@@ -233,6 +233,17 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
|
|
struct extent_map *merge = NULL;
|
|
struct rb_node *rb;
|
|
|
|
+ /*
|
|
+ * We can't modify an extent map that is in the tree and that is being
|
|
+ * used by another task, as it can cause that other task to see it in
|
|
+ * inconsistent state during the merging. We always have 1 reference for
|
|
+ * the tree and 1 for this task (which is unpinning the extent map or
|
|
+ * clearing the logging flag), so anything > 2 means it's being used by
|
|
+ * other tasks too.
|
|
+ */
|
|
+ if (refcount_read(&em->refs) > 2)
|
|
+ return;
|
|
+
|
|
if (em->start != 0) {
|
|
rb = rb_prev(&em->rb_node);
|
|
if (rb)
|
|
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
|
|
index b57f3618e58e..454a1015d026 100644
|
|
--- a/fs/btrfs/ref-verify.c
|
|
+++ b/fs/btrfs/ref-verify.c
|
|
@@ -744,6 +744,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
|
|
*/
|
|
be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
|
|
if (IS_ERR(be)) {
|
|
+ kfree(ref);
|
|
kfree(ra);
|
|
ret = PTR_ERR(be);
|
|
goto out;
|
|
@@ -757,6 +758,8 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
|
|
"re-allocated a block that still has references to it!");
|
|
dump_block_entry(fs_info, be);
|
|
dump_ref_action(fs_info, ra);
|
|
+ kfree(ref);
|
|
+ kfree(ra);
|
|
goto out_unlock;
|
|
}
|
|
|
|
@@ -819,6 +822,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
|
|
"dropping a ref for a existing root that doesn't have a ref on the block");
|
|
dump_block_entry(fs_info, be);
|
|
dump_ref_action(fs_info, ra);
|
|
+ kfree(ref);
|
|
kfree(ra);
|
|
goto out_unlock;
|
|
}
|
|
@@ -834,6 +838,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
|
|
"attempting to add another ref for an existing ref on a tree block");
|
|
dump_block_entry(fs_info, be);
|
|
dump_ref_action(fs_info, ra);
|
|
+ kfree(ref);
|
|
kfree(ra);
|
|
goto out_unlock;
|
|
}
|
|
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
|
|
index abcd93a3ca1d..aea24202cd35 100644
|
|
--- a/fs/btrfs/super.c
|
|
+++ b/fs/btrfs/super.c
|
|
@@ -1804,6 +1804,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
|
|
}
|
|
|
|
if (btrfs_super_log_root(fs_info->super_copy) != 0) {
|
|
+ btrfs_warn(fs_info,
|
|
+ "mount required to replay tree-log, cannot remount read-write");
|
|
ret = -EINVAL;
|
|
goto restore;
|
|
}
|
|
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
|
|
index 07d8ace61f77..637624ab6e46 100644
|
|
--- a/fs/cifs/cifsfs.c
|
|
+++ b/fs/cifs/cifsfs.c
|
|
@@ -414,7 +414,7 @@ cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
|
|
seq_puts(s, "ntlm");
|
|
break;
|
|
case Kerberos:
|
|
- seq_printf(s, "krb5,cruid=%u", from_kuid_munged(&init_user_ns,ses->cred_uid));
|
|
+ seq_puts(s, "krb5");
|
|
break;
|
|
case RawNTLMSSP:
|
|
seq_puts(s, "ntlmssp");
|
|
@@ -427,6 +427,10 @@ cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
|
|
|
|
if (ses->sign)
|
|
seq_puts(s, "i");
|
|
+
|
|
+ if (ses->sectype == Kerberos)
|
|
+ seq_printf(s, ",cruid=%u",
|
|
+ from_kuid_munged(&init_user_ns, ses->cred_uid));
|
|
}
|
|
|
|
static void
|
|
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
|
|
index 662256fa2a18..b75d208d4b2b 100644
|
|
--- a/fs/cifs/smb2ops.c
|
|
+++ b/fs/cifs/smb2ops.c
|
|
@@ -1087,7 +1087,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
|
|
void *data[1];
|
|
struct smb2_file_full_ea_info *ea = NULL;
|
|
struct kvec close_iov[1];
|
|
- int rc;
|
|
+ struct smb2_query_info_rsp *rsp;
|
|
+ int rc, used_len = 0;
|
|
|
|
if (smb3_encryption_required(tcon))
|
|
flags |= CIFS_TRANSFORM_REQ;
|
|
@@ -1110,6 +1111,38 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
|
|
cifs_sb);
|
|
if (rc == -ENODATA)
|
|
goto sea_exit;
|
|
+ } else {
|
|
+ /* If we are adding an attribute we should first check
|
|
+ * if there will be enough space available to store
|
|
+ * the new EA. If not we should not add it since we
|
|
+ * would not be able to even read the EAs back.
|
|
+ */
|
|
+ rc = smb2_query_info_compound(xid, tcon, utf16_path,
|
|
+ FILE_READ_EA,
|
|
+ FILE_FULL_EA_INFORMATION,
|
|
+ SMB2_O_INFO_FILE,
|
|
+ CIFSMaxBufSize -
|
|
+ MAX_SMB2_CREATE_RESPONSE_SIZE -
|
|
+ MAX_SMB2_CLOSE_RESPONSE_SIZE,
|
|
+ &rsp_iov[1], &resp_buftype[1], cifs_sb);
|
|
+ if (rc == 0) {
|
|
+ rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
|
|
+ used_len = le32_to_cpu(rsp->OutputBufferLength);
|
|
+ }
|
|
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
|
|
+ resp_buftype[1] = CIFS_NO_BUFFER;
|
|
+ memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
|
|
+ rc = 0;
|
|
+
|
|
+ /* Use a fudge factor of 256 bytes in case we collide
|
|
+ * with a different set_EAs command.
|
|
+ */
|
|
+ if(CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
|
|
+ MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
|
|
+ used_len + ea_name_len + ea_value_len + 1) {
|
|
+ rc = -ENOSPC;
|
|
+ goto sea_exit;
|
|
+ }
|
|
}
|
|
}
|
|
|
|
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
|
|
index d4d4fdfac1a6..ff8e1205127e 100644
|
|
--- a/fs/ext4/block_validity.c
|
|
+++ b/fs/ext4/block_validity.c
|
|
@@ -203,6 +203,7 @@ static int ext4_protect_reserved_inode(struct super_block *sb,
|
|
return PTR_ERR(inode);
|
|
num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
|
|
while (i < num) {
|
|
+ cond_resched();
|
|
map.m_lblk = i;
|
|
map.m_len = num - i;
|
|
n = ext4_map_blocks(NULL, inode, &map, 0);
|
|
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
|
|
index 5ef8d7ae231b..2743c6f8a457 100644
|
|
--- a/fs/ext4/dir.c
|
|
+++ b/fs/ext4/dir.c
|
|
@@ -130,12 +130,14 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
|
|
if (err != ERR_BAD_DX_DIR) {
|
|
return err;
|
|
}
|
|
- /*
|
|
- * We don't set the inode dirty flag since it's not
|
|
- * critical that it get flushed back to the disk.
|
|
- */
|
|
- ext4_clear_inode_flag(file_inode(file),
|
|
- EXT4_INODE_INDEX);
|
|
+ /* Can we just clear INDEX flag to ignore htree information? */
|
|
+ if (!ext4_has_metadata_csum(sb)) {
|
|
+ /*
|
|
+ * We don't set the inode dirty flag since it's not
|
|
+ * critical that it gets flushed back to the disk.
|
|
+ */
|
|
+ ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
|
|
+ }
|
|
}
|
|
|
|
if (ext4_has_inline_data(inode)) {
|
|
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
|
|
index 03db3e71676c..e2f65b565c1f 100644
|
|
--- a/fs/ext4/ext4.h
|
|
+++ b/fs/ext4/ext4.h
|
|
@@ -2476,8 +2476,11 @@ void ext4_insert_dentry(struct inode *inode,
|
|
struct ext4_filename *fname);
|
|
static inline void ext4_update_dx_flag(struct inode *inode)
|
|
{
|
|
- if (!ext4_has_feature_dir_index(inode->i_sb))
|
|
+ if (!ext4_has_feature_dir_index(inode->i_sb)) {
|
|
+ /* ext4_iget() should have caught this... */
|
|
+ WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
|
|
ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
|
|
+ }
|
|
}
|
|
static const unsigned char ext4_filetype_table[] = {
|
|
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
|
|
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
|
|
index 8bba6cd5e870..76a38ef5f226 100644
|
|
--- a/fs/ext4/inode.c
|
|
+++ b/fs/ext4/inode.c
|
|
@@ -4972,6 +4972,18 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
|
|
ret = -EFSCORRUPTED;
|
|
goto bad_inode;
|
|
}
|
|
+ /*
|
|
+ * If dir_index is not enabled but there's dir with INDEX flag set,
|
|
+ * we'd normally treat htree data as empty space. But with metadata
|
|
+ * checksumming enabled, that would corrupt the checksums, so forbid it.
|
|
+ */
|
|
+ if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
|
|
+ ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
|
|
+ ext4_error_inode(inode, function, line, 0,
|
|
+ "iget: Dir with htree data on filesystem without dir_index feature.");
|
|
+ ret = -EFSCORRUPTED;
|
|
+ goto bad_inode;
|
|
+ }
|
|
ei->i_disksize = inode->i_size;
|
|
#ifdef CONFIG_QUOTA
|
|
ei->i_reserved_quota = 0;
|
|
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
|
|
index 2305b4374fd3..9d00e0dd2ba9 100644
|
|
--- a/fs/ext4/mmp.c
|
|
+++ b/fs/ext4/mmp.c
|
|
@@ -120,10 +120,10 @@ void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
|
|
{
|
|
__ext4_warning(sb, function, line, "%s", msg);
|
|
__ext4_warning(sb, function, line,
|
|
- "MMP failure info: last update time: %llu, last update "
|
|
- "node: %s, last update device: %s",
|
|
- (long long unsigned int) le64_to_cpu(mmp->mmp_time),
|
|
- mmp->mmp_nodename, mmp->mmp_bdevname);
|
|
+ "MMP failure info: last update time: %llu, last update node: %.*s, last update device: %.*s",
|
|
+ (unsigned long long)le64_to_cpu(mmp->mmp_time),
|
|
+ (int)sizeof(mmp->mmp_nodename), mmp->mmp_nodename,
|
|
+ (int)sizeof(mmp->mmp_bdevname), mmp->mmp_bdevname);
|
|
}
|
|
|
|
/*
|
|
@@ -154,6 +154,7 @@ static int kmmpd(void *data)
|
|
mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
|
|
EXT4_MMP_MIN_CHECK_INTERVAL);
|
|
mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
|
|
+ BUILD_BUG_ON(sizeof(mmp->mmp_bdevname) < BDEVNAME_SIZE);
|
|
bdevname(bh->b_bdev, mmp->mmp_bdevname);
|
|
|
|
memcpy(mmp->mmp_nodename, init_utsname()->nodename,
|
|
@@ -375,7 +376,8 @@ skip:
|
|
/*
|
|
* Start a kernel thread to update the MMP block periodically.
|
|
*/
|
|
- EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s",
|
|
+ EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%.*s",
|
|
+ (int)sizeof(mmp->mmp_bdevname),
|
|
bdevname(bh->b_bdev,
|
|
mmp->mmp_bdevname));
|
|
if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
|
|
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
|
|
index f56402e9c11c..94d84910dc1e 100644
|
|
--- a/fs/ext4/namei.c
|
|
+++ b/fs/ext4/namei.c
|
|
@@ -2205,6 +2205,13 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
|
|
retval = ext4_dx_add_entry(handle, &fname, dir, inode);
|
|
if (!retval || (retval != ERR_BAD_DX_DIR))
|
|
goto out;
|
|
+ /* Can we just ignore htree data? */
|
|
+ if (ext4_has_metadata_csum(sb)) {
|
|
+ EXT4_ERROR_INODE(dir,
|
|
+ "Directory has corrupted htree index.");
|
|
+ retval = -EFSCORRUPTED;
|
|
+ goto out;
|
|
+ }
|
|
ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
|
|
dx_fallback++;
|
|
ext4_mark_inode_dirty(handle, dir);
|
|
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
|
|
index 66162b430edc..914230e63054 100644
|
|
--- a/fs/ext4/super.c
|
|
+++ b/fs/ext4/super.c
|
|
@@ -2961,17 +2961,11 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
|
|
return 0;
|
|
}
|
|
|
|
-#ifndef CONFIG_QUOTA
|
|
- if (ext4_has_feature_quota(sb) && !readonly) {
|
|
+#if !defined(CONFIG_QUOTA) || !defined(CONFIG_QFMT_V2)
|
|
+ if (!readonly && (ext4_has_feature_quota(sb) ||
|
|
+ ext4_has_feature_project(sb))) {
|
|
ext4_msg(sb, KERN_ERR,
|
|
- "Filesystem with quota feature cannot be mounted RDWR "
|
|
- "without CONFIG_QUOTA");
|
|
- return 0;
|
|
- }
|
|
- if (ext4_has_feature_project(sb) && !readonly) {
|
|
- ext4_msg(sb, KERN_ERR,
|
|
- "Filesystem with project quota feature cannot be mounted RDWR "
|
|
- "without CONFIG_QUOTA");
|
|
+ "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
|
|
return 0;
|
|
}
|
|
#endif /* CONFIG_QUOTA */
|
|
@@ -3765,6 +3759,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
|
|
*/
|
|
sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
|
|
|
|
+ blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
|
|
+ if (blocksize < EXT4_MIN_BLOCK_SIZE ||
|
|
+ blocksize > EXT4_MAX_BLOCK_SIZE) {
|
|
+ ext4_msg(sb, KERN_ERR,
|
|
+ "Unsupported filesystem blocksize %d (%d log_block_size)",
|
|
+ blocksize, le32_to_cpu(es->s_log_block_size));
|
|
+ goto failed_mount;
|
|
+ }
|
|
+
|
|
if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
|
|
sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
|
|
sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
|
|
@@ -3782,6 +3785,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
|
|
ext4_msg(sb, KERN_ERR,
|
|
"unsupported inode size: %d",
|
|
sbi->s_inode_size);
|
|
+ ext4_msg(sb, KERN_ERR, "blocksize: %d", blocksize);
|
|
goto failed_mount;
|
|
}
|
|
/*
|
|
@@ -3985,14 +3989,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
|
|
if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
|
|
goto failed_mount;
|
|
|
|
- blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
|
|
- if (blocksize < EXT4_MIN_BLOCK_SIZE ||
|
|
- blocksize > EXT4_MAX_BLOCK_SIZE) {
|
|
- ext4_msg(sb, KERN_ERR,
|
|
- "Unsupported filesystem blocksize %d (%d log_block_size)",
|
|
- blocksize, le32_to_cpu(es->s_log_block_size));
|
|
- goto failed_mount;
|
|
- }
|
|
if (le32_to_cpu(es->s_log_block_size) >
|
|
(EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
|
|
ext4_msg(sb, KERN_ERR,
|
|
@@ -5544,9 +5540,15 @@ static int ext4_statfs_project(struct super_block *sb,
|
|
return PTR_ERR(dquot);
|
|
spin_lock(&dquot->dq_dqb_lock);
|
|
|
|
- limit = (dquot->dq_dqb.dqb_bsoftlimit ?
|
|
- dquot->dq_dqb.dqb_bsoftlimit :
|
|
- dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
|
|
+ limit = 0;
|
|
+ if (dquot->dq_dqb.dqb_bsoftlimit &&
|
|
+ (!limit || dquot->dq_dqb.dqb_bsoftlimit < limit))
|
|
+ limit = dquot->dq_dqb.dqb_bsoftlimit;
|
|
+ if (dquot->dq_dqb.dqb_bhardlimit &&
|
|
+ (!limit || dquot->dq_dqb.dqb_bhardlimit < limit))
|
|
+ limit = dquot->dq_dqb.dqb_bhardlimit;
|
|
+ limit >>= sb->s_blocksize_bits;
|
|
+
|
|
if (limit && buf->f_blocks > limit) {
|
|
curblock = (dquot->dq_dqb.dqb_curspace +
|
|
dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
|
|
@@ -5556,9 +5558,14 @@ static int ext4_statfs_project(struct super_block *sb,
|
|
(buf->f_blocks - curblock) : 0;
|
|
}
|
|
|
|
- limit = dquot->dq_dqb.dqb_isoftlimit ?
|
|
- dquot->dq_dqb.dqb_isoftlimit :
|
|
- dquot->dq_dqb.dqb_ihardlimit;
|
|
+ limit = 0;
|
|
+ if (dquot->dq_dqb.dqb_isoftlimit &&
|
|
+ (!limit || dquot->dq_dqb.dqb_isoftlimit < limit))
|
|
+ limit = dquot->dq_dqb.dqb_isoftlimit;
|
|
+ if (dquot->dq_dqb.dqb_ihardlimit &&
|
|
+ (!limit || dquot->dq_dqb.dqb_ihardlimit < limit))
|
|
+ limit = dquot->dq_dqb.dqb_ihardlimit;
|
|
+
|
|
if (limit && buf->f_files > limit) {
|
|
buf->f_files = limit;
|
|
buf->f_ffree =
|
|
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
|
|
index c43591cd70f1..2a42904bcd62 100644
|
|
--- a/fs/jbd2/commit.c
|
|
+++ b/fs/jbd2/commit.c
|
|
@@ -974,29 +974,33 @@ restart_loop:
|
|
* it. */
|
|
|
|
/*
|
|
- * A buffer which has been freed while still being journaled by
|
|
- * a previous transaction.
|
|
- */
|
|
- if (buffer_freed(bh)) {
|
|
+ * A buffer which has been freed while still being journaled
|
|
+ * by a previous transaction, refile the buffer to BJ_Forget of
|
|
+ * the running transaction. If the just committed transaction
|
|
+ * contains "add to orphan" operation, we can completely
|
|
+ * invalidate the buffer now. We are rather thorough in that
|
|
+ * since the buffer may be still accessible when blocksize <
|
|
+ * pagesize and it is attached to the last partial page.
|
|
+ */
|
|
+ if (buffer_freed(bh) && !jh->b_next_transaction) {
|
|
+ struct address_space *mapping;
|
|
+
|
|
+ clear_buffer_freed(bh);
|
|
+ clear_buffer_jbddirty(bh);
|
|
+
|
|
/*
|
|
- * If the running transaction is the one containing
|
|
- * "add to orphan" operation (b_next_transaction !=
|
|
- * NULL), we have to wait for that transaction to
|
|
- * commit before we can really get rid of the buffer.
|
|
- * So just clear b_modified to not confuse transaction
|
|
- * credit accounting and refile the buffer to
|
|
- * BJ_Forget of the running transaction. If the just
|
|
- * committed transaction contains "add to orphan"
|
|
- * operation, we can completely invalidate the buffer
|
|
- * now. We are rather through in that since the
|
|
- * buffer may be still accessible when blocksize <
|
|
- * pagesize and it is attached to the last partial
|
|
- * page.
|
|
+ * Block device buffers need to stay mapped all the
|
|
+ * time, so it is enough to clear buffer_jbddirty and
|
|
+ * buffer_freed bits. For the file mapping buffers (i.e.
|
|
+ * journalled data) we need to unmap buffer and clear
|
|
+ * more bits. We also need to be careful about the check
|
|
+ * because the data page mapping can get cleared under
|
|
+ * our hands, in which case we also need not clear more bits
|
|
+ * because the page and buffers will be freed and can
|
|
+ * never be reused once we are done with them.
|
|
*/
|
|
- jh->b_modified = 0;
|
|
- if (!jh->b_next_transaction) {
|
|
- clear_buffer_freed(bh);
|
|
- clear_buffer_jbddirty(bh);
|
|
+ mapping = READ_ONCE(bh->b_page->mapping);
|
|
+ if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
|
|
clear_buffer_mapped(bh);
|
|
clear_buffer_new(bh);
|
|
clear_buffer_req(bh);
|
|
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
|
|
index bee8498d7792..3930c68a9c20 100644
|
|
--- a/fs/jbd2/transaction.c
|
|
+++ b/fs/jbd2/transaction.c
|
|
@@ -2296,14 +2296,16 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
|
|
return -EBUSY;
|
|
}
|
|
/*
|
|
- * OK, buffer won't be reachable after truncate. We just set
|
|
- * j_next_transaction to the running transaction (if there is
|
|
- * one) and mark buffer as freed so that commit code knows it
|
|
- * should clear dirty bits when it is done with the buffer.
|
|
+ * OK, buffer won't be reachable after truncate. We just clear
|
|
+ * b_modified to not confuse transaction credit accounting, and
|
|
+ * set j_next_transaction to the running transaction (if there
|
|
+ * is one) and mark buffer as freed so that commit code knows
|
|
+ * it should clear dirty bits when it is done with the buffer.
|
|
*/
|
|
set_buffer_freed(bh);
|
|
if (journal->j_running_transaction && buffer_jbddirty(bh))
|
|
jh->b_next_transaction = journal->j_running_transaction;
|
|
+ jh->b_modified = 0;
|
|
jbd2_journal_put_journal_head(jh);
|
|
spin_unlock(&journal->j_list_lock);
|
|
jbd_unlock_bh_state(bh);
|
|
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
|
|
index 423960d480f1..f808fb34b110 100644
|
|
--- a/fs/nfs/nfs4proc.c
|
|
+++ b/fs/nfs/nfs4proc.c
|
|
@@ -5293,7 +5293,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
|
|
hdr->timestamp = jiffies;
|
|
|
|
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
|
|
- nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1, 0);
|
|
+ nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
|
|
nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
|
|
}
|
|
|
|
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
|
|
index e5e041413581..d1fdf26ccb33 100644
|
|
--- a/include/acpi/acpixf.h
|
|
+++ b/include/acpi/acpixf.h
|
|
@@ -748,6 +748,7 @@ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u3
|
|
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
|
|
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
|
|
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
|
|
+ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void))
|
|
|
|
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
|
|
acpi_get_gpe_device(u32 gpe_index,
|
|
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
|
|
index b70af921c614..803bb63dd5ff 100644
|
|
--- a/include/linux/gpio/consumer.h
|
|
+++ b/include/linux/gpio/consumer.h
|
|
@@ -158,6 +158,7 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
|
|
|
|
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
|
|
int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
|
|
+void gpiod_toggle_active_low(struct gpio_desc *desc);
|
|
|
|
int gpiod_is_active_low(const struct gpio_desc *desc);
|
|
int gpiod_cansleep(const struct gpio_desc *desc);
|
|
@@ -479,6 +480,12 @@ static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
|
|
return -ENOSYS;
|
|
}
|
|
|
|
+static inline void gpiod_toggle_active_low(struct gpio_desc *desc)
|
|
+{
|
|
+ /* GPIO can never have been requested */
|
|
+ WARN_ON(desc);
|
|
+}
|
|
+
|
|
static inline int gpiod_is_active_low(const struct gpio_desc *desc)
|
|
{
|
|
/* GPIO can never have been requested */
|
|
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
|
|
index 6fc8843f1c9e..cd97d2c8840c 100644
|
|
--- a/include/linux/suspend.h
|
|
+++ b/include/linux/suspend.h
|
|
@@ -191,7 +191,7 @@ struct platform_s2idle_ops {
|
|
int (*begin)(void);
|
|
int (*prepare)(void);
|
|
int (*prepare_late)(void);
|
|
- void (*wake)(void);
|
|
+ bool (*wake)(void);
|
|
void (*restore_early)(void);
|
|
void (*restore)(void);
|
|
void (*end)(void);
|
|
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
|
|
index f3b7239f1892..27f149f5d4a9 100644
|
|
--- a/kernel/power/suspend.c
|
|
+++ b/kernel/power/suspend.c
|
|
@@ -131,11 +131,12 @@ static void s2idle_loop(void)
|
|
* to avoid them upfront.
|
|
*/
|
|
for (;;) {
|
|
- if (s2idle_ops && s2idle_ops->wake)
|
|
- s2idle_ops->wake();
|
|
-
|
|
- if (pm_wakeup_pending())
|
|
+ if (s2idle_ops && s2idle_ops->wake) {
|
|
+ if (s2idle_ops->wake())
|
|
+ break;
|
|
+ } else if (pm_wakeup_pending()) {
|
|
break;
|
|
+ }
|
|
|
|
pm_wakeup_clear(false);
|
|
|
|
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
|
|
index 00743684a549..dfaefb175ba0 100644
|
|
--- a/kernel/sched/core.c
|
|
+++ b/kernel/sched/core.c
|
|
@@ -7250,7 +7250,7 @@ capacity_from_percent(char *buf)
|
|
&req.percent);
|
|
if (req.ret)
|
|
return req;
|
|
- if (req.percent > UCLAMP_PERCENT_SCALE) {
|
|
+ if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
|
|
req.ret = -ERANGE;
|
|
return req;
|
|
}
|
|
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
|
|
index 54dd8849d1cc..1e3b9d34aaa4 100644
|
|
--- a/net/mac80211/mlme.c
|
|
+++ b/net/mac80211/mlme.c
|
|
@@ -8,7 +8,7 @@
|
|
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
|
|
* Copyright 2013-2014 Intel Mobile Communications GmbH
|
|
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
|
|
- * Copyright (C) 2018 - 2019 Intel Corporation
|
|
+ * Copyright (C) 2018 - 2020 Intel Corporation
|
|
*/
|
|
|
|
#include <linux/delay.h>
|
|
@@ -1311,7 +1311,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
|
|
if (!res) {
|
|
ch_switch.timestamp = timestamp;
|
|
ch_switch.device_timestamp = device_timestamp;
|
|
- ch_switch.block_tx = beacon ? csa_ie.mode : 0;
|
|
+ ch_switch.block_tx = csa_ie.mode;
|
|
ch_switch.chandef = csa_ie.chandef;
|
|
ch_switch.count = csa_ie.count;
|
|
ch_switch.delay = csa_ie.max_switch_time;
|
|
@@ -1404,7 +1404,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
|
|
|
|
sdata->vif.csa_active = true;
|
|
sdata->csa_chandef = csa_ie.chandef;
|
|
- sdata->csa_block_tx = ch_switch.block_tx;
|
|
+ sdata->csa_block_tx = csa_ie.mode;
|
|
ifmgd->csa_ignored_same_chan = false;
|
|
|
|
if (sdata->csa_block_tx)
|
|
@@ -1438,7 +1438,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
|
|
* reset when the disconnection worker runs.
|
|
*/
|
|
sdata->vif.csa_active = true;
|
|
- sdata->csa_block_tx = ch_switch.block_tx;
|
|
+ sdata->csa_block_tx = csa_ie.mode;
|
|
|
|
ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
|
|
mutex_unlock(&local->chanctx_mtx);
|
|
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
|
|
index 9901a811f598..0ad45a8fe3fb 100644
|
|
--- a/net/sunrpc/xprtrdma/frwr_ops.c
|
|
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
|
|
@@ -326,8 +326,8 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
|
|
{
|
|
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
|
|
struct ib_reg_wr *reg_wr;
|
|
+ int i, n, dma_nents;
|
|
struct ib_mr *ibmr;
|
|
- int i, n;
|
|
u8 key;
|
|
|
|
if (nsegs > ia->ri_max_frwr_depth)
|
|
@@ -351,15 +351,16 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
|
|
break;
|
|
}
|
|
mr->mr_dir = rpcrdma_data_dir(writing);
|
|
+ mr->mr_nents = i;
|
|
|
|
- mr->mr_nents =
|
|
- ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, i, mr->mr_dir);
|
|
- if (!mr->mr_nents)
|
|
+ dma_nents = ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, mr->mr_nents,
|
|
+ mr->mr_dir);
|
|
+ if (!dma_nents)
|
|
goto out_dmamap_err;
|
|
|
|
ibmr = mr->frwr.fr_mr;
|
|
- n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
|
|
- if (unlikely(n != mr->mr_nents))
|
|
+ n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
|
|
+ if (n != dma_nents)
|
|
goto out_mapmr_err;
|
|
|
|
ibmr->iova &= 0x00000000ffffffff;
|
|
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
|
|
index 68832f52c1ad..a66d4be3516e 100644
|
|
--- a/sound/pci/hda/patch_realtek.c
|
|
+++ b/sound/pci/hda/patch_realtek.c
|
|
@@ -2447,6 +2447,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
|
|
SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
|
|
SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
|
|
+ SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
|
|
SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
|
|
SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
|
|
SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
|
|
@@ -5701,8 +5702,11 @@ static void alc_fixup_headset_jack(struct hda_codec *codec,
|
|
break;
|
|
case HDA_FIXUP_ACT_INIT:
|
|
switch (codec->core.vendor_id) {
|
|
+ case 0x10ec0215:
|
|
case 0x10ec0225:
|
|
+ case 0x10ec0285:
|
|
case 0x10ec0295:
|
|
+ case 0x10ec0289:
|
|
case 0x10ec0299:
|
|
alc_write_coef_idx(codec, 0x48, 0xd011);
|
|
alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
|
|
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
|
|
index 6b8c14f9b5d4..a48313dfa967 100644
|
|
--- a/sound/usb/clock.c
|
|
+++ b/sound/usb/clock.c
|
|
@@ -151,8 +151,34 @@ static int uac_clock_selector_set_val(struct snd_usb_audio *chip, int selector_i
|
|
return ret;
|
|
}
|
|
|
|
+/*
|
|
+ * Assume the clock is valid if clock source supports only one single sample
|
|
+ * rate, the terminal is connected directly to it (there is no clock selector)
|
|
+ * and clock type is internal. This is to deal with some Denon DJ controllers
|
|
+ * that always report that the clock is invalid.
|
|
+ */
|
|
+static bool uac_clock_source_is_valid_quirk(struct snd_usb_audio *chip,
|
|
+ struct audioformat *fmt,
|
|
+ int source_id)
|
|
+{
|
|
+ if (fmt->protocol == UAC_VERSION_2) {
|
|
+ struct uac_clock_source_descriptor *cs_desc =
|
|
+ snd_usb_find_clock_source(chip->ctrl_intf, source_id);
|
|
+
|
|
+ if (!cs_desc)
|
|
+ return false;
|
|
+
|
|
+ return (fmt->nr_rates == 1 &&
|
|
+ (fmt->clock & 0xff) == cs_desc->bClockID &&
|
|
+ (cs_desc->bmAttributes & 0x3) !=
|
|
+ UAC_CLOCK_SOURCE_TYPE_EXT);
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
|
|
- int protocol,
|
|
+ struct audioformat *fmt,
|
|
int source_id)
|
|
{
|
|
int err;
|
|
@@ -160,26 +186,26 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
|
|
struct usb_device *dev = chip->dev;
|
|
u32 bmControls;
|
|
|
|
- if (protocol == UAC_VERSION_3) {
|
|
+ if (fmt->protocol == UAC_VERSION_3) {
|
|
struct uac3_clock_source_descriptor *cs_desc =
|
|
snd_usb_find_clock_source_v3(chip->ctrl_intf, source_id);
|
|
|
|
if (!cs_desc)
|
|
- return 0;
|
|
+ return false;
|
|
bmControls = le32_to_cpu(cs_desc->bmControls);
|
|
} else { /* UAC_VERSION_1/2 */
|
|
struct uac_clock_source_descriptor *cs_desc =
|
|
snd_usb_find_clock_source(chip->ctrl_intf, source_id);
|
|
|
|
if (!cs_desc)
|
|
- return 0;
|
|
+ return false;
|
|
bmControls = cs_desc->bmControls;
|
|
}
|
|
|
|
/* If a clock source can't tell us whether it's valid, we assume it is */
|
|
if (!uac_v2v3_control_is_readable(bmControls,
|
|
UAC2_CS_CONTROL_CLOCK_VALID))
|
|
- return 1;
|
|
+ return true;
|
|
|
|
err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
|
|
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
|
|
@@ -191,13 +217,17 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
|
|
dev_warn(&dev->dev,
|
|
"%s(): cannot get clock validity for id %d\n",
|
|
__func__, source_id);
|
|
- return 0;
|
|
+ return false;
|
|
}
|
|
|
|
- return !!data;
|
|
+ if (data)
|
|
+ return true;
|
|
+ else
|
|
+ return uac_clock_source_is_valid_quirk(chip, fmt, source_id);
|
|
}
|
|
|
|
-static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
|
|
+static int __uac_clock_find_source(struct snd_usb_audio *chip,
|
|
+ struct audioformat *fmt, int entity_id,
|
|
unsigned long *visited, bool validate)
|
|
{
|
|
struct uac_clock_source_descriptor *source;
|
|
@@ -217,7 +247,7 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
|
|
source = snd_usb_find_clock_source(chip->ctrl_intf, entity_id);
|
|
if (source) {
|
|
entity_id = source->bClockID;
|
|
- if (validate && !uac_clock_source_is_valid(chip, UAC_VERSION_2,
|
|
+ if (validate && !uac_clock_source_is_valid(chip, fmt,
|
|
entity_id)) {
|
|
usb_audio_err(chip,
|
|
"clock source %d is not valid, cannot use\n",
|
|
@@ -248,8 +278,9 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
|
|
}
|
|
|
|
cur = ret;
|
|
- ret = __uac_clock_find_source(chip, selector->baCSourceID[ret - 1],
|
|
- visited, validate);
|
|
+ ret = __uac_clock_find_source(chip, fmt,
|
|
+ selector->baCSourceID[ret - 1],
|
|
+ visited, validate);
|
|
if (!validate || ret > 0 || !chip->autoclock)
|
|
return ret;
|
|
|
|
@@ -260,8 +291,9 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
|
|
if (i == cur)
|
|
continue;
|
|
|
|
- ret = __uac_clock_find_source(chip, selector->baCSourceID[i - 1],
|
|
- visited, true);
|
|
+ ret = __uac_clock_find_source(chip, fmt,
|
|
+ selector->baCSourceID[i - 1],
|
|
+ visited, true);
|
|
if (ret < 0)
|
|
continue;
|
|
|
|
@@ -281,14 +313,16 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
|
|
/* FIXME: multipliers only act as pass-thru element for now */
|
|
multiplier = snd_usb_find_clock_multiplier(chip->ctrl_intf, entity_id);
|
|
if (multiplier)
|
|
- return __uac_clock_find_source(chip, multiplier->bCSourceID,
|
|
- visited, validate);
|
|
+ return __uac_clock_find_source(chip, fmt,
|
|
+ multiplier->bCSourceID,
|
|
+ visited, validate);
|
|
|
|
return -EINVAL;
|
|
}
|
|
|
|
-static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
|
|
- unsigned long *visited, bool validate)
|
|
+static int __uac3_clock_find_source(struct snd_usb_audio *chip,
|
|
+ struct audioformat *fmt, int entity_id,
|
|
+ unsigned long *visited, bool validate)
|
|
{
|
|
struct uac3_clock_source_descriptor *source;
|
|
struct uac3_clock_selector_descriptor *selector;
|
|
@@ -307,7 +341,7 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
|
|
source = snd_usb_find_clock_source_v3(chip->ctrl_intf, entity_id);
|
|
if (source) {
|
|
entity_id = source->bClockID;
|
|
- if (validate && !uac_clock_source_is_valid(chip, UAC_VERSION_3,
|
|
+ if (validate && !uac_clock_source_is_valid(chip, fmt,
|
|
entity_id)) {
|
|
usb_audio_err(chip,
|
|
"clock source %d is not valid, cannot use\n",
|
|
@@ -338,7 +372,8 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
|
|
}
|
|
|
|
cur = ret;
|
|
- ret = __uac3_clock_find_source(chip, selector->baCSourceID[ret - 1],
|
|
+ ret = __uac3_clock_find_source(chip, fmt,
|
|
+ selector->baCSourceID[ret - 1],
|
|
visited, validate);
|
|
if (!validate || ret > 0 || !chip->autoclock)
|
|
return ret;
|
|
@@ -350,8 +385,9 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
|
|
if (i == cur)
|
|
continue;
|
|
|
|
- ret = __uac3_clock_find_source(chip, selector->baCSourceID[i - 1],
|
|
- visited, true);
|
|
+ ret = __uac3_clock_find_source(chip, fmt,
|
|
+ selector->baCSourceID[i - 1],
|
|
+ visited, true);
|
|
if (ret < 0)
|
|
continue;
|
|
|
|
@@ -372,7 +408,8 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
|
|
multiplier = snd_usb_find_clock_multiplier_v3(chip->ctrl_intf,
|
|
entity_id);
|
|
if (multiplier)
|
|
- return __uac3_clock_find_source(chip, multiplier->bCSourceID,
|
|
+ return __uac3_clock_find_source(chip, fmt,
|
|
+ multiplier->bCSourceID,
|
|
visited, validate);
|
|
|
|
return -EINVAL;
|
|
@@ -389,18 +426,18 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
|
|
*
|
|
* Returns the clock source UnitID (>=0) on success, or an error.
|
|
*/
|
|
-int snd_usb_clock_find_source(struct snd_usb_audio *chip, int protocol,
|
|
- int entity_id, bool validate)
|
|
+int snd_usb_clock_find_source(struct snd_usb_audio *chip,
|
|
+ struct audioformat *fmt, bool validate)
|
|
{
|
|
DECLARE_BITMAP(visited, 256);
|
|
memset(visited, 0, sizeof(visited));
|
|
|
|
- switch (protocol) {
|
|
+ switch (fmt->protocol) {
|
|
case UAC_VERSION_2:
|
|
- return __uac_clock_find_source(chip, entity_id, visited,
|
|
+ return __uac_clock_find_source(chip, fmt, fmt->clock, visited,
|
|
validate);
|
|
case UAC_VERSION_3:
|
|
- return __uac3_clock_find_source(chip, entity_id, visited,
|
|
+ return __uac3_clock_find_source(chip, fmt, fmt->clock, visited,
|
|
validate);
|
|
default:
|
|
return -EINVAL;
|
|
@@ -501,8 +538,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
|
|
* automatic clock selection if the current clock is not
|
|
* valid.
|
|
*/
|
|
- clock = snd_usb_clock_find_source(chip, fmt->protocol,
|
|
- fmt->clock, true);
|
|
+ clock = snd_usb_clock_find_source(chip, fmt, true);
|
|
if (clock < 0) {
|
|
/* We did not find a valid clock, but that might be
|
|
* because the current sample rate does not match an
|
|
@@ -510,8 +546,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
|
|
* and we will do another validation after setting the
|
|
* rate.
|
|
*/
|
|
- clock = snd_usb_clock_find_source(chip, fmt->protocol,
|
|
- fmt->clock, false);
|
|
+ clock = snd_usb_clock_find_source(chip, fmt, false);
|
|
if (clock < 0)
|
|
return clock;
|
|
}
|
|
@@ -577,7 +612,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
|
|
|
|
validation:
|
|
/* validate clock after rate change */
|
|
- if (!uac_clock_source_is_valid(chip, fmt->protocol, clock))
|
|
+ if (!uac_clock_source_is_valid(chip, fmt, clock))
|
|
return -ENXIO;
|
|
return 0;
|
|
}
|
|
diff --git a/sound/usb/clock.h b/sound/usb/clock.h
|
|
index 076e31b79ee0..68df0fbe09d0 100644
|
|
--- a/sound/usb/clock.h
|
|
+++ b/sound/usb/clock.h
|
|
@@ -6,7 +6,7 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface,
|
|
struct usb_host_interface *alts,
|
|
struct audioformat *fmt, int rate);
|
|
|
|
-int snd_usb_clock_find_source(struct snd_usb_audio *chip, int protocol,
|
|
- int entity_id, bool validate);
|
|
+int snd_usb_clock_find_source(struct snd_usb_audio *chip,
|
|
+ struct audioformat *fmt, bool validate);
|
|
|
|
#endif /* __USBAUDIO_CLOCK_H */
|
|
diff --git a/sound/usb/format.c b/sound/usb/format.c
|
|
index d79db71305f6..25668ba5e68e 100644
|
|
--- a/sound/usb/format.c
|
|
+++ b/sound/usb/format.c
|
|
@@ -322,8 +322,7 @@ static int parse_audio_format_rates_v2v3(struct snd_usb_audio *chip,
|
|
struct usb_device *dev = chip->dev;
|
|
unsigned char tmp[2], *data;
|
|
int nr_triplets, data_size, ret = 0, ret_l6;
|
|
- int clock = snd_usb_clock_find_source(chip, fp->protocol,
|
|
- fp->clock, false);
|
|
+ int clock = snd_usb_clock_find_source(chip, fp, false);
|
|
|
|
if (clock < 0) {
|
|
dev_err(&dev->dev,
|
|
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
|
|
index 6cd4ff09c5ee..d2a050bb8341 100644
|
|
--- a/sound/usb/mixer.c
|
|
+++ b/sound/usb/mixer.c
|
|
@@ -897,6 +897,15 @@ static int parse_term_proc_unit(struct mixer_build *state,
|
|
return 0;
|
|
}
|
|
|
|
+static int parse_term_effect_unit(struct mixer_build *state,
|
|
+ struct usb_audio_term *term,
|
|
+ void *p1, int id)
|
|
+{
|
|
+ term->type = UAC3_EFFECT_UNIT << 16; /* virtual type */
|
|
+ term->id = id;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static int parse_term_uac2_clock_source(struct mixer_build *state,
|
|
struct usb_audio_term *term,
|
|
void *p1, int id)
|
|
@@ -981,8 +990,7 @@ static int __check_input_term(struct mixer_build *state, int id,
|
|
UAC3_PROCESSING_UNIT);
|
|
case PTYPE(UAC_VERSION_2, UAC2_EFFECT_UNIT):
|
|
case PTYPE(UAC_VERSION_3, UAC3_EFFECT_UNIT):
|
|
- return parse_term_proc_unit(state, term, p1, id,
|
|
- UAC3_EFFECT_UNIT);
|
|
+ return parse_term_effect_unit(state, term, p1, id);
|
|
case PTYPE(UAC_VERSION_1, UAC1_EXTENSION_UNIT):
|
|
case PTYPE(UAC_VERSION_2, UAC2_EXTENSION_UNIT_V2):
|
|
case PTYPE(UAC_VERSION_3, UAC3_EXTENSION_UNIT):
|
|
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
|
|
index 82184036437b..1ed25b1d2a6a 100644
|
|
--- a/sound/usb/quirks.c
|
|
+++ b/sound/usb/quirks.c
|
|
@@ -1402,6 +1402,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
|
|
case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
|
|
case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
|
|
case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
|
|
+ case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
|
|
return true;
|
|
}
|
|
|
|
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
|
|
index 2c41d47f6f83..90d23cc3c8d4 100644
|
|
--- a/tools/perf/util/stat-shadow.c
|
|
+++ b/tools/perf/util/stat-shadow.c
|
|
@@ -18,7 +18,6 @@
|
|
* AGGR_NONE: Use matching CPU
|
|
* AGGR_THREAD: Not supported?
|
|
*/
|
|
-static bool have_frontend_stalled;
|
|
|
|
struct runtime_stat rt_stat;
|
|
struct stats walltime_nsecs_stats;
|
|
@@ -144,7 +143,6 @@ void runtime_stat__exit(struct runtime_stat *st)
|
|
|
|
void perf_stat__init_shadow_stats(void)
|
|
{
|
|
- have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
|
|
runtime_stat__init(&rt_stat);
|
|
}
|
|
|
|
@@ -853,10 +851,6 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
|
|
print_metric(config, ctxp, NULL, "%7.2f ",
|
|
"stalled cycles per insn",
|
|
ratio);
|
|
- } else if (have_frontend_stalled) {
|
|
- out->new_line(config, ctxp);
|
|
- print_metric(config, ctxp, NULL, "%7.2f ",
|
|
- "stalled cycles per insn", 0);
|
|
}
|
|
} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
|
|
if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)
|