Mirror of https://github.com/Fishwaldo/build.git, synced 2025-07-22 12:58:30 +00:00
diff --git a/Documentation/admin-guide/ext4.rst b/Documentation/admin-guide/ext4.rst
index 9443fcef18760..f37d0743fd668 100644
--- a/Documentation/admin-guide/ext4.rst
+++ b/Documentation/admin-guide/ext4.rst
@@ -482,6 +482,9 @@ Files in /sys/fs/ext4/<devname>:
multiple of this tuning parameter if the stripe size is not set in the
ext4 superblock

+ mb_max_inode_prealloc
+ The maximum length of per-inode ext4_prealloc_space list.
+
mb_max_to_scan
The maximum number of extents the multiblock allocator will search to
find the best extent.
diff --git a/Makefile b/Makefile
index f47073a3b4740..5cf35650373b1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 8
-SUBLEVEL = 5
+SUBLEVEL = 6
EXTRAVERSION =
NAME = Kleptomaniac Octopus
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 760a68c163c83..b2ff27af090ec 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -772,7 +772,7 @@
fsl,tmr-prsc = <2>;
fsl,tmr-add = <0xaaaaaaab>;
fsl,tmr-fiper1 = <999999995>;
- fsl,tmr-fiper2 = <99990>;
+ fsl,tmr-fiper2 = <999999995>;
fsl,max-adj = <499999999>;
fsl,extts-fifo;
};
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 91e377770a6b8..d5fe7c9e0be1d 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -158,7 +158,8 @@ zinstall install:
PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
- $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@
+ $(if $(CONFIG_COMPAT_VDSO), \
+ $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)

# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
index 5785bf0a807ce..591f48a575353 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
@@ -569,7 +569,7 @@
pins = "gpio63", "gpio64", "gpio65", "gpio66",
"gpio67", "gpio68";
drive-strength = <2>;
- bias-disable;
+ bias-pull-down;
};
};
};
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 51c1d99189992..1da8e3dc44555 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -71,11 +71,12 @@
* IMO: Override CPSR.I and enable signaling with VI
* FMO: Override CPSR.F and enable signaling with VF
* SWIO: Turn set/way invalidates into set/way clean+invalidate
+ * PTW: Take a stage2 fault if a stage1 walk steps in device memory
*/
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
HCR_BSU_IS | HCR_FB | HCR_TAC | \
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
- HCR_FMO | HCR_IMO)
+ HCR_FMO | HCR_IMO | HCR_PTW )
#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index a0c8a0b652593..0eadbf933e359 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -46,7 +46,12 @@ DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
* Logical CPU mapping.
*/
extern u64 __cpu_logical_map[NR_CPUS];
-#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
+extern u64 cpu_logical_map(int cpu);
+
+static inline void set_cpu_logical_map(int cpu, u64 hwid)
+{
+ __cpu_logical_map[cpu] = hwid;
+}

struct seq_file;
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 79728bfb5351f..2c0b82db825ba 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -910,6 +910,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.desc = "ARM erratum 1418040",
.capability = ARM64_WORKAROUND_1418040,
ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
+ .type = (ARM64_CPUCAP_SCOPE_LOCAL_CPU |
+ ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU),
},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 35de8ba60e3d5..44445d471442d 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -169,19 +169,6 @@ alternative_cb_end
stp x28, x29, [sp, #16 * 14]

.if \el == 0
- .if \regsize == 32
- /*
- * If we're returning from a 32-bit task on a system affected by
- * 1418040 then re-enable userspace access to the virtual counter.
- */
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if ARM64_WORKAROUND_1418040
- mrs x0, cntkctl_el1
- orr x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
- msr cntkctl_el1, x0
-alternative_else_nop_endif
-#endif
- .endif
clear_gp_regs
mrs x21, sp_el0
ldr_this_cpu tsk, __entry_task, x20
@@ -337,14 +324,6 @@ alternative_else_nop_endif
tst x22, #PSR_MODE32_BIT // native task?
b.eq 3f

-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if ARM64_WORKAROUND_1418040
- mrs x0, cntkctl_el1
- bic x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
- msr cntkctl_el1, x0
-alternative_else_nop_endif
-#endif
-
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 6089638c7d43f..d8a10cf28f827 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -515,6 +515,39 @@ static void entry_task_switch(struct task_struct *next)
__this_cpu_write(__entry_task, next);
}

+/*
+ * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
+ * Assuming the virtual counter is enabled at the beginning of times:
+ *
+ * - disable access when switching from a 64bit task to a 32bit task
+ * - enable access when switching from a 32bit task to a 64bit task
+ */
+static void erratum_1418040_thread_switch(struct task_struct *prev,
+ struct task_struct *next)
+{
+ bool prev32, next32;
+ u64 val;
+
+ if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
+ cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
+ return;
+
+ prev32 = is_compat_thread(task_thread_info(prev));
+ next32 = is_compat_thread(task_thread_info(next));
+
+ if (prev32 == next32)
+ return;
+
+ val = read_sysreg(cntkctl_el1);
+
+ if (!next32)
+ val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+ else
+ val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
+
+ write_sysreg(val, cntkctl_el1);
+}
+
/*
* Thread switching.
*/
@@ -530,6 +563,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
entry_task_switch(next);
uao_thread_switch(next);
ssbs_thread_switch(next);
+ erratum_1418040_thread_switch(prev, next);

/*
* Complete any pending TLB or cache maintenance on this CPU in case
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 93b3844cf4421..07b7940951e28 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -85,7 +85,7 @@ u64 __cacheline_aligned boot_args[4];
void __init smp_setup_processor_id(void)
{
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
- cpu_logical_map(0) = mpidr;
+ set_cpu_logical_map(0, mpidr);

/*
* clear __my_cpu_offset on boot CPU to avoid hang caused by
@@ -276,6 +276,12 @@ arch_initcall(reserve_memblock_reserved_regions);

u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

+u64 cpu_logical_map(int cpu)
+{
+ return __cpu_logical_map[cpu];
+}
+EXPORT_SYMBOL_GPL(cpu_logical_map);
+
void __init setup_arch(char **cmdline_p)
{
init_mm.start_code = (unsigned long) _text;
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index e43a8ff19f0f6..8cd6316a0d833 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -567,7 +567,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
return;

/* map the logical cpu id to cpu MPIDR */
- cpu_logical_map(cpu_count) = hwid;
+ set_cpu_logical_map(cpu_count, hwid);

cpu_madt_gicc[cpu_count] = *processor;

@@ -681,7 +681,7 @@ static void __init of_parse_and_init_cpus(void)
goto next;

pr_debug("cpu logical map 0x%llx\n", hwid);
- cpu_logical_map(cpu_count) = hwid;
+ set_cpu_logical_map(cpu_count, hwid);

early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
@@ -722,7 +722,7 @@ void __init smp_init_cpus(void)
for (i = 1; i < nr_cpu_ids; i++) {
if (cpu_logical_map(i) != INVALID_HWID) {
if (smp_cpu_setup(i))
- cpu_logical_map(i) = INVALID_HWID;
+ set_cpu_logical_map(i, INVALID_HWID);
}
}
}
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index db1c4487d95d1..9270b14157b55 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -897,7 +897,7 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
* making sure it is a kernel address and not a PC-relative
* reference.
*/
- asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));
+ asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string));

__hyp_do_panic(str_va,
spsr, elr,
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a7e40bb1e5bc6..c43ad3b3cea4b 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2203,6 +2203,7 @@ endchoice

config KVM_GUEST
bool "KVM Guest Kernel"
+ depends on CPU_MIPS32_R2
depends on BROKEN_ON_SMP
help
Select this option if building a guest kernel for KVM (Trap & Emulate)
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index 2bf02d849a3a8..032b3fca6cbba 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -37,10 +37,11 @@ choice

config KVM_MIPS_TE
bool "Trap & Emulate"
+ depends on CPU_MIPS32_R2
help
Use trap and emulate to virtualize 32-bit guests in user mode. This
does not require any special hardware Virtualization support beyond
- standard MIPS32/64 r2 or later, but it does require the guest kernel
+ standard MIPS32 r2 or later, but it does require the guest kernel
to be configured with CONFIG_KVM_GUEST=y so that it resides in the
user address segment.
diff --git a/arch/mips/vdso/genvdso.c b/arch/mips/vdso/genvdso.c
index be57b832bbe0a..ccba50ec8a40e 100644
--- a/arch/mips/vdso/genvdso.c
+++ b/arch/mips/vdso/genvdso.c
@@ -122,6 +122,7 @@ static void *map_vdso(const char *path, size_t *_size)
if (fstat(fd, &stat) != 0) {
fprintf(stderr, "%s: Failed to stat '%s': %s\n", program_name,
path, strerror(errno));
+ close(fd);
return NULL;
}

@@ -130,6 +131,7 @@ static void *map_vdso(const char *path, size_t *_size)
if (addr == MAP_FAILED) {
fprintf(stderr, "%s: Failed to map '%s': %s\n", program_name,
path, strerror(errno));
+ close(fd);
return NULL;
}

@@ -139,6 +141,7 @@ static void *map_vdso(const char *path, size_t *_size)
if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) {
fprintf(stderr, "%s: '%s' is not an ELF file\n", program_name,
path);
+ close(fd);
return NULL;
}

@@ -150,6 +153,7 @@ static void *map_vdso(const char *path, size_t *_size)
default:
fprintf(stderr, "%s: '%s' has invalid ELF class\n",
program_name, path);
+ close(fd);
return NULL;
}

@@ -161,6 +165,7 @@ static void *map_vdso(const char *path, size_t *_size)
default:
fprintf(stderr, "%s: '%s' has invalid ELF data order\n",
program_name, path);
+ close(fd);
return NULL;
}

@@ -168,15 +173,18 @@ static void *map_vdso(const char *path, size_t *_size)
fprintf(stderr,
"%s: '%s' has invalid ELF machine (expected EM_MIPS)\n",
program_name, path);
+ close(fd);
return NULL;
} else if (swap_uint16(ehdr->e_type) != ET_DYN) {
fprintf(stderr,
"%s: '%s' has invalid ELF type (expected ET_DYN)\n",
program_name, path);
+ close(fd);
return NULL;
}

*_size = stat.st_size;
+ close(fd);
return addr;
}

@@ -293,10 +301,12 @@ int main(int argc, char **argv)
/* Calculate and write symbol offsets to <output file> */
if (!get_symbols(dbg_vdso_path, dbg_vdso)) {
unlink(out_path);
+ fclose(out_file);
return EXIT_FAILURE;
}

fprintf(out_file, "};\n");
+ fclose(out_file);

return EXIT_SUCCESS;
}
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 01d70280d2872..c6f9d75283813 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1517,9 +1517,16 @@ nocheck:
ret = 0;
out:
if (has_branch_stack(event)) {
- power_pmu_bhrb_enable(event);
- cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
- event->attr.branch_sample_type);
+ u64 bhrb_filter = -1;
+
+ if (ppmu->bhrb_filter_map)
+ bhrb_filter = ppmu->bhrb_filter_map(
+ event->attr.branch_sample_type);
+
+ if (bhrb_filter != -1) {
+ cpuhw->bhrb_filter = bhrb_filter;
+ power_pmu_bhrb_enable(event);
+ }
}

perf_pmu_enable(event->pmu);
@@ -1841,7 +1848,6 @@ static int power_pmu_event_init(struct perf_event *event)
int n;
int err;
struct cpu_hw_events *cpuhw;
- u64 bhrb_filter;

if (!ppmu)
return -ENOENT;
@@ -1947,7 +1953,10 @@ static int power_pmu_event_init(struct perf_event *event)
err = power_check_constraints(cpuhw, events, cflags, n + 1);

if (has_branch_stack(event)) {
- bhrb_filter = ppmu->bhrb_filter_map(
+ u64 bhrb_filter = -1;
+
+ if (ppmu->bhrb_filter_map)
+ bhrb_filter = ppmu->bhrb_filter_map(
event->attr.branch_sample_type);

if (bhrb_filter == -1) {
@@ -2101,6 +2110,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val,

if (perf_event_overflow(event, &data, regs))
power_pmu_stop(event, 0);
+ } else if (period) {
+ /* Account for interrupt in case of invalid SIAR */
+ if (perf_event_account_interrupt(event))
+ power_pmu_stop(event, 0);
}
}
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 87737ec86d39a..1dc9d3c818726 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -36,7 +36,7 @@ config PPC_BOOK3S_6xx
select PPC_HAVE_PMU_SUPPORT
select PPC_HAVE_KUEP
select PPC_HAVE_KUAP
- select HAVE_ARCH_VMAP_STACK
+ select HAVE_ARCH_VMAP_STACK if !ADB_PMU

config PPC_BOOK3S_601
bool "PowerPC 601"
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 0f7c8241912b9..f2ff359041eec 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -44,6 +44,7 @@ config SPU_FS
tristate "SPU file system"
default m
depends on PPC_CELL
+ depends on COREDUMP
select SPU_BASE
help
The SPU file system is used to access Synergistic Processing
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 71b881e554fcb..cb58ec7ce77ac 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -18,6 +18,7 @@
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
+#include <linux/kmemleak.h>

#include <asm/machdep.h>
#include <asm/prom.h>
@@ -647,6 +648,7 @@ static bool xive_native_provision_pages(void)
pr_err("Failed to allocate provisioning page\n");
return false;
}
+ kmemleak_ignore(p);
opal_xive_donate_page(chip, __pa(p));
}
return true;
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index dae32d948bf25..f8a56b5dc29fe 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -161,6 +161,7 @@ static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
apicd->move_in_progress = true;
apicd->prev_vector = apicd->vector;
apicd->prev_cpu = apicd->cpu;
+ WARN_ON_ONCE(apicd->cpu == newcpu);
} else {
irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
managed);
@@ -910,7 +911,7 @@ void send_cleanup_vector(struct irq_cfg *cfg)
__send_cleanup_vector(apicd);
}

-static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
+void irq_complete_move(struct irq_cfg *cfg)
{
struct apic_chip_data *apicd;

@@ -918,15 +919,16 @@ static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
if (likely(!apicd->move_in_progress))
return;

- if (vector == apicd->vector && apicd->cpu == smp_processor_id())
+ /*
+ * If the interrupt arrived on the new target CPU, cleanup the
+ * vector on the old target CPU. A vector check is not required
+ * because an interrupt can never move from one vector to another
+ * on the same CPU.
+ */
+ if (apicd->cpu == smp_processor_id())
__send_cleanup_vector(apicd);
}

-void irq_complete_move(struct irq_cfg *cfg)
-{
- __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
-}
-
/*
* Called from fixup_irqs() with @desc->lock held and interrupts disabled.
*/
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 518ac6bf752e0..9fb6a8655ddf3 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1604,14 +1604,28 @@ int native_cpu_disable(void)
if (ret)
return ret;

- /*
- * Disable the local APIC. Otherwise IPI broadcasts will reach
- * it. It still responds normally to INIT, NMI, SMI, and SIPI
- * messages.
- */
- apic_soft_disable();
cpu_disable_common();

+ /*
+ * Disable the local APIC. Otherwise IPI broadcasts will reach
+ * it. It still responds normally to INIT, NMI, SMI, and SIPI
+ * messages.
+ *
+ * Disabling the APIC must happen after cpu_disable_common()
+ * which invokes fixup_irqs().
+ *
+ * Disabling the APIC preserves already set bits in IRR, but
+ * an interrupt arriving after disabling the local APIC does not
+ * set the corresponding IRR bit.
+ *
+ * fixup_irqs() scans IRR for set bits so it can raise a not
+ * yet handled interrupt on the new destination CPU via an IPI
+ * but obviously it can't do so for IRR bits which are not set.
+ * IOW, interrupts arriving after disabling the local APIC will
+ * be lost.
+ */
+ apic_soft_disable();
+
return 0;
}
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 68882b9b8f11f..b791e2041e49b 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -332,7 +332,7 @@ static void bfqg_put(struct bfq_group *bfqg)
kfree(bfqg);
}

-void bfqg_and_blkg_get(struct bfq_group *bfqg)
+static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
bfqg_get(bfqg);
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index cd224aaf9f52a..703895224562c 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -986,7 +986,6 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
-void bfqg_and_blkg_get(struct bfq_group *bfqg);
void bfqg_and_blkg_put(struct bfq_group *bfqg);

#ifdef CONFIG_BFQ_GROUP_IOSCHED
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index eb0e2a6daabe6..26776bdbdf360 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -533,9 +533,7 @@ static void bfq_get_entity(struct bfq_entity *entity)
bfqq->ref++;
bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
bfqq, bfqq->ref);
- } else
- bfqg_and_blkg_get(container_of(entity, struct bfq_group,
- entity));
+ }
}

/**
@@ -649,14 +647,8 @@ static void bfq_forget_entity(struct bfq_service_tree *st,

entity->on_st_or_in_serv = false;
st->wsum -= entity->weight;
- if (is_in_service)
- return;
-
- if (bfqq)
+ if (bfqq && !is_in_service)
bfq_put_queue(bfqq);
- else
- bfqg_and_blkg_put(container_of(entity, struct bfq_group,
- entity));
}

/**
diff --git a/block/bio.c b/block/bio.c
index a7366c02c9b57..b1883adc8f154 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -738,8 +738,8 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
struct page *page, unsigned int len, unsigned int off,
bool *same_page)
{
- phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
- bv->bv_offset + bv->bv_len - 1;
+ size_t bv_end = bv->bv_offset + bv->bv_len;
+ phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
phys_addr_t page_addr = page_to_phys(page);

if (vec_end_addr + 1 != page_addr + off)
@@ -748,9 +748,9 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
return false;

*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
- if (!*same_page && pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
- return false;
- return true;
+ if (*same_page)
+ return true;
+ return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
}

/*
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 0ecc897b225c9..6e8f5e60b0982 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1056,13 +1056,15 @@ int blkcg_init_queue(struct request_queue *q)
if (preloaded)
radix_tree_preload_end();

- ret = blk_iolatency_init(q);
+ ret = blk_throtl_init(q);
if (ret)
goto err_destroy_all;

- ret = blk_throtl_init(q);
- if (ret)
+ ret = blk_iolatency_init(q);
+ if (ret) {
+ blk_throtl_exit(q);
goto err_destroy_all;
+ }
return 0;

err_destroy_all:
diff --git a/block/blk-merge.c b/block/blk-merge.c
index f0b0bae075a0c..75abba4d4591c 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -154,7 +154,7 @@ static inline unsigned get_max_io_size(struct request_queue *q,
if (max_sectors > start_offset)
return max_sectors - start_offset;

- return sectors & (lbs - 1);
+ return sectors & ~(lbs - 1);
}

static inline unsigned get_max_segment_size(const struct request_queue *q,
@@ -534,10 +534,17 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
}
EXPORT_SYMBOL(__blk_rq_map_sg);

+static inline unsigned int blk_rq_get_max_segments(struct request *rq)
+{
+ if (req_op(rq) == REQ_OP_DISCARD)
+ return queue_max_discard_segments(rq->q);
+ return queue_max_segments(rq->q);
+}
+
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
unsigned int nr_phys_segs)
{
- if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q))
+ if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
goto no_merge;

if (blk_integrity_merge_bio(req->q, req, bio) == false)
@@ -625,7 +632,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
return 0;

total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
- if (total_phys_segments > queue_max_segments(q))
+ if (total_phys_segments > blk_rq_get_max_segments(req))
return 0;

if (blk_integrity_merge_rq(q, req, next) == false)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index fdcc2c1dd1788..fd850d9e68a1a 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -77,6 +77,15 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
return;
clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

+ /*
+ * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch)
+ * in blk_mq_run_hw_queue(). Its pair is the barrier in
+ * blk_mq_dispatch_rq_list(). So dispatch code won't see SCHED_RESTART,
+ * meantime new request added to hctx->dispatch is missed to check in
+ * blk_mq_run_hw_queue().
+ */
+ smp_mb();
+
blk_mq_run_hw_queue(hctx, true);
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4e0d173beaa35..a366726094a89 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1323,6 +1323,15 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
list_splice_tail_init(list, &hctx->dispatch);
spin_unlock(&hctx->lock);

+ /*
+ * Order adding requests to hctx->dispatch and checking
+ * SCHED_RESTART flag. The pair of this smp_mb() is the one
+ * in blk_mq_sched_restart(). Avoid restart code path to
+ * miss the new added requests to hctx->dispatch, meantime
+ * SCHED_RESTART is observed here.
+ */
+ smp_mb();
+
/*
* If SCHED_RESTART was set by the caller of this function and
* it is no longer set that means that it was cleared by another
@@ -1909,7 +1918,8 @@ insert:
if (bypass_insert)
return BLK_STS_RESOURCE;

- blk_mq_request_bypass_insert(rq, false, run_queue);
+ blk_mq_sched_insert_request(rq, false, run_queue, false);
+
return BLK_STS_OK;
}
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 5882ed46f1adb..e31cf43df2e09 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/net.h>
#include <linux/rwsem.h>
+#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/security.h>

@@ -847,9 +848,15 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
}

lock_sock(sk);
- if (ctx->init && (init || !ctx->more)) {
- err = -EINVAL;
- goto unlock;
+ if (ctx->init && !ctx->more) {
+ if (ctx->used) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ pr_info_once(
+ "%s sent an empty control message without MSG_MORE.\n",
+ current->comm);
}
ctx->init = true;
diff --git a/drivers/base/core.c b/drivers/base/core.c
|
||
index 05d414e9e8a40..0799e1445f654 100644
|
||
--- a/drivers/base/core.c
|
||
+++ b/drivers/base/core.c
|
||
@@ -3988,9 +3988,9 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
|
||
*/
|
||
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
|
||
{
|
||
- if (fwnode) {
|
||
- struct fwnode_handle *fn = dev->fwnode;
|
||
+ struct fwnode_handle *fn = dev->fwnode;
|
||
|
||
+ if (fwnode) {
|
||
if (fwnode_is_primary(fn))
|
||
fn = fn->secondary;
|
||
|
||
@@ -4000,8 +4000,12 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
|
||
}
|
||
dev->fwnode = fwnode;
|
||
} else {
|
||
- dev->fwnode = fwnode_is_primary(dev->fwnode) ?
|
||
- dev->fwnode->secondary : NULL;
|
||
+ if (fwnode_is_primary(fn)) {
|
||
+ dev->fwnode = fn->secondary;
|
||
+ fn->secondary = NULL;
|
||
+ } else {
|
||
+ dev->fwnode = NULL;
|
||
+ }
|
||
}
|
||
}
|
||
EXPORT_SYMBOL_GPL(set_primary_fwnode);
|
||
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
|
||
index 9dd85bea40260..205a06752ca90 100644
|
||
--- a/drivers/base/power/main.c
|
||
+++ b/drivers/base/power/main.c
|
||
@@ -1606,13 +1606,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
|
||
}
|
||
|
||
/*
|
||
- * If a device configured to wake up the system from sleep states
|
||
- * has been suspended at run time and there's a resume request pending
|
||
- * for it, this is equivalent to the device signaling wakeup, so the
|
||
- * system suspend operation should be aborted.
|
||
+ * Wait for possible runtime PM transitions of the device in progress
|
||
+ * to complete and if there's a runtime resume request pending for it,
|
||
+ * resume it before proceeding with invoking the system-wide suspend
|
||
+ * callbacks for it.
|
||
+ *
|
||
+ * If the system-wide suspend callbacks below change the configuration
|
||
+ * of the device, they must disable runtime PM for it or otherwise
|
||
+ * ensure that its runtime-resume callbacks will not be confused by that
|
||
+ * change in case they are invoked going forward.
|
||
*/
|
||
- if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
|
||
- pm_wakeup_event(dev, 0);
|
||
+ pm_runtime_barrier(dev);
|
||
|
||
if (pm_wakeup_pending()) {
|
||
dev->power.direct_complete = false;
|
||
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
|
||
index 776083963ee6c..84433922aed16 100644
|
||
--- a/drivers/block/loop.c
|
||
+++ b/drivers/block/loop.c
|
||
@@ -877,6 +877,7 @@ static void loop_config_discard(struct loop_device *lo)
|
||
struct file *file = lo->lo_backing_file;
|
||
struct inode *inode = file->f_mapping->host;
|
||
struct request_queue *q = lo->lo_queue;
|
||
+ u32 granularity, max_discard_sectors;
|
||
|
||
/*
|
||
* If the backing device is a block device, mirror its zeroing
|
||
@@ -889,11 +890,10 @@ static void loop_config_discard(struct loop_device *lo)
|
||
struct request_queue *backingq;
|
||
|
||
backingq = bdev_get_queue(inode->i_bdev);
|
||
- blk_queue_max_discard_sectors(q,
|
||
- backingq->limits.max_write_zeroes_sectors);
|
||
|
||
- blk_queue_max_write_zeroes_sectors(q,
|
||
- backingq->limits.max_write_zeroes_sectors);
|
||
+ max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
|
||
+ granularity = backingq->limits.discard_granularity ?:
|
||
+ queue_physical_block_size(backingq);
|
||
|
||
/*
|
||
* We use punch hole to reclaim the free space used by the
|
||
@@ -902,23 +902,26 @@ static void loop_config_discard(struct loop_device *lo)
|
||
* useful information.
|
||
*/
|
||
} else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
|
||
- q->limits.discard_granularity = 0;
|
||
- q->limits.discard_alignment = 0;
|
||
- blk_queue_max_discard_sectors(q, 0);
|
||
- blk_queue_max_write_zeroes_sectors(q, 0);
|
||
+ max_discard_sectors = 0;
|
||
+ granularity = 0;
|
||
|
||
} else {
|
||
- q->limits.discard_granularity = inode->i_sb->s_blocksize;
|
||
- q->limits.discard_alignment = 0;
|
||
-
|
||
- blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
|
||
- blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
|
||
+ max_discard_sectors = UINT_MAX >> 9;
|
||
+ granularity = inode->i_sb->s_blocksize;
|
||
}
|
||
|
||
- if (q->limits.max_write_zeroes_sectors)
|
||
+ if (max_discard_sectors) {
|
||
+ q->limits.discard_granularity = granularity;
|
||
+ blk_queue_max_discard_sectors(q, max_discard_sectors);
|
||
+ blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
|
||
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
|
||
- else
|
||
+ } else {
|
||
+ q->limits.discard_granularity = 0;
|
||
+ blk_queue_max_discard_sectors(q, 0);
|
||
+ blk_queue_max_write_zeroes_sectors(q, 0);
|
||
blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
|
||
+ }
|
||
+ q->limits.discard_alignment = 0;
|
||
}
|
||
|
||
static void loop_unprepare_queue(struct loop_device *lo)
|
||
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
|
||
index 87b31f9ca362e..8cf13ea11cd2c 100644
|
||
--- a/drivers/block/null_blk_main.c
|
||
+++ b/drivers/block/null_blk_main.c
|
||
@@ -1139,7 +1139,7 @@ static int null_handle_rq(struct nullb_cmd *cmd)
|
||
len = bvec.bv_len;
|
||
err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
|
||
op_is_write(req_op(rq)), sector,
|
||
- req_op(rq) & REQ_FUA);
|
||
+ rq->cmd_flags & REQ_FUA);
|
||
if (err) {
|
||
spin_unlock_irq(&nullb->lock);
|
||
return err;
|
||
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
|
||
index 980df853ee497..99991b6a6f0ed 100644
|
||
--- a/drivers/block/virtio_blk.c
|
||
+++ b/drivers/block/virtio_blk.c
|
||
@@ -126,16 +126,31 @@ static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
|
||
if (!range)
|
||
return -ENOMEM;
|
||
|
||
- __rq_for_each_bio(bio, req) {
|
||
- u64 sector = bio->bi_iter.bi_sector;
|
||
- u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
|
||
-
|
||
- range[n].flags = cpu_to_le32(flags);
|
||
- range[n].num_sectors = cpu_to_le32(num_sectors);
|
||
- range[n].sector = cpu_to_le64(sector);
|
||
- n++;
|
||
+ /*
|
||
+ * Single max discard segment means multi-range discard isn't
|
||
+ * supported, and block layer only runs contiguity merge like
|
||
+ * normal RW request. So we can't reply on bio for retrieving
|
||
+ * each range info.
|
||
+ */
|
||
+ if (queue_max_discard_segments(req->q) == 1) {
|
||
+ range[0].flags = cpu_to_le32(flags);
|
||
+ range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
|
||
+ range[0].sector = cpu_to_le64(blk_rq_pos(req));
|
||
+ n = 1;
|
||
+ } else {
|
||
+ __rq_for_each_bio(bio, req) {
|
||
+ u64 sector = bio->bi_iter.bi_sector;
|
||
+ u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
|
||
+
|
||
+ range[n].flags = cpu_to_le32(flags);
|
||
+ range[n].num_sectors = cpu_to_le32(num_sectors);
|
||
+ range[n].sector = cpu_to_le64(sector);
|
||
+ n++;
|
||
+ }
|
||
}
|
||
|
||
+ WARN_ON_ONCE(n != segments);
|
||
+
|
||
req->special_vec.bv_page = virt_to_page(range);
|
||
req->special_vec.bv_offset = offset_in_page(range);
|
||
req->special_vec.bv_len = sizeof(*range) * segments;
|
||
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
|
||
index c7540ad28995b..8c730a47e0537 100644
|
||
--- a/drivers/cpufreq/intel_pstate.c
|
||
+++ b/drivers/cpufreq/intel_pstate.c
|
||
@@ -649,11 +649,12 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
|
||
mutex_lock(&intel_pstate_limits_lock);
|
||
|
||
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
|
||
- u64 value;
|
||
-
|
||
- ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
|
||
- if (ret)
|
||
- goto return_pref;
|
||
+ /*
|
||
+ * Use the cached HWP Request MSR value, because the register
|
||
+ * itself may be updated by intel_pstate_hwp_boost_up() or
|
||
+ * intel_pstate_hwp_boost_down() at any time.
|
||
+ */
|
||
+ u64 value = READ_ONCE(cpu_data->hwp_req_cached);
|
||
|
||
value &= ~GENMASK_ULL(31, 24);
|
||
|
||
@@ -661,13 +662,18 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
|
||
epp = epp_values[pref_index - 1];
|
||
|
||
value |= (u64)epp << 24;
|
||
+ /*
|
||
+ * The only other updater of hwp_req_cached in the active mode,
|
||
+ * intel_pstate_hwp_set(), is called under the same lock as this
|
||
+ * function, so it cannot run in parallel with the update below.
|
||
+ */
|
||
+ WRITE_ONCE(cpu_data->hwp_req_cached, value);
|
||
ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
|
||
} else {
|
||
if (epp == -EINVAL)
|
||
epp = (pref_index - 1) << 2;
|
||
ret = intel_pstate_set_epb(cpu_data->cpu, epp);
|
||
}
|
||
-return_pref:
|
||
mutex_unlock(&intel_pstate_limits_lock);
|
||
|
||
return ret;
|
||
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
|
||
index 46c84dce6544a..5f8d94e812c8f 100644
|
||
--- a/drivers/devfreq/devfreq.c
|
||
+++ b/drivers/devfreq/devfreq.c
|
||
@@ -1690,9 +1690,9 @@ static int devfreq_summary_show(struct seq_file *s, void *data)
|
||
#endif
|
||
|
||
mutex_lock(&devfreq->lock);
|
||
- cur_freq = devfreq->previous_freq,
|
||
+ cur_freq = devfreq->previous_freq;
|
||
get_freq_range(devfreq, &min_freq, &max_freq);
|
||
- polling_ms = devfreq->profile->polling_ms,
|
||
+ polling_ms = devfreq->profile->polling_ms;
|
||
mutex_unlock(&devfreq->lock);
|
||
|
||
seq_printf(s,
|
||
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
|
||
index de41d7928bff2..984354ca877de 100644
|
||
--- a/drivers/dma/Kconfig
|
||
+++ b/drivers/dma/Kconfig
|
||
@@ -285,6 +285,7 @@ config INTEL_IDMA64
|
||
config INTEL_IDXD
|
||
tristate "Intel Data Accelerators support"
|
||
depends on PCI && X86_64
|
||
+ depends on PCI_MSI
|
||
select DMA_ENGINE
|
||
select SBITMAP
|
||
help
|
||
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
|
||
index 5813e931f2f00..01ff71f7b6456 100644
|
||
--- a/drivers/edac/edac_mc.c
|
||
+++ b/drivers/edac/edac_mc.c
|
||
@@ -950,6 +950,8 @@ static void edac_ue_error(struct edac_raw_error_desc *e)
|
||
e->other_detail);
|
||
}
|
||
|
||
+ edac_inc_ue_error(e);
|
||
+
|
||
if (edac_mc_get_panic_on_ue()) {
|
||
panic("UE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld%s%s)\n",
|
||
e->msg,
|
||
@@ -959,8 +961,6 @@ static void edac_ue_error(struct edac_raw_error_desc *e)
|
||
*e->other_detail ? " - " : "",
|
||
e->other_detail);
|
||
}
|
||
-
|
||
- edac_inc_ue_error(e);
|
||
}
|
||
|
||
static void edac_inc_csrow(struct edac_raw_error_desc *e, int row, int chan)
|
||
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
|
||
index d68346a8e141a..ebe50996cc423 100644
|
||
--- a/drivers/edac/ie31200_edac.c
|
||
+++ b/drivers/edac/ie31200_edac.c
|
||
@@ -170,6 +170,8 @@
|
||
(n << (28 + (2 * skl) - PAGE_SHIFT))
|
||
|
||
static int nr_channels;
|
||
+static struct pci_dev *mci_pdev;
|
||
+static int ie31200_registered = 1;
|
||
|
||
struct ie31200_priv {
|
||
void __iomem *window;
|
||
@@ -538,12 +540,16 @@ fail_free:
|
||
static int ie31200_init_one(struct pci_dev *pdev,
|
||
const struct pci_device_id *ent)
|
||
{
|
||
- edac_dbg(0, "MC:\n");
|
||
+ int rc;
|
||
|
||
+ edac_dbg(0, "MC:\n");
|
||
if (pci_enable_device(pdev) < 0)
|
||
return -EIO;
|
||
+ rc = ie31200_probe1(pdev, ent->driver_data);
|
||
+ if (rc == 0 && !mci_pdev)
|
||
+ mci_pdev = pci_dev_get(pdev);
|
||
|
||
- return ie31200_probe1(pdev, ent->driver_data);
|
||
+ return rc;
|
||
}
|
||
|
||
static void ie31200_remove_one(struct pci_dev *pdev)
|
||
@@ -552,6 +558,8 @@ static void ie31200_remove_one(struct pci_dev *pdev)
|
||
struct ie31200_priv *priv;
|
||
|
||
edac_dbg(0, "\n");
|
||
+ pci_dev_put(mci_pdev);
|
||
+ mci_pdev = NULL;
|
||
mci = edac_mc_del_mc(&pdev->dev);
|
||
if (!mci)
|
||
return;
|
||
@@ -593,17 +601,53 @@ static struct pci_driver ie31200_driver = {
|
||
|
||
static int __init ie31200_init(void)
|
||
{
|
||
+ int pci_rc, i;
|
||
+
|
||
edac_dbg(3, "MC:\n");
|
||
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
|
||
opstate_init();
|
||
|
||
- return pci_register_driver(&ie31200_driver);
|
||
+ pci_rc = pci_register_driver(&ie31200_driver);
|
||
+ if (pci_rc < 0)
|
||
+ goto fail0;
|
||
+
|
||
+ if (!mci_pdev) {
|
||
+ ie31200_registered = 0;
|
||
+ for (i = 0; ie31200_pci_tbl[i].vendor != 0; i++) {
|
||
+ mci_pdev = pci_get_device(ie31200_pci_tbl[i].vendor,
|
||
+ ie31200_pci_tbl[i].device,
|
||
+ NULL);
|
||
+ if (mci_pdev)
|
||
+ break;
|
||
+ }
|
||
+ if (!mci_pdev) {
|
||
+ edac_dbg(0, "ie31200 pci_get_device fail\n");
|
||
+ pci_rc = -ENODEV;
|
||
+ goto fail1;
|
||
+ }
|
||
+ pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]);
|
||
+ if (pci_rc < 0) {
|
||
+ edac_dbg(0, "ie31200 init fail\n");
|
||
+ pci_rc = -ENODEV;
|
||
+ goto fail1;
|
||
+ }
|
||
+ }
|
||
+ return 0;
|
||
+
|
||
+fail1:
|
||
+ pci_unregister_driver(&ie31200_driver);
|
||
+fail0:
|
||
+ pci_dev_put(mci_pdev);
|
||
+
|
||
+ return pci_rc;
|
||
}
|
||
|
||
static void __exit ie31200_exit(void)
|
||
{
|
||
edac_dbg(3, "MC:\n");
|
||
pci_unregister_driver(&ie31200_driver);
|
||
+ if (!ie31200_registered)
|
||
+ ie31200_remove_one(mci_pdev);
|
||
}
|
||
|
||
module_init(ie31200_init);
|
||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
|
||
index c7fd0c47b2545..1102de76d8767 100644
|
||
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
|
||
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
|
||
@@ -195,19 +195,32 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
|
||
unsigned int engine_id,
|
||
unsigned int queue_id)
|
||
{
|
||
- uint32_t sdma_engine_reg_base[2] = {
|
||
- SOC15_REG_OFFSET(SDMA0, 0,
|
||
- mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
|
||
- SOC15_REG_OFFSET(SDMA1, 0,
|
||
- mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
|
||
- };
|
||
- uint32_t retval = sdma_engine_reg_base[engine_id]
|
||
+ uint32_t sdma_engine_reg_base = 0;
|
||
+ uint32_t sdma_rlc_reg_offset;
|
||
+
|
||
+ switch (engine_id) {
|
||
+ default:
|
||
+ dev_warn(adev->dev,
|
||
+ "Invalid sdma engine id (%d), using engine id 0\n",
|
||
+ engine_id);
|
||
+ fallthrough;
|
||
+ case 0:
|
||
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
|
||
+ mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
|
||
+ break;
|
||
+ case 1:
|
||
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
|
||
+ mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
|
||
+ break;
|
||
+ }
|
||
+
|
||
+ sdma_rlc_reg_offset = sdma_engine_reg_base
|
||
+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
|
||
|
||
pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
|
||
- queue_id, retval);
|
||
+ queue_id, sdma_rlc_reg_offset);
|
||
|
||
- return retval;
|
||
+ return sdma_rlc_reg_offset;
|
||
}
|
||
|
||
static inline struct v9_mqd *get_mqd(void *mqd)
|
||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
|
||
index f355d9a752d29..a1aec205435de 100644
|
||
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
|
||
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
|
||
@@ -716,8 +716,10 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
|
||
|
||
if (!drm_kms_helper_is_poll_worker()) {
|
||
r = pm_runtime_get_sync(connector->dev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(connector->dev->dev);
|
||
return connector_status_disconnected;
|
||
+ }
|
||
}
|
||
|
||
if (encoder) {
|
||
@@ -854,8 +856,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
|
||
|
||
if (!drm_kms_helper_is_poll_worker()) {
|
||
r = pm_runtime_get_sync(connector->dev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(connector->dev->dev);
|
||
return connector_status_disconnected;
|
||
+ }
|
||
}
|
||
|
||
encoder = amdgpu_connector_best_single_encoder(connector);
|
||
@@ -977,8 +981,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
|
||
|
||
if (!drm_kms_helper_is_poll_worker()) {
|
||
r = pm_runtime_get_sync(connector->dev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(connector->dev->dev);
|
||
return connector_status_disconnected;
|
||
+ }
|
||
}
|
||
|
||
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
|
||
@@ -1328,8 +1334,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
|
||
|
||
if (!drm_kms_helper_is_poll_worker()) {
|
||
r = pm_runtime_get_sync(connector->dev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(connector->dev->dev);
|
||
return connector_status_disconnected;
|
||
+ }
|
||
}
|
||
|
||
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
|
||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
|
||
index f7143d927b6d8..5e51f0acf744f 100644
|
||
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
|
||
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
|
||
@@ -282,7 +282,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
|
||
|
||
ret = pm_runtime_get_sync(dev->dev);
|
||
if (ret < 0)
|
||
- return ret;
|
||
+ goto out;
|
||
|
||
ret = drm_crtc_helper_set_config(set, ctx);
|
||
|
||
@@ -297,7 +297,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
|
||
take the current one */
|
||
if (active && !adev->have_disp_power_ref) {
|
||
adev->have_disp_power_ref = true;
|
||
- return ret;
|
||
+ goto out;
|
||
}
|
||
/* if we have no active crtcs, then drop the power ref
|
||
we got before */
|
||
@@ -306,6 +306,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
|
||
adev->have_disp_power_ref = false;
|
||
}
|
||
|
||
+out:
|
||
/* drop the power reference we got coming in here */
|
||
pm_runtime_put_autosuspend(dev->dev);
|
||
return ret;
|
||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
|
||
index 126e74758a342..d73924e35a57e 100644
|
||
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
|
||
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
|
||
@@ -1373,11 +1373,12 @@ long amdgpu_drm_ioctl(struct file *filp,
|
||
dev = file_priv->minor->dev;
|
||
ret = pm_runtime_get_sync(dev->dev);
|
||
if (ret < 0)
|
||
- return ret;
|
||
+ goto out;
|
||
|
||
ret = drm_ioctl(filp, cmd, arg);
|
||
|
||
pm_runtime_mark_last_busy(dev->dev);
|
||
+out:
|
||
pm_runtime_put_autosuspend(dev->dev);
|
||
return ret;
|
||
}
|
||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
|
||
index 3414e119f0cbf..f5a6ee7c2eaa3 100644
|
||
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
|
||
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
|
||
@@ -754,8 +754,10 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
|
||
int r;
|
||
|
||
r = pm_runtime_get_sync(dev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(dev->dev);
|
||
return 0;
|
||
+ }
|
||
|
||
seq_printf(m, "gpu recover\n");
|
||
amdgpu_device_gpu_recover(adev, NULL);
|
||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
|
||
index 21292098bc023..0a3b7d9df8a56 100644
|
||
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
|
||
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
|
||
@@ -663,8 +663,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
|
||
* in the bitfields */
|
||
if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
|
||
se_num = 0xffffffff;
|
||
+ else if (se_num >= AMDGPU_GFX_MAX_SE)
|
||
+ return -EINVAL;
|
||
if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
|
||
sh_num = 0xffffffff;
|
||
+ else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
|
||
+ return -EINVAL;
|
||
|
||
if (info->read_mmr_reg.count > 128)
|
||
return -EINVAL;
|
||
@@ -992,7 +996,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
|
||
|
||
r = pm_runtime_get_sync(dev->dev);
|
||
if (r < 0)
|
||
- return r;
|
||
+ goto pm_put;
|
||
|
||
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
|
||
if (unlikely(!fpriv)) {
|
||
@@ -1043,6 +1047,7 @@ error_pasid:
|
||
|
||
out_suspend:
|
||
pm_runtime_mark_last_busy(dev->dev);
|
||
+pm_put:
|
||
pm_runtime_put_autosuspend(dev->dev);
|
||
|
||
return r;
|
||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
|
||
index 02e6f8c4dde08..459b81fc5aef4 100644
|
||
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
|
||
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
|
||
@@ -167,8 +167,10 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
|
||
return -EPERM;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev)) {
|
||
if (adev->smu.ppt_funcs->get_current_power_state)
|
||
@@ -212,8 +214,10 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
|
||
return -EINVAL;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev)) {
|
||
mutex_lock(&adev->pm.mutex);
|
||
@@ -307,8 +311,10 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
|
||
return -EPERM;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
level = smu_get_performance_level(&adev->smu);
|
||
@@ -369,8 +375,10 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
|
||
}
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
current_level = smu_get_performance_level(&adev->smu);
|
||
@@ -449,8 +457,10 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
|
||
return -EPERM;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev)) {
|
||
ret = smu_get_power_num_states(&adev->smu, &data);
|
||
@@ -491,8 +501,10 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
|
||
return -EPERM;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev)) {
|
||
pm = smu_get_current_power_state(smu);
|
||
@@ -567,8 +579,10 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
|
||
state = data.states[idx];
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
/* only set user selected power states */
|
||
if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
|
||
@@ -608,8 +622,10 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
|
||
return -EPERM;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev)) {
|
||
size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
|
||
@@ -650,8 +666,10 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
|
||
return -EPERM;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev)) {
|
||
ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
|
||
@@ -790,8 +808,10 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
|
||
}
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev)) {
|
||
ret = smu_od_edit_dpm_table(&adev->smu, type,
|
||
@@ -847,8 +867,10 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
|
||
return -EPERM;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev)) {
|
||
size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
|
||
@@ -905,8 +927,10 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
|
||
pr_debug("featuremask = 0x%llx\n", featuremask);
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev)) {
|
||
ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
|
||
@@ -942,8 +966,10 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
|
||
return -EPERM;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
|
||
@@ -1001,8 +1027,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
|
||
return -EPERM;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
|
||
@@ -1071,8 +1099,10 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
|
||
return ret;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
|
||
@@ -1101,8 +1131,10 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
|
||
return -EPERM;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
|
||
@@ -1135,8 +1167,10 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
|
||
return ret;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
|
||
@@ -1165,8 +1199,10 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
|
||
return -EPERM;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
|
||
@@ -1199,8 +1235,10 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
|
||
return ret;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
|
||
@@ -1231,8 +1269,10 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
|
||
return -EPERM;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
|
||
@@ -1265,8 +1305,10 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
|
||
return ret;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
|
||
@@ -1297,8 +1339,10 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
|
||
return -EPERM;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
|
||
@@ -1331,8 +1375,10 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
|
||
return ret;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
|
||
@@ -1363,8 +1409,10 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
|
||
return -EPERM;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
|
||
@@ -1397,8 +1445,10 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
|
||
return ret;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
|
||
@@ -1429,8 +1479,10 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
|
||
return -EPERM;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
|
||
@@ -1462,8 +1514,10 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
|
||
return -EINVAL;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev)) {
|
||
value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
|
||
@@ -1498,8 +1552,10 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
|
||
return -EPERM;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
|
||
@@ -1531,8 +1587,10 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
|
||
return -EINVAL;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev)) {
|
||
value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
|
||
@@ -1587,8 +1645,10 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
|
||
return -EPERM;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
size = smu_get_power_profile_mode(&adev->smu, buf);
|
||
@@ -1650,8 +1710,10 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
|
||
parameter[parameter_size] = profile_mode;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
|
||
@@ -1687,8 +1749,10 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
|
||
return -EPERM;
|
||
|
||
r = pm_runtime_get_sync(ddev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return r;
|
||
+ }
|
||
|
||
/* read the IP busy sensor */
|
||
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
|
||
@@ -1723,8 +1787,10 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
|
||
return -EPERM;
|
||
|
||
r = pm_runtime_get_sync(ddev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return r;
|
||
+ }
|
||
|
||
/* read the IP busy sensor */
|
||
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
|
||
@@ -1770,8 +1836,10 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
|
||
return -ENODATA;
|
||
|
||
ret = pm_runtime_get_sync(ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
|
||
|
||
@@ -2003,8 +2071,10 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
|
||
return -EINVAL;
|
||
|
||
r = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return r;
|
||
+ }
|
||
|
||
switch (channel) {
|
||
case PP_TEMP_JUNCTION:
|
||
@@ -2134,8 +2204,10 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
|
||
return -EPERM;
|
||
|
||
ret = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev)) {
|
||
pwm_mode = smu_get_fan_control_mode(&adev->smu);
|
||
@@ -2172,8 +2244,10 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
|
||
return err;
|
||
|
||
ret = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev)) {
|
||
smu_set_fan_control_mode(&adev->smu, value);
|
||
@@ -2220,8 +2294,10 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
|
||
return -EPERM;
|
||
|
||
err = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (err < 0)
|
||
+ if (err < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return err;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
pwm_mode = smu_get_fan_control_mode(&adev->smu);
|
||
@@ -2272,8 +2348,10 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
|
||
return -EPERM;
|
||
|
||
err = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (err < 0)
|
||
+ if (err < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return err;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
err = smu_get_fan_speed_percent(&adev->smu, &speed);
|
||
@@ -2305,8 +2383,10 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
|
||
return -EPERM;
|
||
|
||
err = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (err < 0)
|
||
+ if (err < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return err;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
err = smu_get_fan_speed_rpm(&adev->smu, &speed);
|
||
@@ -2337,8 +2417,10 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
|
||
return -EPERM;
|
||
|
||
r = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return r;
|
||
+ }
|
||
|
||
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
|
||
(void *)&min_rpm, &size);
|
||
@@ -2365,8 +2447,10 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
|
||
return -EPERM;
|
||
|
||
r = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return r;
|
||
+ }
|
||
|
||
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
|
||
(void *)&max_rpm, &size);
|
||
@@ -2392,8 +2476,10 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
|
||
return -EPERM;
|
||
|
||
err = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (err < 0)
|
||
+ if (err < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return err;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
|
||
@@ -2424,8 +2510,10 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
|
||
return -EPERM;
|
||
|
||
err = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (err < 0)
|
||
+ if (err < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return err;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
pwm_mode = smu_get_fan_control_mode(&adev->smu);
|
||
@@ -2473,8 +2561,10 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
|
||
return -EPERM;
|
||
|
||
ret = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev)) {
|
||
pwm_mode = smu_get_fan_control_mode(&adev->smu);
|
||
@@ -2519,8 +2609,10 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
|
||
return -EINVAL;
|
||
|
||
err = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (err < 0)
|
||
+ if (err < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return err;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev)) {
|
||
smu_set_fan_control_mode(&adev->smu, pwm_mode);
|
||
@@ -2551,8 +2643,10 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
|
||
return -EPERM;
|
||
|
||
r = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return r;
|
||
+ }
|
||
|
||
/* get the voltage */
|
||
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
|
||
@@ -2590,8 +2684,10 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
|
||
return -EINVAL;
|
||
|
||
r = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return r;
|
||
+ }
|
||
|
||
/* get the voltage */
|
||
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
|
||
@@ -2626,8 +2722,10 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
|
||
return -EPERM;
|
||
|
||
r = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return r;
|
||
+ }
|
||
|
||
/* get the voltage */
|
||
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
|
||
@@ -2665,8 +2763,10 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
|
||
return -EPERM;
|
||
|
||
r = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return r;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev)) {
|
||
smu_get_power_limit(&adev->smu, &limit, true, true);
|
||
@@ -2697,8 +2797,10 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
|
||
return -EPERM;
|
||
|
||
r = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return r;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev)) {
|
||
smu_get_power_limit(&adev->smu, &limit, false, true);
|
||
@@ -2740,8 +2842,10 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
|
||
|
||
|
||
err = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (err < 0)
|
||
+ if (err < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return err;
|
||
+ }
|
||
|
||
if (is_support_sw_smu(adev))
|
||
err = smu_set_power_limit(&adev->smu, value);
|
||
@@ -2771,8 +2875,10 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
|
||
return -EPERM;
|
||
|
||
r = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return r;
|
||
+ }
|
||
|
||
/* get the sclk */
|
||
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
|
||
@@ -2806,8 +2912,10 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
|
||
return -EPERM;
|
||
|
||
r = pm_runtime_get_sync(adev->ddev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(adev->ddev->dev);
|
||
return r;
|
||
+ }
|
||
|
||
/* get the sclk */
|
||
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
|
||
@@ -3669,8 +3777,10 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
|
||
return -EPERM;
|
||
|
||
r = pm_runtime_get_sync(dev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(dev->dev);
|
||
return r;
|
||
+ }
|
||
|
||
amdgpu_device_ip_get_clockgating_state(adev, &flags);
|
||
seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
|
||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
|
||
index 50fe08bf2f727..3f47f35eedff1 100644
|
||
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
|
||
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
|
||
@@ -1240,7 +1240,6 @@ void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
|
||
if (!obj || !obj->ent)
|
||
return;
|
||
|
||
- debugfs_remove(obj->ent);
|
||
obj->ent = NULL;
|
||
put_obj(obj);
|
||
}
|
||
@@ -1254,7 +1253,6 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
|
||
amdgpu_ras_debugfs_remove(adev, &obj->head);
|
||
}
|
||
|
||
- debugfs_remove_recursive(con->dir);
|
||
con->dir = NULL;
|
||
}
|
||
/* debugfs end */
|
||
@@ -1914,9 +1912,8 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
|
||
amdgpu_ras_check_supported(adev, &con->hw_supported,
|
||
&con->supported);
|
||
if (!con->hw_supported) {
|
||
- amdgpu_ras_set_context(adev, NULL);
|
||
- kfree(con);
|
||
- return 0;
|
||
+ r = 0;
|
||
+ goto err_out;
|
||
}
|
||
|
||
con->features = 0;
|
||
@@ -1927,29 +1924,31 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
|
||
if (adev->nbio.funcs->init_ras_controller_interrupt) {
|
||
r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
|
||
if (r)
|
||
- return r;
|
||
+ goto err_out;
|
||
}
|
||
|
||
if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
|
||
r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
|
||
if (r)
|
||
- return r;
|
||
+ goto err_out;
|
||
}
|
||
|
||
amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;
|
||
|
||
- if (amdgpu_ras_fs_init(adev))
|
||
- goto fs_out;
|
||
+ if (amdgpu_ras_fs_init(adev)) {
|
||
+ r = -EINVAL;
|
||
+ goto err_out;
|
||
+ }
|
||
|
||
dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
|
||
"hardware ability[%x] ras_mask[%x]\n",
|
||
con->hw_supported, con->supported);
|
||
return 0;
|
||
-fs_out:
|
||
+err_out:
|
||
amdgpu_ras_set_context(adev, NULL);
|
||
kfree(con);
|
||
|
||
- return -EINVAL;
|
||
+ return r;
|
||
}
|
||
|
||
/* helper function to handle common stuff in ip late init phase */
|
||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
|
||
index fac77a86c04b2..2c7e6efeea2ff 100644
|
||
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
|
||
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
|
||
@@ -6854,10 +6854,8 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
|
||
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
|
||
data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
|
||
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
|
||
- RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
|
||
-
|
||
- /* only for Vega10 & Raven1 */
|
||
- data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
|
||
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK |
|
||
+ RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK);
|
||
|
||
if (def != data)
|
||
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
|
||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
|
||
index 0e0c42e9f6a31..6520a920cad4a 100644
|
||
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
|
||
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
|
||
@@ -1003,8 +1003,10 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
|
||
*/
|
||
if (!pdd->runtime_inuse) {
|
||
err = pm_runtime_get_sync(dev->ddev->dev);
|
||
- if (err < 0)
|
||
+ if (err < 0) {
|
||
+ pm_runtime_put_autosuspend(dev->ddev->dev);
|
||
return ERR_PTR(err);
|
||
+ }
|
||
}
|
||
|
||
err = kfd_iommu_bind_process_to_device(pdd);
|
||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
|
||
index bb77f7af2b6d9..dc3c4149f8600 100644
|
||
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
|
||
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
|
||
@@ -632,8 +632,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
|
||
|
||
ret = kobject_init_and_add(dev->kobj_node, &node_type,
|
||
sys_props.kobj_nodes, "%d", id);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ kobject_put(dev->kobj_node);
|
||
return ret;
|
||
+ }
|
||
|
||
dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node);
|
||
if (!dev->kobj_mem)
|
||
@@ -680,8 +682,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
|
||
return -ENOMEM;
|
||
ret = kobject_init_and_add(mem->kobj, &mem_type,
|
||
dev->kobj_mem, "%d", i);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ kobject_put(mem->kobj);
|
||
return ret;
|
||
+ }
|
||
|
||
mem->attr.name = "properties";
|
||
mem->attr.mode = KFD_SYSFS_FILE_MODE;
|
||
@@ -699,8 +703,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
|
||
return -ENOMEM;
|
||
ret = kobject_init_and_add(cache->kobj, &cache_type,
|
||
dev->kobj_cache, "%d", i);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ kobject_put(cache->kobj);
|
||
return ret;
|
||
+ }
|
||
|
||
cache->attr.name = "properties";
|
||
cache->attr.mode = KFD_SYSFS_FILE_MODE;
|
||
@@ -718,8 +724,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
|
||
return -ENOMEM;
|
||
ret = kobject_init_and_add(iolink->kobj, &iolink_type,
|
||
dev->kobj_iolink, "%d", i);
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ kobject_put(iolink->kobj);
|
||
return ret;
|
||
+ }
|
||
|
||
iolink->attr.name = "properties";
|
||
iolink->attr.mode = KFD_SYSFS_FILE_MODE;
|
||
@@ -798,8 +806,10 @@ static int kfd_topology_update_sysfs(void)
|
||
ret = kobject_init_and_add(sys_props.kobj_topology,
|
||
&sysprops_type, &kfd_device->kobj,
|
||
"topology");
|
||
- if (ret < 0)
|
||
+ if (ret < 0) {
|
||
+ kobject_put(sys_props.kobj_topology);
|
||
return ret;
|
||
+ }
|
||
|
||
sys_props.kobj_nodes = kobject_create_and_add("nodes",
|
||
sys_props.kobj_topology);
|
||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
|
||
index 0a39a8558b294..666ebe04837af 100644
|
||
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
|
||
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
|
||
@@ -2882,51 +2882,50 @@ static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
|
||
return rc ? 0 : 1;
|
||
}
|
||
|
||
-static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
|
||
- const uint32_t user_brightness)
|
||
+static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
|
||
+ unsigned *min, unsigned *max)
|
||
{
|
||
- u32 min, max, conversion_pace;
|
||
- u32 brightness = user_brightness;
|
||
-
|
||
if (!caps)
|
||
- goto out;
|
||
+ return 0;
|
||
|
||
- if (!caps->aux_support) {
|
||
- max = caps->max_input_signal;
|
||
- min = caps->min_input_signal;
|
||
- /*
|
||
- * The brightness input is in the range 0-255
|
||
- * It needs to be rescaled to be between the
|
||
- * requested min and max input signal
|
||
- * It also needs to be scaled up by 0x101 to
|
||
- * match the DC interface which has a range of
|
||
- * 0 to 0xffff
|
||
- */
|
||
- conversion_pace = 0x101;
|
||
- brightness =
|
||
- user_brightness
|
||
- * conversion_pace
|
||
- * (max - min)
|
||
- / AMDGPU_MAX_BL_LEVEL
|
||
- + min * conversion_pace;
|
||
+ if (caps->aux_support) {
|
||
+ // Firmware limits are in nits, DC API wants millinits.
|
||
+ *max = 1000 * caps->aux_max_input_signal;
|
||
+ *min = 1000 * caps->aux_min_input_signal;
|
||
} else {
|
||
- /* TODO
|
||
- * We are doing a linear interpolation here, which is OK but
|
||
- * does not provide the optimal result. We probably want
|
||
- * something close to the Perceptual Quantizer (PQ) curve.
|
||
- */
|
||
- max = caps->aux_max_input_signal;
|
||
- min = caps->aux_min_input_signal;
|
||
-
|
||
- brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
|
||
- + user_brightness * max;
|
||
- // Multiple the value by 1000 since we use millinits
|
||
- brightness *= 1000;
|
||
- brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
|
||
+ // Firmware limits are 8-bit, PWM control is 16-bit.
|
||
+ *max = 0x101 * caps->max_input_signal;
|
||
+ *min = 0x101 * caps->min_input_signal;
|
||
}
|
||
+ return 1;
|
||
+}
|
||
+
|
||
+static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
|
||
+ uint32_t brightness)
|
||
+{
|
||
+ unsigned min, max;
|
||
|
||
-out:
|
||
- return brightness;
|
||
+ if (!get_brightness_range(caps, &min, &max))
|
||
+ return brightness;
|
||
+
|
||
+ // Rescale 0..255 to min..max
|
||
+ return min + DIV_ROUND_CLOSEST((max - min) * brightness,
|
||
+ AMDGPU_MAX_BL_LEVEL);
|
||
+}
|
||
+
|
||
+static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
|
||
+ uint32_t brightness)
|
||
+{
|
||
+ unsigned min, max;
|
||
+
|
||
+ if (!get_brightness_range(caps, &min, &max))
|
||
+ return brightness;
|
||
+
|
||
+ if (brightness < min)
|
||
+ return 0;
|
||
+ // Rescale min..max to 0..255
|
||
+ return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
|
||
+ max - min);
|
||
}
|
||
|
||
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
|
||
@@ -2942,7 +2941,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
|
||
|
||
link = (struct dc_link *)dm->backlight_link;
|
||
|
||
- brightness = convert_brightness(&caps, bd->props.brightness);
|
||
+ brightness = convert_brightness_from_user(&caps, bd->props.brightness);
|
||
// Change brightness based on AUX property
|
||
if (caps.aux_support)
|
||
return set_backlight_via_aux(link, brightness);
|
||
@@ -2959,7 +2958,7 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
|
||
|
||
if (ret == DC_ERROR_UNEXPECTED)
|
||
return bd->props.brightness;
|
||
- return ret;
|
||
+ return convert_brightness_to_user(&dm->backlight_caps, ret);
|
||
}
|
||
|
||
static const struct backlight_ops amdgpu_dm_backlight_ops = {
|
||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
|
||
index 4dfb6b55bb2ed..b321ff654df42 100644
|
||
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
|
||
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
|
||
@@ -195,10 +195,13 @@ static int __set_legacy_tf(struct dc_transfer_func *func,
|
||
bool has_rom)
|
||
{
|
||
struct dc_gamma *gamma = NULL;
|
||
+ struct calculate_buffer cal_buffer = {0};
|
||
bool res;
|
||
|
||
ASSERT(lut && lut_size == MAX_COLOR_LEGACY_LUT_ENTRIES);
|
||
|
||
+ cal_buffer.buffer_index = -1;
|
||
+
|
||
gamma = dc_create_gamma();
|
||
if (!gamma)
|
||
return -ENOMEM;
|
||
@@ -208,7 +211,7 @@ static int __set_legacy_tf(struct dc_transfer_func *func,
|
||
__drm_lut_to_dc_gamma(lut, gamma, true);
|
||
|
||
res = mod_color_calculate_regamma_params(func, gamma, true, has_rom,
|
||
- NULL);
|
||
+ NULL, &cal_buffer);
|
||
|
||
dc_gamma_release(&gamma);
|
||
|
||
@@ -221,10 +224,13 @@ static int __set_output_tf(struct dc_transfer_func *func,
|
||
bool has_rom)
|
||
{
|
||
struct dc_gamma *gamma = NULL;
|
||
+ struct calculate_buffer cal_buffer = {0};
|
||
bool res;
|
||
|
||
ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES);
|
||
|
||
+ cal_buffer.buffer_index = -1;
|
||
+
|
||
gamma = dc_create_gamma();
|
||
if (!gamma)
|
||
return -ENOMEM;
|
||
@@ -248,7 +254,7 @@ static int __set_output_tf(struct dc_transfer_func *func,
|
||
*/
|
||
gamma->type = GAMMA_CS_TFM_1D;
|
||
res = mod_color_calculate_regamma_params(func, gamma, false,
|
||
- has_rom, NULL);
|
||
+ has_rom, NULL, &cal_buffer);
|
||
}
|
||
|
||
dc_gamma_release(&gamma);
|
||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
|
||
index 07b2f9399671d..842abb4c475bc 100644
|
||
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
|
||
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
|
||
@@ -121,35 +121,35 @@ void enc1_update_generic_info_packet(
|
||
switch (packet_index) {
|
||
case 0:
|
||
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
|
||
- AFMT_GENERIC0_FRAME_UPDATE, 1);
|
||
+ AFMT_GENERIC0_IMMEDIATE_UPDATE, 1);
|
||
break;
|
||
case 1:
|
||
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
|
||
- AFMT_GENERIC1_FRAME_UPDATE, 1);
|
||
+ AFMT_GENERIC1_IMMEDIATE_UPDATE, 1);
|
||
break;
|
||
case 2:
|
||
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
|
||
- AFMT_GENERIC2_FRAME_UPDATE, 1);
|
||
+ AFMT_GENERIC2_IMMEDIATE_UPDATE, 1);
|
||
break;
|
||
case 3:
|
||
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
|
||
- AFMT_GENERIC3_FRAME_UPDATE, 1);
|
||
+ AFMT_GENERIC3_IMMEDIATE_UPDATE, 1);
|
||
break;
|
||
case 4:
|
||
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
|
||
- AFMT_GENERIC4_FRAME_UPDATE, 1);
|
||
+ AFMT_GENERIC4_IMMEDIATE_UPDATE, 1);
|
||
break;
|
||
case 5:
|
||
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
|
||
- AFMT_GENERIC5_FRAME_UPDATE, 1);
|
||
+ AFMT_GENERIC5_IMMEDIATE_UPDATE, 1);
|
||
break;
|
||
case 6:
|
||
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
|
||
- AFMT_GENERIC6_FRAME_UPDATE, 1);
|
||
+ AFMT_GENERIC6_IMMEDIATE_UPDATE, 1);
|
||
break;
|
||
case 7:
|
||
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
|
||
- AFMT_GENERIC7_FRAME_UPDATE, 1);
|
||
+ AFMT_GENERIC7_IMMEDIATE_UPDATE, 1);
|
||
break;
|
||
default:
|
||
break;
|
||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
|
||
index f9b9e221c698b..7507000a99ac4 100644
|
||
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
|
||
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
|
||
@@ -273,7 +273,14 @@ struct dcn10_stream_enc_registers {
|
||
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\
|
||
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\
|
||
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\
|
||
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_IMMEDIATE_UPDATE, mask_sh),\
|
||
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_IMMEDIATE_UPDATE, mask_sh),\
|
||
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_IMMEDIATE_UPDATE, mask_sh),\
|
||
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_IMMEDIATE_UPDATE, mask_sh),\
|
||
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_IMMEDIATE_UPDATE, mask_sh),\
|
||
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_IMMEDIATE_UPDATE, mask_sh),\
|
||
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_IMMEDIATE_UPDATE, mask_sh),\
|
||
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_IMMEDIATE_UPDATE, mask_sh),\
|
||
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\
|
||
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\
|
||
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\
|
||
@@ -337,7 +344,14 @@ struct dcn10_stream_enc_registers {
|
||
type AFMT_GENERIC2_FRAME_UPDATE;\
|
||
type AFMT_GENERIC3_FRAME_UPDATE;\
|
||
type AFMT_GENERIC4_FRAME_UPDATE;\
|
||
+ type AFMT_GENERIC0_IMMEDIATE_UPDATE;\
|
||
+ type AFMT_GENERIC1_IMMEDIATE_UPDATE;\
|
||
+ type AFMT_GENERIC2_IMMEDIATE_UPDATE;\
|
||
+ type AFMT_GENERIC3_IMMEDIATE_UPDATE;\
|
||
type AFMT_GENERIC4_IMMEDIATE_UPDATE;\
|
||
+ type AFMT_GENERIC5_IMMEDIATE_UPDATE;\
|
||
+ type AFMT_GENERIC6_IMMEDIATE_UPDATE;\
|
||
+ type AFMT_GENERIC7_IMMEDIATE_UPDATE;\
|
||
type AFMT_GENERIC5_FRAME_UPDATE;\
|
||
type AFMT_GENERIC6_FRAME_UPDATE;\
|
||
type AFMT_GENERIC7_FRAME_UPDATE;\
|
||
diff --git a/drivers/gpu/drm/amd/display/modules/color/Makefile b/drivers/gpu/drm/amd/display/modules/color/Makefile
|
||
index 65c33a76951a4..e66c19a840c29 100644
|
||
--- a/drivers/gpu/drm/amd/display/modules/color/Makefile
|
||
+++ b/drivers/gpu/drm/amd/display/modules/color/Makefile
|
||
@@ -23,7 +23,7 @@
|
||
# Makefile for the color sub-module of DAL.
|
||
#
|
||
|
||
-MOD_COLOR = color_gamma.o
|
||
+MOD_COLOR = color_gamma.o color_table.o
|
||
|
||
AMD_DAL_MOD_COLOR = $(addprefix $(AMDDALPATH)/modules/color/,$(MOD_COLOR))
|
||
#$(info ************ DAL COLOR MODULE MAKEFILE ************)
|
||
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
|
||
index bcfe34ef8c28d..b8695660b480e 100644
|
||
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
|
||
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
|
||
@@ -30,20 +30,10 @@
|
||
#include "opp.h"
|
||
#include "color_gamma.h"
|
||
|
||
-#define NUM_PTS_IN_REGION 16
|
||
-#define NUM_REGIONS 32
|
||
-#define MAX_HW_POINTS (NUM_PTS_IN_REGION*NUM_REGIONS)
|
||
-
|
||
static struct hw_x_point coordinates_x[MAX_HW_POINTS + 2];
|
||
|
||
-static struct fixed31_32 pq_table[MAX_HW_POINTS + 2];
|
||
-static struct fixed31_32 de_pq_table[MAX_HW_POINTS + 2];
|
||
-
|
||
// these are helpers for calculations to reduce stack usage
|
||
// do not depend on these being preserved across calls
|
||
-static struct fixed31_32 scratch_1;
|
||
-static struct fixed31_32 scratch_2;
|
||
-static struct translate_from_linear_space_args scratch_gamma_args;
|
||
|
||
/* Helper to optimize gamma calculation, only use in translate_from_linear, in
|
||
* particular the dc_fixpt_pow function which is very expensive
|
||
@@ -56,9 +46,6 @@ static struct translate_from_linear_space_args scratch_gamma_args;
|
||
* just multiply with 2^gamma which can be computed once, and save the result so we
|
||
* recursively compute all the values.
|
||
*/
|
||
-static struct fixed31_32 pow_buffer[NUM_PTS_IN_REGION];
|
||
-static struct fixed31_32 gamma_of_2; // 2^gamma
|
||
-int pow_buffer_ptr = -1;
|
||
/*sRGB 709 2.2 2.4 P3*/
|
||
static const int32_t gamma_numerator01[] = { 31308, 180000, 0, 0, 0};
|
||
static const int32_t gamma_numerator02[] = { 12920, 4500, 0, 0, 0};
|
||
@@ -66,9 +53,6 @@ static const int32_t gamma_numerator03[] = { 55, 99, 0, 0, 0};
|
||
static const int32_t gamma_numerator04[] = { 55, 99, 0, 0, 0};
|
||
static const int32_t gamma_numerator05[] = { 2400, 2200, 2200, 2400, 2600};
|
||
|
||
-static bool pq_initialized; /* = false; */
|
||
-static bool de_pq_initialized; /* = false; */
|
||
-
|
||
/* one-time setup of X points */
|
||
void setup_x_points_distribution(void)
|
||
{
|
||
@@ -250,6 +234,8 @@ void precompute_pq(void)
|
||
struct fixed31_32 scaling_factor =
|
||
dc_fixpt_from_fraction(80, 10000);
|
||
|
||
+ struct fixed31_32 *pq_table = mod_color_get_table(type_pq_table);
|
||
+
|
||
/* pow function has problems with arguments too small */
|
||
for (i = 0; i < 32; i++)
|
||
pq_table[i] = dc_fixpt_zero;
|
||
@@ -269,7 +255,7 @@ void precompute_de_pq(void)
|
||
uint32_t begin_index, end_index;
|
||
|
||
struct fixed31_32 scaling_factor = dc_fixpt_from_int(125);
|
||
-
|
||
+ struct fixed31_32 *de_pq_table = mod_color_get_table(type_de_pq_table);
|
||
/* X points is 2^-25 to 2^7
|
||
* De-gamma X is 2^-12 to 2^0 – we are skipping first -12-(-25) = 13 regions
|
||
*/
|
||
@@ -339,6 +325,9 @@ static struct fixed31_32 translate_from_linear_space(
|
||
{
|
||
const struct fixed31_32 one = dc_fixpt_from_int(1);
|
||
|
||
+ struct fixed31_32 scratch_1, scratch_2;
|
||
+ struct calculate_buffer *cal_buffer = args->cal_buffer;
|
||
+
|
||
if (dc_fixpt_le(one, args->arg))
|
||
return one;
|
||
|
||
@@ -352,21 +341,21 @@ static struct fixed31_32 translate_from_linear_space(
|
||
|
||
return scratch_1;
|
||
} else if (dc_fixpt_le(args->a0, args->arg)) {
|
||
- if (pow_buffer_ptr == 0) {
|
||
- gamma_of_2 = dc_fixpt_pow(dc_fixpt_from_int(2),
|
||
+ if (cal_buffer->buffer_index == 0) {
|
||
+ cal_buffer->gamma_of_2 = dc_fixpt_pow(dc_fixpt_from_int(2),
|
||
dc_fixpt_recip(args->gamma));
|
||
}
|
||
scratch_1 = dc_fixpt_add(one, args->a3);
|
||
- if (pow_buffer_ptr < 16)
|
||
+ if (cal_buffer->buffer_index < 16)
|
||
scratch_2 = dc_fixpt_pow(args->arg,
|
||
dc_fixpt_recip(args->gamma));
|
||
else
|
||
- scratch_2 = dc_fixpt_mul(gamma_of_2,
|
||
- pow_buffer[pow_buffer_ptr%16]);
|
||
+ scratch_2 = dc_fixpt_mul(cal_buffer->gamma_of_2,
|
||
+ cal_buffer->buffer[cal_buffer->buffer_index%16]);
|
||
|
||
- if (pow_buffer_ptr != -1) {
|
||
- pow_buffer[pow_buffer_ptr%16] = scratch_2;
|
||
- pow_buffer_ptr++;
|
||
+ if (cal_buffer->buffer_index != -1) {
|
||
+ cal_buffer->buffer[cal_buffer->buffer_index%16] = scratch_2;
|
||
+ cal_buffer->buffer_index++;
|
||
}
|
||
|
||
scratch_1 = dc_fixpt_mul(scratch_1, scratch_2);
|
||
@@ -413,15 +402,17 @@ static struct fixed31_32 translate_from_linear_space_long(
|
||
args->a1);
|
||
}
|
||
|
||
-static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg, bool use_eetf)
|
||
+static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg, bool use_eetf, struct calculate_buffer *cal_buffer)
|
||
{
|
||
struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10);
|
||
+ struct translate_from_linear_space_args scratch_gamma_args;
|
||
|
||
scratch_gamma_args.arg = arg;
|
||
scratch_gamma_args.a0 = dc_fixpt_zero;
|
||
scratch_gamma_args.a1 = dc_fixpt_zero;
|
||
scratch_gamma_args.a2 = dc_fixpt_zero;
|
||
scratch_gamma_args.a3 = dc_fixpt_zero;
|
||
+ scratch_gamma_args.cal_buffer = cal_buffer;
|
||
scratch_gamma_args.gamma = gamma;
|
||
|
||
if (use_eetf)
|
||
@@ -467,14 +458,18 @@ static struct fixed31_32 translate_to_linear_space(
|
||
static struct fixed31_32 translate_from_linear_space_ex(
|
||
struct fixed31_32 arg,
|
||
struct gamma_coefficients *coeff,
|
||
- uint32_t color_index)
|
||
+ uint32_t color_index,
|
||
+ struct calculate_buffer *cal_buffer)
|
||
{
|
||
+ struct translate_from_linear_space_args scratch_gamma_args;
|
||
+
|
||
scratch_gamma_args.arg = arg;
|
||
scratch_gamma_args.a0 = coeff->a0[color_index];
|
||
scratch_gamma_args.a1 = coeff->a1[color_index];
|
||
scratch_gamma_args.a2 = coeff->a2[color_index];
|
||
scratch_gamma_args.a3 = coeff->a3[color_index];
|
||
scratch_gamma_args.gamma = coeff->user_gamma[color_index];
|
||
+ scratch_gamma_args.cal_buffer = cal_buffer;
|
||
|
||
return translate_from_linear_space(&scratch_gamma_args);
|
||
}
|
||
@@ -742,10 +737,11 @@ static void build_pq(struct pwl_float_data_ex *rgb_regamma,
|
||
struct fixed31_32 output;
|
||
struct fixed31_32 scaling_factor =
|
||
dc_fixpt_from_fraction(sdr_white_level, 10000);
|
||
+ struct fixed31_32 *pq_table = mod_color_get_table(type_pq_table);
|
||
|
||
- if (!pq_initialized && sdr_white_level == 80) {
|
||
+ if (!mod_color_is_table_init(type_pq_table) && sdr_white_level == 80) {
|
||
precompute_pq();
|
||
- pq_initialized = true;
|
||
+ mod_color_set_table_init_state(type_pq_table, true);
|
||
}
|
||
|
||
/* TODO: start index is from segment 2^-24, skipping first segment
|
||
@@ -787,12 +783,12 @@ static void build_de_pq(struct pwl_float_data_ex *de_pq,
|
||
{
|
||
uint32_t i;
|
||
struct fixed31_32 output;
|
||
-
|
||
+ struct fixed31_32 *de_pq_table = mod_color_get_table(type_de_pq_table);
|
||
struct fixed31_32 scaling_factor = dc_fixpt_from_int(125);
|
||
|
||
- if (!de_pq_initialized) {
|
||
+ if (!mod_color_is_table_init(type_de_pq_table)) {
|
||
precompute_de_pq();
|
||
- de_pq_initialized = true;
|
||
+ mod_color_set_table_init_state(type_de_pq_table, true);
|
||
}
|
||
|
||
|
||
@@ -811,7 +807,9 @@ static void build_de_pq(struct pwl_float_data_ex *de_pq,
|
||
|
||
static bool build_regamma(struct pwl_float_data_ex *rgb_regamma,
|
||
uint32_t hw_points_num,
|
||
- const struct hw_x_point *coordinate_x, enum dc_transfer_func_predefined type)
|
||
+ const struct hw_x_point *coordinate_x,
|
||
+ enum dc_transfer_func_predefined type,
|
||
+ struct calculate_buffer *cal_buffer)
|
||
{
|
||
uint32_t i;
|
||
bool ret = false;
|
||
@@ -827,20 +825,21 @@ static bool build_regamma(struct pwl_float_data_ex *rgb_regamma,
|
||
if (!build_coefficients(coeff, type))
|
||
goto release;
|
||
|
||
- memset(pow_buffer, 0, NUM_PTS_IN_REGION * sizeof(struct fixed31_32));
|
||
- pow_buffer_ptr = 0; // see variable definition for more info
|
||
+ memset(cal_buffer->buffer, 0, NUM_PTS_IN_REGION * sizeof(struct fixed31_32));
|
||
+ cal_buffer->buffer_index = 0; // see variable definition for more info
|
||
+
|
||
i = 0;
|
||
while (i <= hw_points_num) {
|
||
/*TODO use y vs r,g,b*/
|
||
rgb->r = translate_from_linear_space_ex(
|
||
- coord_x->x, coeff, 0);
|
||
+ coord_x->x, coeff, 0, cal_buffer);
|
||
rgb->g = rgb->r;
|
||
rgb->b = rgb->r;
|
||
++coord_x;
|
||
++rgb;
|
||
++i;
|
||
}
|
||
- pow_buffer_ptr = -1; // reset back to no optimize
|
||
+ cal_buffer->buffer_index = -1;
|
||
ret = true;
|
||
release:
|
||
kvfree(coeff);
|
||
@@ -932,7 +931,8 @@ static void hermite_spline_eetf(struct fixed31_32 input_x,
|
||
static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
|
||
uint32_t hw_points_num,
|
||
const struct hw_x_point *coordinate_x,
|
||
- const struct freesync_hdr_tf_params *fs_params)
|
||
+ const struct freesync_hdr_tf_params *fs_params,
|
||
+ struct calculate_buffer *cal_buffer)
|
||
{
|
||
uint32_t i;
|
||
struct pwl_float_data_ex *rgb = rgb_regamma;
|
||
@@ -969,7 +969,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
|
||
max_content = max_display;
|
||
|
||
if (!use_eetf)
|
||
- pow_buffer_ptr = 0; // see var definition for more info
|
||
+ cal_buffer->buffer_index = 0; // see var definition for more info
|
||
rgb += 32; // first 32 points have problems with fixed point, too small
|
||
coord_x += 32;
|
||
for (i = 32; i <= hw_points_num; i++) {
|
||
@@ -988,7 +988,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
|
||
if (dc_fixpt_lt(scaledX, dc_fixpt_zero))
|
||
output = dc_fixpt_zero;
|
||
else
|
||
- output = calculate_gamma22(scaledX, use_eetf);
|
||
+ output = calculate_gamma22(scaledX, use_eetf, cal_buffer);
|
||
|
||
rgb->r = output;
|
||
rgb->g = output;
|
||
@@ -1008,7 +1008,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
|
||
++coord_x;
|
||
++rgb;
|
||
}
|
||
- pow_buffer_ptr = -1;
|
||
+ cal_buffer->buffer_index = -1;
|
||
|
||
return true;
|
||
}
|
||
@@ -1606,7 +1606,7 @@ static void build_new_custom_resulted_curve(
|
||
}
|
||
|
||
static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma,
|
||
- uint32_t hw_points_num)
|
||
+ uint32_t hw_points_num, struct calculate_buffer *cal_buffer)
|
||
{
|
||
uint32_t i;
|
||
|
||
@@ -1619,7 +1619,7 @@ static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma
|
||
i = 0;
|
||
while (i != hw_points_num + 1) {
|
||
rgb->r = translate_from_linear_space_ex(
|
||
- coord_x->x, &coeff, 0);
|
||
+ coord_x->x, &coeff, 0, cal_buffer);
|
||
rgb->g = rgb->r;
|
||
rgb->b = rgb->r;
|
||
++coord_x;
|
||
@@ -1674,7 +1674,8 @@ static bool map_regamma_hw_to_x_user(
|
||
#define _EXTRA_POINTS 3
|
||
|
||
bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
|
||
- const struct regamma_lut *regamma)
|
||
+ const struct regamma_lut *regamma,
|
||
+ struct calculate_buffer *cal_buffer)
|
||
{
|
||
struct gamma_coefficients coeff;
|
||
const struct hw_x_point *coord_x = coordinates_x;
|
||
@@ -1706,11 +1707,11 @@ bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
|
||
}
|
||
while (i != MAX_HW_POINTS + 1) {
|
||
output_tf->tf_pts.red[i] = translate_from_linear_space_ex(
|
||
- coord_x->x, &coeff, 0);
|
||
+ coord_x->x, &coeff, 0, cal_buffer);
|
||
output_tf->tf_pts.green[i] = translate_from_linear_space_ex(
|
||
- coord_x->x, &coeff, 1);
|
||
+ coord_x->x, &coeff, 1, cal_buffer);
|
||
output_tf->tf_pts.blue[i] = translate_from_linear_space_ex(
|
||
- coord_x->x, &coeff, 2);
|
||
+ coord_x->x, &coeff, 2, cal_buffer);
|
||
++coord_x;
|
||
++i;
|
||
}
|
||
@@ -1723,7 +1724,8 @@ bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
|
||
}
|
||
|
||
bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
|
||
- const struct regamma_lut *regamma)
|
||
+ const struct regamma_lut *regamma,
|
||
+ struct calculate_buffer *cal_buffer)
|
||
{
|
||
struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
|
||
struct dividers dividers;
|
||
@@ -1756,7 +1758,7 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
|
||
scale_user_regamma_ramp(rgb_user, ®amma->ramp, dividers);
|
||
|
||
if (regamma->flags.bits.applyDegamma == 1) {
|
||
- apply_degamma_for_user_regamma(rgb_regamma, MAX_HW_POINTS);
|
||
+ apply_degamma_for_user_regamma(rgb_regamma, MAX_HW_POINTS, cal_buffer);
|
||
copy_rgb_regamma_to_coordinates_x(coordinates_x,
|
||
MAX_HW_POINTS, rgb_regamma);
|
||
}
|
||
@@ -1943,7 +1945,8 @@ static bool calculate_curve(enum dc_transfer_func_predefined trans,
|
||
struct dc_transfer_func_distributed_points *points,
|
||
struct pwl_float_data_ex *rgb_regamma,
|
||
const struct freesync_hdr_tf_params *fs_params,
|
||
- uint32_t sdr_ref_white_level)
|
||
+ uint32_t sdr_ref_white_level,
|
||
+ struct calculate_buffer *cal_buffer)
|
||
{
|
||
uint32_t i;
|
||
bool ret = false;
|
||
@@ -1979,7 +1982,8 @@ static bool calculate_curve(enum dc_transfer_func_predefined trans,
|
||
build_freesync_hdr(rgb_regamma,
|
||
MAX_HW_POINTS,
|
||
coordinates_x,
|
||
- fs_params);
|
||
+ fs_params,
|
||
+ cal_buffer);
|
||
|
||
ret = true;
|
||
} else if (trans == TRANSFER_FUNCTION_HLG) {
|
||
@@ -2008,7 +2012,8 @@ static bool calculate_curve(enum dc_transfer_func_predefined trans,
|
||
build_regamma(rgb_regamma,
|
||
MAX_HW_POINTS,
|
||
coordinates_x,
|
||
- trans);
|
||
+ trans,
|
||
+ cal_buffer);
|
||
|
||
ret = true;
|
||
}
|
||
@@ -2018,7 +2023,8 @@ static bool calculate_curve(enum dc_transfer_func_predefined trans,
|
||
|
||
bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
|
||
const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
|
||
- const struct freesync_hdr_tf_params *fs_params)
|
||
+ const struct freesync_hdr_tf_params *fs_params,
|
||
+ struct calculate_buffer *cal_buffer)
|
||
{
|
||
struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
|
||
struct dividers dividers;
|
||
@@ -2090,7 +2096,8 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
|
||
tf_pts,
|
||
rgb_regamma,
|
||
fs_params,
|
||
- output_tf->sdr_ref_white_level);
|
||
+ output_tf->sdr_ref_white_level,
|
||
+ cal_buffer);
|
||
|
||
if (ret) {
|
||
map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
|
||
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
|
||
index 7f56226ba77a9..37ffbef6602b0 100644
|
||
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
|
||
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
|
||
@@ -26,6 +26,8 @@
|
||
#ifndef COLOR_MOD_COLOR_GAMMA_H_
|
||
#define COLOR_MOD_COLOR_GAMMA_H_
|
||
|
||
+#include "color_table.h"
|
||
+
|
||
struct dc_transfer_func;
|
||
struct dc_gamma;
|
||
struct dc_transfer_func_distributed_points;
|
||
@@ -83,6 +85,12 @@ struct freesync_hdr_tf_params {
|
||
unsigned int skip_tm; // skip tm
|
||
};
|
||
|
||
+struct calculate_buffer {
|
||
+ int buffer_index;
|
||
+ struct fixed31_32 buffer[NUM_PTS_IN_REGION];
|
||
+ struct fixed31_32 gamma_of_2;
|
||
+};
|
||
+
|
||
struct translate_from_linear_space_args {
|
||
struct fixed31_32 arg;
|
||
struct fixed31_32 a0;
|
||
@@ -90,6 +98,7 @@ struct translate_from_linear_space_args {
|
||
struct fixed31_32 a2;
|
||
struct fixed31_32 a3;
|
||
struct fixed31_32 gamma;
|
||
+ struct calculate_buffer *cal_buffer;
|
||
};
|
||
|
||
void setup_x_points_distribution(void);
|
||
@@ -99,7 +108,8 @@ void precompute_de_pq(void);
|
||
|
||
bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
|
||
const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
|
||
- const struct freesync_hdr_tf_params *fs_params);
|
||
+ const struct freesync_hdr_tf_params *fs_params,
|
||
+ struct calculate_buffer *cal_buffer);
|
||
|
||
bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
|
||
struct dc_transfer_func *output_tf,
|
||
@@ -109,10 +119,12 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
|
||
struct dc_transfer_func_distributed_points *points);
|
||
|
||
bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
|
||
- const struct regamma_lut *regamma);
|
||
+ const struct regamma_lut *regamma,
|
||
+ struct calculate_buffer *cal_buffer);
|
||
|
||
bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
|
||
- const struct regamma_lut *regamma);
|
||
+ const struct regamma_lut *regamma,
|
||
+ struct calculate_buffer *cal_buffer);
|
||
|
||
|
||
#endif /* COLOR_MOD_COLOR_GAMMA_H_ */
|
||
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_table.c b/drivers/gpu/drm/amd/display/modules/color/color_table.c
|
||
new file mode 100644
|
||
index 0000000000000..692e536e7d057
|
||
--- /dev/null
|
||
+++ b/drivers/gpu/drm/amd/display/modules/color/color_table.c
|
||
@@ -0,0 +1,48 @@
|
||
+/*
|
||
+ * Copyright (c) 2019 Advanced Micro Devices, Inc. (unpublished)
|
||
+ *
|
||
+ * All rights reserved. This notice is intended as a precaution against
|
||
+ * inadvertent publication and does not imply publication or any waiver
|
||
+ * of confidentiality. The year included in the foregoing notice is the
|
||
+ * year of creation of the work.
|
||
+ */
|
||
+
|
||
+#include "color_table.h"
|
||
+
|
||
+static struct fixed31_32 pq_table[MAX_HW_POINTS + 2];
|
||
+static struct fixed31_32 de_pq_table[MAX_HW_POINTS + 2];
|
||
+static bool pq_initialized;
|
||
+static bool de_pg_initialized;
|
||
+
|
||
+bool mod_color_is_table_init(enum table_type type)
|
||
+{
|
||
+ bool ret = false;
|
||
+
|
||
+ if (type == type_pq_table)
|
||
+ ret = pq_initialized;
|
||
+ if (type == type_de_pq_table)
|
||
+ ret = de_pg_initialized;
|
||
+
|
||
+ return ret;
|
||
+}
|
||
+
|
||
+struct fixed31_32 *mod_color_get_table(enum table_type type)
|
||
+{
|
||
+ struct fixed31_32 *table = NULL;
|
||
+
|
||
+ if (type == type_pq_table)
|
||
+ table = pq_table;
|
||
+ if (type == type_de_pq_table)
|
||
+ table = de_pq_table;
|
||
+
|
||
+ return table;
|
||
+}
|
||
+
|
||
+void mod_color_set_table_init_state(enum table_type type, bool state)
|
||
+{
|
||
+ if (type == type_pq_table)
|
||
+ pq_initialized = state;
|
||
+ if (type == type_de_pq_table)
|
||
+ de_pg_initialized = state;
|
||
+}
|
||
+
|
||
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_table.h b/drivers/gpu/drm/amd/display/modules/color/color_table.h
|
||
new file mode 100644
|
||
index 0000000000000..2621dd6194027
|
||
--- /dev/null
|
||
+++ b/drivers/gpu/drm/amd/display/modules/color/color_table.h
|
||
@@ -0,0 +1,47 @@
|
||
+/*
|
||
+ * Copyright 2016 Advanced Micro Devices, Inc.
|
||
+ *
|
||
+ * Permission is hereby granted, free of charge, to any person obtaining a
|
||
+ * copy of this software and associated documentation files (the "Software"),
|
||
+ * to deal in the Software without restriction, including without limitation
|
||
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||
+ * and/or sell copies of the Software, and to permit persons to whom the
|
||
+ * Software is furnished to do so, subject to the following conditions:
|
||
+ *
|
||
+ * The above copyright notice and this permission notice shall be included in
|
||
+ * all copies or substantial portions of the Software.
|
||
+ *
|
||
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||
+ * OTHER DEALINGS IN THE SOFTWARE.
|
||
+ *
|
||
+ * Authors: AMD
|
||
+ *
|
||
+ */
|
||
+
|
||
+
|
||
+#ifndef COLOR_MOD_COLOR_TABLE_H_
|
||
+#define COLOR_MOD_COLOR_TABLE_H_
|
||
+
|
||
+#include "dc_types.h"
|
||
+
|
||
+#define NUM_PTS_IN_REGION 16
|
||
+#define NUM_REGIONS 32
|
||
+#define MAX_HW_POINTS (NUM_PTS_IN_REGION*NUM_REGIONS)
|
||
+
|
||
+enum table_type {
|
||
+ type_pq_table,
|
||
+ type_de_pq_table
|
||
+};
|
||
+
|
||
+bool mod_color_is_table_init(enum table_type type);
|
||
+
|
||
+struct fixed31_32 *mod_color_get_table(enum table_type type);
|
||
+
|
||
+void mod_color_set_table_init_state(enum table_type type, bool state);
|
||
+
|
||
+#endif /* COLOR_MOD_COLOR_TABLE_H_ */
|
||
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
|
||
index eb7421e83b865..23a7fa8447e24 100644
|
||
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
|
||
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
|
||
@@ -324,22 +324,44 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
|
||
|
||
/* Choose number of frames to insert based on how close it
|
||
* can get to the mid point of the variable range.
|
||
+ * - Delta for CEIL: delta_from_mid_point_in_us_1
|
||
+ * - Delta for FLOOR: delta_from_mid_point_in_us_2
|
||
*/
|
||
- if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us &&
|
||
- (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 ||
|
||
- mid_point_frames_floor < 2)) {
|
||
+ if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) {
|
||
+ /* Check for out of range.
|
||
+ * If using CEIL produces a value that is out of range,
|
||
+ * then we are forced to use FLOOR.
|
||
+ */
|
||
+ frames_to_insert = mid_point_frames_floor;
|
||
+ } else if (mid_point_frames_floor < 2) {
|
||
+ /* Check if FLOOR would result in non-LFC. In this case
|
||
+ * choose to use CEIL
|
||
+ */
|
||
+ frames_to_insert = mid_point_frames_ceil;
|
||
+ } else if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
|
||
+ /* If choosing CEIL results in a frame duration that is
|
||
+ * closer to the mid point of the range.
|
||
+ * Choose CEIL
|
||
+ */
|
||
frames_to_insert = mid_point_frames_ceil;
|
||
- delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
|
||
- delta_from_mid_point_in_us_1;
|
||
} else {
|
||
+ /* If choosing FLOOR results in a frame duration that is
|
||
+ * closer to the mid point of the range.
|
||
+ * Choose FLOOR
|
||
+ */
|
||
frames_to_insert = mid_point_frames_floor;
|
||
- delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
|
||
- delta_from_mid_point_in_us_2;
|
||
}
|
||
|
||
/* Prefer current frame multiplier when BTR is enabled unless it drifts
|
||
* too far from the midpoint
|
||
*/
|
||
+ if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
|
||
+ delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
|
||
+ delta_from_mid_point_in_us_1;
|
||
+ } else {
|
||
+ delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
|
||
+ delta_from_mid_point_in_us_2;
|
||
+ }
|
||
if (in_out_vrr->btr.frames_to_insert != 0 &&
|
||
delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
|
||
if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
|
||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
|
||
index c9cfe90a29471..9ee8cf8267c88 100644
|
||
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
|
||
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
|
||
@@ -204,8 +204,7 @@ static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clo
|
||
{
|
||
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
|
||
|
||
- if (smu10_data->need_min_deep_sleep_dcefclk &&
|
||
- smu10_data->deep_sleep_dcefclk != clock) {
|
||
+ if (clock && smu10_data->deep_sleep_dcefclk != clock) {
|
||
smu10_data->deep_sleep_dcefclk = clock;
|
||
smum_send_msg_to_smc_with_parameter(hwmgr,
|
||
PPSMC_MSG_SetMinDeepSleepDcefclk,
|
||
@@ -219,8 +218,7 @@ static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t c
|
||
{
|
||
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
|
||
|
||
- if (smu10_data->dcf_actual_hard_min_freq &&
|
||
- smu10_data->dcf_actual_hard_min_freq != clock) {
|
||
+ if (clock && smu10_data->dcf_actual_hard_min_freq != clock) {
|
||
smu10_data->dcf_actual_hard_min_freq = clock;
|
||
smum_send_msg_to_smc_with_parameter(hwmgr,
|
||
PPSMC_MSG_SetHardMinDcefclkByFreq,
|
||
@@ -234,8 +232,7 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc
|
||
{
|
||
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
|
||
|
||
- if (smu10_data->f_actual_hard_min_freq &&
|
||
- smu10_data->f_actual_hard_min_freq != clock) {
|
||
+ if (clock && smu10_data->f_actual_hard_min_freq != clock) {
|
||
smu10_data->f_actual_hard_min_freq = clock;
|
||
smum_send_msg_to_smc_with_parameter(hwmgr,
|
||
PPSMC_MSG_SetHardMinFclkByFreq,
|
||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
|
||
index 7783c7fd7ccb0..eff87c8968380 100644
|
||
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
|
||
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
|
||
@@ -363,17 +363,19 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
|
||
static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
|
||
struct PP_TemperatureRange *range)
|
||
{
|
||
+ struct phm_ppt_v2_information *pp_table_info =
|
||
+ (struct phm_ppt_v2_information *)(hwmgr->pptable);
|
||
+ struct phm_tdp_table *tdp_table = pp_table_info->tdp_table;
|
||
struct amdgpu_device *adev = hwmgr->adev;
|
||
- int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP *
|
||
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
|
||
- int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP *
|
||
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
|
||
+ int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP;
|
||
+ int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP;
|
||
uint32_t val;
|
||
|
||
- if (low < range->min)
|
||
- low = range->min;
|
||
- if (high > range->max)
|
||
- high = range->max;
|
||
+ /* compare them in unit celsius degree */
|
||
+ if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
|
||
+ low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
|
||
+ if (high > tdp_table->usSoftwareShutdownTemp)
|
||
+ high = tdp_table->usSoftwareShutdownTemp;
|
||
|
||
if (low > high)
|
||
return -EINVAL;
|
||
@@ -382,8 +384,8 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
|
||
|
||
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
|
||
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
|
||
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
|
||
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
|
||
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
|
||
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
|
||
val &= (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK) &
|
||
(~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK) &
|
||
(~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
|
||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
|
||
index c85806a6f62e3..650623106ceba 100644
|
||
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
|
||
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
|
||
@@ -170,17 +170,18 @@ int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr)
|
||
static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
|
||
struct PP_TemperatureRange *range)
|
||
{
|
||
+ struct phm_ppt_v3_information *pptable_information =
|
||
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
|
||
struct amdgpu_device *adev = hwmgr->adev;
|
||
- int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP *
|
||
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
|
||
- int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP *
|
||
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
|
||
+ int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP;
|
||
+ int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP;
|
||
uint32_t val;
|
||
|
||
- if (low < range->min)
|
||
- low = range->min;
|
||
- if (high > range->max)
|
||
- high = range->max;
|
||
+ /* compare them in unit celsius degree */
|
||
+ if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
|
||
+ low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ if (high > pptable_information->us_software_shutdown_temp)
+ high = pptable_information->us_software_shutdown_temp;
if (low > high)
return -EINVAL;
@@ -189,8 +190,8 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 9ff470f1b826c..9bd2874a122b4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -979,10 +979,7 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
- uint64_t features_enabled;
- int i;
- bool enabled;
- int ret = 0;
+ int i, ret = 0;
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_DisableAllSmuFeatures,
@@ -990,17 +987,8 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
"[DisableAllSMUFeatures] Failed to disable all smu features!",
return ret);
- ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
- PP_ASSERT_WITH_CODE(!ret,
- "[DisableAllSMUFeatures] Failed to get enabled smc features!",
- return ret);
-
- for (i = 0; i < GNLD_FEATURES_MAX; i++) {
- enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
- true : false;
- data->smu_features[i].enabled = enabled;
- data->smu_features[i].supported = enabled;
- }
+ for (i = 0; i < GNLD_FEATURES_MAX; i++)
+ data->smu_features[i].enabled = 0;
return 0;
}
@@ -1652,12 +1640,6 @@ static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr)
data->uvd_power_gated = true;
data->vce_power_gated = true;
-
- if (data->smu_features[GNLD_DPM_UVD].enabled)
- data->uvd_power_gated = false;
-
- if (data->smu_features[GNLD_DPM_VCE].enabled)
- data->vce_power_gated = false;
}
static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
@@ -3230,10 +3212,11 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
{
- uint64_t features_enabled;
- uint64_t features_to_enable;
- uint64_t features_to_disable;
- int ret = 0;
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
+ uint64_t features_enabled, features_to_enable, features_to_disable;
+ int i, ret = 0;
+ bool enabled;
if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
return -EINVAL;
@@ -3262,6 +3245,17 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
return ret;
}
+ /* Update the cached feature enablement state */
+ ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < GNLD_FEATURES_MAX; i++) {
+ enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
+ true : false;
+ data->smu_features[i].enabled = enabled;
+ }
+
||
}
|
||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
index 7add2f60f49c4..364162ddaa9c6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
@@ -240,17 +240,18 @@ int vega20_thermal_get_temperature(struct pp_hwmgr *hwmgr)
static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range)
{
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
struct amdgpu_device *adev = hwmgr->adev;
- int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP;
+ int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP;
uint32_t val;
- if (low < range->min)
- low = range->min;
- if (high > range->max)
- high = range->max;
+ /* compare them in unit celsius degree */
+ if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
+ low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ if (high > pptable_information->us_software_shutdown_temp)
+ high = pptable_information->us_software_shutdown_temp;
if (low > high)
return -EINVAL;
@@ -259,8 +260,8 @@ static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
- val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
- val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
|
||
index 56bd938961eee..f33418d6e1a08 100644
|
||
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
|
||
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
|
||
@@ -492,10 +492,8 @@ static void komeda_crtc_reset(struct drm_crtc *crtc)
|
||
crtc->state = NULL;
|
||
|
||
state = kzalloc(sizeof(*state), GFP_KERNEL);
|
||
- if (state) {
|
||
- crtc->state = &state->base;
|
||
- crtc->state->crtc = crtc;
|
||
- }
|
||
+ if (state)
|
||
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
|
||
}
|
||
|
||
static struct drm_crtc_state *
|
||
@@ -616,7 +614,6 @@ static int komeda_crtc_add(struct komeda_kms_dev *kms,
|
||
return err;
|
||
|
||
drm_crtc_helper_add(crtc, &komeda_crtc_helper_funcs);
|
||
- drm_crtc_vblank_reset(crtc);
|
||
|
||
crtc->port = kcrtc->master->of_output_port;
|
||
|
||
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
|
||
index def8c9ffafcaf..a2a10bfbccac4 100644
|
||
--- a/drivers/gpu/drm/arm/malidp_drv.c
|
||
+++ b/drivers/gpu/drm/arm/malidp_drv.c
|
||
@@ -870,7 +870,6 @@ static int malidp_bind(struct device *dev)
|
||
drm->irq_enabled = true;
|
||
|
||
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
|
||
- drm_crtc_vblank_reset(&malidp->crtc);
|
||
if (ret < 0) {
|
||
DRM_ERROR("failed to initialise vblank\n");
|
||
goto vblank_fail;
|
||
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
|
||
index 10985134ce0ba..ce246b96330b7 100644
|
||
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
|
||
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
|
||
@@ -411,10 +411,8 @@ static void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
|
||
}
|
||
|
||
state = kzalloc(sizeof(*state), GFP_KERNEL);
|
||
- if (state) {
|
||
- crtc->state = &state->base;
|
||
- crtc->state->crtc = crtc;
|
||
- }
|
||
+ if (state)
|
||
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
|
||
}
|
||
|
||
static struct drm_crtc_state *
|
||
@@ -528,7 +526,6 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
|
||
}
|
||
|
||
drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs);
|
||
- drm_crtc_vblank_reset(&crtc->base);
|
||
|
||
drm_mode_crtc_set_gamma_size(&crtc->base, ATMEL_HLCDC_CLUT_SIZE);
|
||
drm_crtc_enable_color_mgmt(&crtc->base, 0, false,
|
||
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
|
||
index 85d163f16801f..b78e142a5620c 100644
|
||
--- a/drivers/gpu/drm/drm_atomic_helper.c
|
||
+++ b/drivers/gpu/drm/drm_atomic_helper.c
|
||
@@ -34,6 +34,7 @@
|
||
#include <drm/drm_bridge.h>
|
||
#include <drm/drm_damage_helper.h>
|
||
#include <drm/drm_device.h>
|
||
+#include <drm/drm_drv.h>
|
||
#include <drm/drm_plane_helper.h>
|
||
#include <drm/drm_print.h>
|
||
#include <drm/drm_self_refresh_helper.h>
|
||
@@ -3105,7 +3106,7 @@ void drm_atomic_helper_shutdown(struct drm_device *dev)
|
||
if (ret)
|
||
DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
|
||
|
||
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
|
||
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
|
||
}
|
||
EXPORT_SYMBOL(drm_atomic_helper_shutdown);
|
||
|
||
@@ -3245,7 +3246,7 @@ struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
|
||
}
|
||
|
||
unlock:
|
||
- DRM_MODESET_LOCK_ALL_END(ctx, err);
|
||
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
|
||
if (err)
|
||
return ERR_PTR(err);
|
||
|
||
@@ -3326,7 +3327,7 @@ int drm_atomic_helper_resume(struct drm_device *dev,
|
||
|
||
err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
|
||
|
||
- DRM_MODESET_LOCK_ALL_END(ctx, err);
|
||
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
|
||
drm_atomic_state_put(state);
|
||
|
||
return err;
|
||
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
|
||
index 8fce6a115dfe3..9ad74045158ec 100644
|
||
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
|
||
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
|
||
@@ -32,6 +32,7 @@
|
||
#include <drm/drm_device.h>
|
||
#include <drm/drm_plane.h>
|
||
#include <drm/drm_print.h>
|
||
+#include <drm/drm_vblank.h>
|
||
#include <drm/drm_writeback.h>
|
||
|
||
#include <linux/slab.h>
|
||
@@ -93,6 +94,9 @@ __drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
|
||
if (crtc_state)
|
||
__drm_atomic_helper_crtc_state_reset(crtc_state, crtc);
|
||
|
||
+ if (drm_dev_has_vblank(crtc->dev))
|
||
+ drm_crtc_vblank_reset(crtc);
|
||
+
|
||
crtc->state = crtc_state;
|
||
}
|
||
EXPORT_SYMBOL(__drm_atomic_helper_crtc_reset);
|
||
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
|
||
index c93123ff7c218..138ff34b31db5 100644
|
||
--- a/drivers/gpu/drm/drm_color_mgmt.c
|
||
+++ b/drivers/gpu/drm/drm_color_mgmt.c
|
||
@@ -294,7 +294,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
|
||
crtc->gamma_size, &ctx);
|
||
|
||
out:
|
||
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
|
||
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
|
||
return ret;
|
||
|
||
}
|
||
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
|
||
index 4936e1080e417..eb1c33e5d0f49 100644
|
||
--- a/drivers/gpu/drm/drm_crtc.c
|
||
+++ b/drivers/gpu/drm/drm_crtc.c
|
||
@@ -561,7 +561,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
|
||
if (crtc_req->mode_valid && !drm_lease_held(file_priv, plane->base.id))
|
||
return -EACCES;
|
||
|
||
- mutex_lock(&crtc->dev->mode_config.mutex);
|
||
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx,
|
||
DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
|
||
|
||
@@ -728,8 +727,7 @@ out:
|
||
fb = NULL;
|
||
mode = NULL;
|
||
|
||
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
|
||
- mutex_unlock(&crtc->dev->mode_config.mutex);
|
||
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
|
||
|
||
return ret;
|
||
}
|
||
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
|
||
index ffbd754a53825..954cd69117826 100644
|
||
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
|
||
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
|
||
@@ -4993,8 +4993,8 @@ int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm
|
||
|
||
crtc = conn_state->crtc;
|
||
|
||
- if (WARN_ON(!crtc))
|
||
- return -EINVAL;
|
||
+ if (!crtc)
|
||
+ continue;
|
||
|
||
if (!drm_dp_mst_dsc_aux_for_port(pos->port))
|
||
continue;
|
||
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
|
||
index 901b078abf40c..db05f386a709e 100644
|
||
--- a/drivers/gpu/drm/drm_mode_object.c
|
||
+++ b/drivers/gpu/drm/drm_mode_object.c
|
||
@@ -428,7 +428,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
|
||
out_unref:
|
||
drm_mode_object_put(obj);
|
||
out:
|
||
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
|
||
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
|
||
return ret;
|
||
}
|
||
|
||
@@ -470,7 +470,7 @@ static int set_property_legacy(struct drm_mode_object *obj,
|
||
break;
|
||
}
|
||
drm_property_change_valid_put(prop, ref);
|
||
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
|
||
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
|
||
|
||
return ret;
|
||
}
|
||
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
|
||
index 4af173ced3277..fdbafc2b81998 100644
|
||
--- a/drivers/gpu/drm/drm_plane.c
|
||
+++ b/drivers/gpu/drm/drm_plane.c
|
||
@@ -791,7 +791,7 @@ static int setplane_internal(struct drm_plane *plane,
|
||
crtc_x, crtc_y, crtc_w, crtc_h,
|
||
src_x, src_y, src_w, src_h, &ctx);
|
||
|
||
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
|
||
+ DRM_MODESET_LOCK_ALL_END(plane->dev, ctx, ret);
|
||
|
||
return ret;
|
||
}
|
||
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
|
||
index 4a512b062df8f..bb9a37d3fcff6 100644
|
||
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
|
||
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
|
||
@@ -337,9 +337,16 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
|
||
|
||
gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
|
||
gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
|
||
- gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
|
||
gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID);
|
||
- gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
|
||
+
|
||
+ /*
|
||
+ * Reading these two registers on GC600 rev 0x19 result in a
|
||
+ * unhandled fault: external abort on non-linefetch
|
||
+ */
|
||
+ if (!etnaviv_is_model_rev(gpu, GC600, 0x19)) {
|
||
+ gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
|
||
+ gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
|
||
+ }
|
||
|
||
/*
|
||
* !!!! HACK ALERT !!!!
|
||
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
|
||
index 4e3e95dce6d87..cd46c882269cc 100644
|
||
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
|
||
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
|
||
@@ -89,12 +89,15 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
|
||
u32 dma_addr;
|
||
int change;
|
||
|
||
+ /* block scheduler */
|
||
+ drm_sched_stop(&gpu->sched, sched_job);
|
||
+
|
||
/*
|
||
* If the GPU managed to complete this jobs fence, the timout is
|
||
* spurious. Bail out.
|
||
*/
|
||
if (dma_fence_is_signaled(submit->out_fence))
|
||
- return;
|
||
+ goto out_no_timeout;
|
||
|
||
/*
|
||
* If the GPU is still making forward progress on the front-end (which
|
||
@@ -105,12 +108,9 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
|
||
change = dma_addr - gpu->hangcheck_dma_addr;
|
||
if (change < 0 || change > 16) {
|
||
gpu->hangcheck_dma_addr = dma_addr;
|
||
- return;
|
||
+ goto out_no_timeout;
|
||
}
|
||
|
||
- /* block scheduler */
|
||
- drm_sched_stop(&gpu->sched, sched_job);
|
||
-
|
||
if(sched_job)
|
||
drm_sched_increase_karma(sched_job);
|
||
|
||
@@ -120,6 +120,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
|
||
|
||
drm_sched_resubmit_jobs(&gpu->sched);
|
||
|
||
+out_no_timeout:
|
||
/* restart scheduler after GPU is usable again */
|
||
drm_sched_start(&gpu->sched, true);
|
||
}
|
||
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
|
||
index 372354d33f552..5ac4a999f05a6 100644
|
||
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
|
||
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
|
||
@@ -1204,6 +1204,12 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
|
||
return dst;
|
||
}
|
||
|
||
+static inline bool cmd_desc_is(const struct drm_i915_cmd_descriptor * const desc,
|
||
+ const u32 cmd)
|
||
+{
|
||
+ return desc->cmd.value == (cmd & desc->cmd.mask);
|
||
+}
|
||
+
|
||
static bool check_cmd(const struct intel_engine_cs *engine,
|
||
const struct drm_i915_cmd_descriptor *desc,
|
||
const u32 *cmd, u32 length)
|
||
@@ -1242,19 +1248,19 @@ static bool check_cmd(const struct intel_engine_cs *engine,
|
||
* allowed mask/value pair given in the whitelist entry.
|
||
*/
|
||
if (reg->mask) {
|
||
- if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
|
||
+ if (cmd_desc_is(desc, MI_LOAD_REGISTER_MEM)) {
|
||
DRM_DEBUG("CMD: Rejected LRM to masked register 0x%08X\n",
|
||
reg_addr);
|
||
return false;
|
||
}
|
||
|
||
- if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
|
||
+ if (cmd_desc_is(desc, MI_LOAD_REGISTER_REG)) {
|
||
DRM_DEBUG("CMD: Rejected LRR to masked register 0x%08X\n",
|
||
reg_addr);
|
||
return false;
|
||
}
|
||
|
||
- if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
|
||
+ if (cmd_desc_is(desc, MI_LOAD_REGISTER_IMM(1)) &&
|
||
(offset + 2 > length ||
|
||
(cmd[offset + 1] & reg->mask) != reg->value)) {
|
||
DRM_DEBUG("CMD: Rejected LRI to masked register 0x%08X\n",
|
||
@@ -1478,7 +1484,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
|
||
break;
|
||
}
|
||
|
||
- if (desc->cmd.value == MI_BATCH_BUFFER_START) {
|
||
+ if (cmd_desc_is(desc, MI_BATCH_BUFFER_START)) {
|
||
ret = check_bbstart(cmd, offset, length, batch_length,
|
||
batch_addr, shadow_addr,
|
||
jump_whitelist);
|
||
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
|
||
index 5db06b5909438..e7b39f3ca33dc 100644
|
||
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
|
||
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
|
||
@@ -396,7 +396,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
|
||
ring->next = ring->start;
|
||
|
||
/* reset completed fence seqno: */
|
||
- ring->memptrs->fence = ring->seqno;
|
||
+ ring->memptrs->fence = ring->fctx->completed_fence;
|
||
ring->memptrs->rptr = 0;
|
||
}
|
||
|
||
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
|
||
index b5fed67c4651f..0c54b7bc19010 100644
|
||
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
|
||
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
|
||
@@ -1117,8 +1117,6 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
|
||
mdp5_crtc_destroy_state(crtc, crtc->state);
|
||
|
||
__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
|
||
-
|
||
- drm_crtc_vblank_reset(crtc);
|
||
}
|
||
|
||
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
|
||
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
|
||
index 800b7757252e3..d2c2d102e7329 100644
|
||
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
|
||
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
|
||
@@ -2160,8 +2160,10 @@ nv50_disp_atomic_commit(struct drm_device *dev,
|
||
int ret, i;
|
||
|
||
ret = pm_runtime_get_sync(dev->dev);
|
||
- if (ret < 0 && ret != -EACCES)
|
||
+ if (ret < 0 && ret != -EACCES) {
|
||
+ pm_runtime_put_autosuspend(dev->dev);
|
||
return ret;
|
||
+ }
|
||
|
||
ret = drm_atomic_helper_setup_commit(state, nonblock);
|
||
if (ret)
|
||
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
|
||
index 1b383ae0248f3..ef8ddbe445812 100644
|
||
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
|
||
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
|
||
@@ -572,8 +572,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
|
||
pm_runtime_get_noresume(dev->dev);
|
||
} else {
|
||
ret = pm_runtime_get_sync(dev->dev);
|
||
- if (ret < 0 && ret != -EACCES)
|
||
+ if (ret < 0 && ret != -EACCES) {
|
||
+ pm_runtime_put_autosuspend(dev->dev);
|
||
return conn_status;
|
||
+ }
|
||
}
|
||
|
||
nv_encoder = nouveau_connector_ddc_detect(connector);
|
||
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
|
||
index d5c23d1c20d88..44e515bbbb444 100644
|
||
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
|
||
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
|
||
@@ -189,8 +189,10 @@ nouveau_fbcon_open(struct fb_info *info, int user)
|
||
struct nouveau_fbdev *fbcon = info->par;
|
||
struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
|
||
int ret = pm_runtime_get_sync(drm->dev->dev);
|
||
- if (ret < 0 && ret != -EACCES)
|
||
+ if (ret < 0 && ret != -EACCES) {
|
||
+ pm_runtime_put(drm->dev->dev);
|
||
return ret;
|
||
+ }
|
||
return 0;
|
||
}
|
||
|
||
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
|
||
index fce7e944a280b..6d40914675dad 100644
|
||
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
|
||
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
|
||
@@ -697,14 +697,16 @@ static int omap_crtc_atomic_get_property(struct drm_crtc *crtc,
|
||
|
||
static void omap_crtc_reset(struct drm_crtc *crtc)
|
||
{
|
||
+ struct omap_crtc_state *state;
|
||
+
|
||
if (crtc->state)
|
||
__drm_atomic_helper_crtc_destroy_state(crtc->state);
|
||
|
||
kfree(crtc->state);
|
||
- crtc->state = kzalloc(sizeof(struct omap_crtc_state), GFP_KERNEL);
|
||
|
||
- if (crtc->state)
|
||
- crtc->state->crtc = crtc;
|
||
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
|
||
+ if (state)
|
||
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
|
||
}
|
||
|
||
static struct drm_crtc_state *
|
||
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
|
||
index cdafd7ef1c320..cc4d754ff8c02 100644
|
||
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
|
||
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
|
||
@@ -595,7 +595,6 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
|
||
{
|
||
const struct soc_device_attribute *soc;
|
||
struct drm_device *ddev;
|
||
- unsigned int i;
|
||
int ret;
|
||
|
||
DBG("%s", dev_name(dev));
|
||
@@ -642,9 +641,6 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
|
||
goto err_cleanup_modeset;
|
||
}
|
||
|
||
- for (i = 0; i < priv->num_pipes; i++)
|
||
- drm_crtc_vblank_off(priv->pipes[i].crtc);
|
||
-
|
||
omap_fbdev_init(ddev);
|
||
|
||
drm_kms_helper_poll_init(ddev);
|
||
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
|
||
index fe12d9d91d7a5..e308344344425 100644
|
||
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
|
||
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
|
||
@@ -879,8 +879,10 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
|
||
|
||
if (!drm_kms_helper_is_poll_worker()) {
|
||
r = pm_runtime_get_sync(connector->dev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(connector->dev->dev);
|
||
return connector_status_disconnected;
|
||
+ }
|
||
}
|
||
|
||
if (encoder) {
|
||
@@ -1025,8 +1027,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
|
||
|
||
if (!drm_kms_helper_is_poll_worker()) {
|
||
r = pm_runtime_get_sync(connector->dev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(connector->dev->dev);
|
||
return connector_status_disconnected;
|
||
+ }
|
||
}
|
||
|
||
encoder = radeon_best_single_encoder(connector);
|
||
@@ -1163,8 +1167,10 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
|
||
|
||
if (!drm_kms_helper_is_poll_worker()) {
|
||
r = pm_runtime_get_sync(connector->dev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(connector->dev->dev);
|
||
return connector_status_disconnected;
|
||
+ }
|
||
}
|
||
|
||
encoder = radeon_best_single_encoder(connector);
|
||
@@ -1247,8 +1253,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
|
||
|
||
if (!drm_kms_helper_is_poll_worker()) {
|
||
r = pm_runtime_get_sync(connector->dev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(connector->dev->dev);
|
||
return connector_status_disconnected;
|
||
+ }
|
||
}
|
||
|
||
if (radeon_connector->detected_hpd_without_ddc) {
|
||
@@ -1657,8 +1665,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
|
||
|
||
if (!drm_kms_helper_is_poll_worker()) {
|
||
r = pm_runtime_get_sync(connector->dev->dev);
|
||
- if (r < 0)
|
||
+ if (r < 0) {
|
||
+ pm_runtime_put_autosuspend(connector->dev->dev);
|
||
return connector_status_disconnected;
|
||
+ }
|
||
}
|
||
|
||
if (!force && radeon_check_hpd_status_unchanged(connector)) {
|
||
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
|
||
index d73e88ddecd0f..fe86a3e677571 100644
|
||
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
|
||
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
|
||
@@ -975,8 +975,7 @@ static void rcar_du_crtc_reset(struct drm_crtc *crtc)
|
||
state->crc.source = VSP1_DU_CRC_NONE;
|
||
state->crc.index = 0;
|
||
|
||
- crtc->state = &state->state;
|
||
- crtc->state->crtc = crtc;
|
||
+ __drm_atomic_helper_crtc_reset(crtc, &state->state);
|
||
}
|
||
|
||
static int rcar_du_crtc_enable_vblank(struct drm_crtc *crtc)
|
||
@@ -1271,9 +1270,6 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
|
||
|
||
drm_crtc_helper_add(crtc, &crtc_helper_funcs);
|
||
|
||
- /* Start with vertical blanking interrupt reporting disabled. */
|
||
- drm_crtc_vblank_off(crtc);
|
||
-
|
||
/* Register the interrupt handler. */
|
||
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
|
||
/* The IRQ's are associated with the CRTC (sw)index. */
|
||
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
|
||
index 04d6848d19fcf..da8b9983b7de0 100644
|
||
--- a/drivers/gpu/drm/tegra/dc.c
|
||
+++ b/drivers/gpu/drm/tegra/dc.c
|
||
@@ -1169,7 +1169,6 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
|
||
tegra_crtc_atomic_destroy_state(crtc, crtc->state);
|
||
|
||
__drm_atomic_helper_crtc_reset(crtc, &state->base);
|
||
- drm_crtc_vblank_reset(crtc);
|
||
}
|
||
|
||
static struct drm_crtc_state *
|
||
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
|
||
index 89a226912de85..4d01c4af61cd0 100644
|
||
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
|
||
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
|
||
@@ -352,8 +352,7 @@ static void tidss_crtc_reset(struct drm_crtc *crtc)
|
||
return;
|
||
}
|
||
|
||
- crtc->state = &tcrtc->base;
|
||
- crtc->state->crtc = crtc;
|
||
+ __drm_atomic_helper_crtc_reset(crtc, &tcrtc->base);
|
||
}
|
||
|
||
static struct drm_crtc_state *tidss_crtc_duplicate_state(struct drm_crtc *crtc)
|
||
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
|
||
index c0240f7e0b198..eec359f61a06d 100644
|
||
--- a/drivers/gpu/drm/tidss/tidss_kms.c
|
||
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
|
||
@@ -253,7 +253,6 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss)
|
||
int tidss_modeset_init(struct tidss_device *tidss)
|
||
{
|
||
struct drm_device *ddev = &tidss->ddev;
|
||
- unsigned int i;
|
||
int ret;
|
||
|
||
dev_dbg(tidss->dev, "%s\n", __func__);
|
||
@@ -278,10 +277,6 @@ int tidss_modeset_init(struct tidss_device *tidss)
|
||
if (ret)
|
||
return ret;
|
||
|
||
- /* Start with vertical blanking interrupt reporting disabled. */
|
||
- for (i = 0; i < tidss->num_crtcs; ++i)
|
||
- drm_crtc_vblank_reset(tidss->crtcs[i]);
|
||
-
|
||
drm_mode_config_reset(ddev);
|
||
|
||
dev_dbg(tidss->dev, "%s done\n", __func__);
|
||
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
|
||
index 6ccbd01cd888c..703b5cd517519 100644
|
||
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
|
||
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
|
||
@@ -79,6 +79,7 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
|
||
}
|
||
|
||
sg_free_table(shmem->pages);
|
||
+ kfree(shmem->pages);
|
||
shmem->pages = NULL;
|
||
drm_gem_shmem_unpin(&bo->base.base);
|
||
}
|
||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
|
||
index 009f1742bed51..c4017c7a24db6 100644
|
||
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
|
||
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
|
||
@@ -387,8 +387,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
|
||
ldu->base.is_implicit = true;
|
||
|
||
/* Initialize primary plane */
|
||
- vmw_du_plane_reset(primary);
|
||
-
|
||
ret = drm_universal_plane_init(dev, &ldu->base.primary,
|
||
0, &vmw_ldu_plane_funcs,
|
||
vmw_primary_plane_formats,
|
||
@@ -402,8 +400,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
|
||
drm_plane_helper_add(primary, &vmw_ldu_primary_plane_helper_funcs);
|
||
|
||
/* Initialize cursor plane */
|
||
- vmw_du_plane_reset(cursor);
|
||
-
|
||
ret = drm_universal_plane_init(dev, &ldu->base.cursor,
|
||
0, &vmw_ldu_cursor_funcs,
|
||
vmw_cursor_plane_formats,
|
||
@@ -417,7 +413,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
|
||
|
||
drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs);
|
||
|
||
- vmw_du_connector_reset(connector);
|
||
ret = drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
|
||
DRM_MODE_CONNECTOR_VIRTUAL);
|
||
if (ret) {
|
||
@@ -445,7 +440,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
|
||
goto err_free_encoder;
|
||
}
|
||
|
||
- vmw_du_crtc_reset(crtc);
|
||
ret = drm_crtc_init_with_planes(dev, crtc, &ldu->base.primary,
|
||
&ldu->base.cursor,
|
||
&vmw_legacy_crtc_funcs, NULL);
|
||
@@ -520,6 +514,8 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
|
||
|
||
dev_priv->active_display_unit = vmw_du_legacy;
|
||
|
||
+ drm_mode_config_reset(dev);
|
||
+
|
||
DRM_INFO("Legacy Display Unit initialized\n");
|
||
|
||
return 0;
|
||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
|
||
index 32a22e4eddb1a..4bf0f5ec4fc2d 100644
|
||
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
|
||
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
|
||
@@ -859,8 +859,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
|
||
sou->base.is_implicit = false;
|
||
|
||
/* Initialize primary plane */
|
||
- vmw_du_plane_reset(primary);
|
||
-
|
||
ret = drm_universal_plane_init(dev, &sou->base.primary,
|
||
0, &vmw_sou_plane_funcs,
|
||
vmw_primary_plane_formats,
|
||
@@ -875,8 +873,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
|
||
drm_plane_enable_fb_damage_clips(primary);
|
||
|
||
/* Initialize cursor plane */
|
||
- vmw_du_plane_reset(cursor);
|
||
-
|
||
ret = drm_universal_plane_init(dev, &sou->base.cursor,
|
||
0, &vmw_sou_cursor_funcs,
|
||
vmw_cursor_plane_formats,
|
||
@@ -890,7 +886,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
|
||
|
||
drm_plane_helper_add(cursor, &vmw_sou_cursor_plane_helper_funcs);
|
||
|
||
- vmw_du_connector_reset(connector);
|
||
ret = drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
|
||
DRM_MODE_CONNECTOR_VIRTUAL);
|
||
if (ret) {
|
||
@@ -918,8 +913,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
|
||
goto err_free_encoder;
|
||
}
|
||
|
||
-
|
||
- vmw_du_crtc_reset(crtc);
|
||
ret = drm_crtc_init_with_planes(dev, crtc, &sou->base.primary,
|
||
&sou->base.cursor,
|
||
&vmw_screen_object_crtc_funcs, NULL);
|
||
@@ -973,6 +966,8 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
|
||
|
||
dev_priv->active_display_unit = vmw_du_screen_object;
|
||
|
||
+ drm_mode_config_reset(dev);
|
||
+
|
||
DRM_INFO("Screen Objects Display Unit initialized\n");
|
||
|
||
return 0;
|
||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
|
||
index 16b3856296889..cf3aafd00837c 100644
|
||
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
|
||
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
|
||
@@ -1738,8 +1738,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
|
||
stdu->base.is_implicit = false;
|
||
|
||
/* Initialize primary plane */
|
||
- vmw_du_plane_reset(primary);
|
||
-
|
||
ret = drm_universal_plane_init(dev, primary,
|
||
0, &vmw_stdu_plane_funcs,
|
||
vmw_primary_plane_formats,
|
||
@@ -1754,8 +1752,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
|
||
drm_plane_enable_fb_damage_clips(primary);
|
||
|
||
/* Initialize cursor plane */
|
||
- vmw_du_plane_reset(cursor);
|
||
-
|
||
ret = drm_universal_plane_init(dev, cursor,
|
||
0, &vmw_stdu_cursor_funcs,
|
||
vmw_cursor_plane_formats,
|
||
@@ -1769,8 +1765,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
|
||
|
||
drm_plane_helper_add(cursor, &vmw_stdu_cursor_plane_helper_funcs);
|
||
|
||
- vmw_du_connector_reset(connector);
|
||
-
|
||
ret = drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
|
||
DRM_MODE_CONNECTOR_VIRTUAL);
|
||
if (ret) {
|
||
@@ -1798,7 +1792,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
|
||
goto err_free_encoder;
|
||
}
|
||
|
||
- vmw_du_crtc_reset(crtc);
|
||
ret = drm_crtc_init_with_planes(dev, crtc, &stdu->base.primary,
|
||
&stdu->base.cursor,
|
||
&vmw_stdu_crtc_funcs, NULL);
|
||
@@ -1894,6 +1887,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
|
||
}
|
||
}
|
||
|
||
+ drm_mode_config_reset(dev);
|
||
+
|
||
DRM_INFO("Screen Target Display device initialized\n");
|
||
|
||
return 0;
|
||
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
|
||
index a10643aa89aa5..2ac5a99406d98 100644
|
||
--- a/drivers/gpu/host1x/job.c
|
||
+++ b/drivers/gpu/host1x/job.c
|
||
@@ -102,6 +102,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
|
||
{
|
||
struct host1x_client *client = job->client;
|
||
struct device *dev = client->dev;
|
||
+ struct host1x_job_gather *g;
|
||
struct iommu_domain *domain;
|
||
unsigned int i;
|
||
int err;
|
||
@@ -184,7 +185,6 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
|
||
}
|
||
|
||
for (i = 0; i < job->num_gathers; i++) {
|
||
- struct host1x_job_gather *g = &job->gathers[i];
|
||
size_t gather_size = 0;
|
||
struct scatterlist *sg;
|
||
struct sg_table *sgt;
|
||
@@ -194,6 +194,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
|
||
dma_addr_t *phys;
|
||
unsigned int j;
|
||
|
||
+ g = &job->gathers[i];
|
||
g->bo = host1x_bo_get(g->bo);
|
||
if (!g->bo) {
|
||
err = -EINVAL;
|
||
@@ -213,7 +214,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
|
||
sgt = host1x_bo_pin(host->dev, g->bo, phys);
|
||
if (IS_ERR(sgt)) {
|
||
err = PTR_ERR(sgt);
|
||
- goto unpin;
|
||
+ goto put;
|
||
}
|
||
|
||
if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
|
||
@@ -226,7 +227,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
|
||
host->iova_end >> shift, true);
|
||
if (!alloc) {
|
||
err = -ENOMEM;
|
||
- goto unpin;
|
||
+ goto put;
|
||
}
|
||
|
||
err = iommu_map_sg(host->domain,
|
||
@@ -235,7 +236,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
|
||
if (err == 0) {
|
||
__free_iova(&host->iova, alloc);
|
||
err = -EINVAL;
|
||
- goto unpin;
|
||
+ goto put;
|
||
}
|
||
|
||
job->unpins[job->num_unpins].size = gather_size;
|
||
@@ -245,7 +246,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
|
||
DMA_TO_DEVICE);
|
||
if (!err) {
|
||
err = -ENOMEM;
|
||
- goto unpin;
|
||
+ goto put;
|
||
}
|
||
|
||
job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
|
||
@@ -263,6 +264,8 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
|
||
|
||
return 0;
|
||
|
||
+put:
|
||
+ host1x_bo_put(g->bo);
|
||
unpin:
|
||
host1x_job_unpin(job);
|
||
return err;
|
||
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
|
||
index 6f370e020feb3..7cfa9785bfbb0 100644
|
||
--- a/drivers/hid/hid-ids.h
|
||
+++ b/drivers/hid/hid-ids.h
|
||
@@ -773,6 +773,7 @@
|
||
#define USB_DEVICE_ID_LOGITECH_G27_WHEEL 0xc29b
|
||
#define USB_DEVICE_ID_LOGITECH_WII_WHEEL 0xc29c
|
||
#define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a
|
||
+#define USB_DEVICE_ID_LOGITECH_GROUP_AUDIO 0x0882
|
||
#define USB_DEVICE_ID_S510_RECEIVER 0xc50c
|
||
#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517
|
||
#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512
|
||
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
|
||
index 934fc0a798d4d..c242150d35a3a 100644
|
||
--- a/drivers/hid/hid-quirks.c
|
||
+++ b/drivers/hid/hid-quirks.c
|
||
@@ -179,6 +179,7 @@ static const struct hid_device_id hid_quirks[] = {
|
||
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
|
||
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
|
||
{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT },
|
||
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_GROUP_AUDIO), HID_QUIRK_NOGET },
|
||
|
||
{ 0 }
|
||
};
|
||
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
|
||
index 294c84e136d72..dbd04492825d4 100644
|
||
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
|
||
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
|
||
@@ -420,6 +420,19 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
|
||
dev_err(&client->dev, "failed to change power setting.\n");
|
||
|
||
set_pwr_exit:
|
||
+
|
||
+ /*
|
||
+ * The HID over I2C specification states that if a DEVICE needs time
|
||
+ * after the PWR_ON request, it should utilise CLOCK stretching.
|
||
+ * However, it has been observered that the Windows driver provides a
|
||
+ * 1ms sleep between the PWR_ON and RESET requests.
|
||
+ * According to Goodix Windows even waits 60 ms after (other?)
|
||
+ * PWR_ON requests. Testing has confirmed that several devices
|
||
+ * will not work properly without a delay after a PWR_ON request.
|
||
+ */
|
||
+ if (!ret && power_state == I2C_HID_PWR_ON)
|
||
+ msleep(60);
|
||
+
|
||
return ret;
|
||
}
|
||
|
||
@@ -441,15 +454,6 @@ static int i2c_hid_hwreset(struct i2c_client *client)
|
||
if (ret)
|
||
goto out_unlock;
|
||
|
||
- /*
|
||
- * The HID over I2C specification states that if a DEVICE needs time
|
||
- * after the PWR_ON request, it should utilise CLOCK stretching.
|
||
- * However, it has been observered that the Windows driver provides a
|
||
- * 1ms sleep between the PWR_ON and RESET requests and that some devices
|
||
- * rely on this.
|
||
- */
|
||
- usleep_range(1000, 5000);
|
||
-
|
||
i2c_hid_dbg(ihid, "resetting...\n");
|
||
|
||
ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
|
||
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
|
||
index 4140dea693e90..4f97e6c120595 100644
|
||
--- a/drivers/hid/usbhid/hiddev.c
|
||
+++ b/drivers/hid/usbhid/hiddev.c
|
||
@@ -519,12 +519,16 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
|
||
|
||
switch (cmd) {
|
||
case HIDIOCGUSAGE:
|
||
+ if (uref->usage_index >= field->report_count)
|
||
+ goto inval;
|
||
uref->value = field->value[uref->usage_index];
|
||
if (copy_to_user(user_arg, uref, sizeof(*uref)))
|
||
goto fault;
|
||
goto goodreturn;
|
||
|
||
case HIDIOCSUSAGE:
|
||
+ if (uref->usage_index >= field->report_count)
|
||
+ goto inval;
|
||
field->value[uref->usage_index] = uref->value;
|
||
goto goodreturn;
|
||
|
||
diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c
|
||
index 2137bc65829d3..35337922aa1bd 100644
|
||
--- a/drivers/hwmon/gsc-hwmon.c
|
||
+++ b/drivers/hwmon/gsc-hwmon.c
|
||
@@ -172,6 +172,7 @@ gsc_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
|
||
case mode_temperature:
|
||
if (tmp > 0x8000)
|
||
tmp -= 0xffff;
|
||
+ tmp *= 100; /* convert to millidegrees celsius */
|
||
break;
|
||
case mode_voltage_raw:
|
||
tmp = clamp_val(tmp, 0, BIT(GSC_HWMON_RESOLUTION));
|
||
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
|
||
index b0425694f7022..242ff8bee78dd 100644
|
||
--- a/drivers/hwmon/nct7904.c
|
||
+++ b/drivers/hwmon/nct7904.c
|
||
@@ -231,7 +231,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
|
||
if (ret < 0)
|
||
return ret;
|
||
cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f);
|
||
- if (cnt == 0x1fff)
|
||
+ if (cnt == 0 || cnt == 0x1fff)
|
||
rpm = 0;
|
||
else
|
||
rpm = 1350000 / cnt;
|
||
@@ -243,7 +243,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
|
||
if (ret < 0)
|
||
return ret;
|
||
cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f);
|
||
- if (cnt == 0x1fff)
|
||
+ if (cnt == 0 || cnt == 0x1fff)
|
||
rpm = 0;
|
||
else
|
||
rpm = 1350000 / cnt;
|
||
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
|
||
index fea644921a768..f206e28af5831 100644
|
||
--- a/drivers/i2c/busses/i2c-i801.c
|
||
+++ b/drivers/i2c/busses/i2c-i801.c
|
||
@@ -67,6 +67,7 @@
|
||
* Comet Lake-H (PCH) 0x06a3 32 hard yes yes yes
|
||
* Elkhart Lake (PCH) 0x4b23 32 hard yes yes yes
|
||
* Tiger Lake-LP (PCH) 0xa0a3 32 hard yes yes yes
|
||
+ * Tiger Lake-H (PCH) 0x43a3 32 hard yes yes yes
|
||
* Jasper Lake (SOC) 0x4da3 32 hard yes yes yes
|
||
* Comet Lake-V (PCH) 0xa3a3 32 hard yes yes yes
|
||
*
|
||
@@ -221,6 +222,7 @@
|
||
#define PCI_DEVICE_ID_INTEL_GEMINILAKE_SMBUS 0x31d4
|
||
#define PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS 0x34a3
|
||
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
|
||
+#define PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS 0x43a3
|
||
#define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS 0x4b23
|
||
#define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS 0x4da3
|
||
#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
|
||
@@ -1074,6 +1076,7 @@ static const struct pci_device_id i801_ids[] = {
|
||
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS) },
|
||
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS) },
|
||
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS) },
|
||
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS) },
|
||
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) },
|
||
{ 0, }
|
||
};
|
||
@@ -1748,6 +1751,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
|
||
case PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS:
|
||
case PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS:
|
||
case PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS:
|
||
+ case PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS:
|
||
case PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS:
|
||
priv->features |= FEATURE_BLOCK_PROC;
|
||
priv->features |= FEATURE_I2C_BLOCK_READ;
|
||
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
|
||
index 9e883474db8ce..c7c543483b08c 100644
|
||
--- a/drivers/i2c/busses/i2c-rcar.c
|
||
+++ b/drivers/i2c/busses/i2c-rcar.c
|
||
@@ -590,6 +590,7 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
|
||
/* master sent stop */
|
||
if (ssr_filtered & SSR) {
|
||
i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value);
|
||
+ rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */
|
||
rcar_i2c_write(priv, ICSIER, SAR);
|
||
rcar_i2c_write(priv, ICSSR, ~SSR & 0xff);
|
||
}
|
||
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
|
||
index 26f03a14a4781..4f09d4c318287 100644
|
||
--- a/drivers/i2c/i2c-core-base.c
|
||
+++ b/drivers/i2c/i2c-core-base.c
|
||
@@ -354,7 +354,7 @@ static int i2c_device_probe(struct device *dev)
|
||
* or ACPI ID table is supplied for the probing device.
|
||
*/
|
||
if (!driver->id_table &&
|
||
- !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
|
||
+ !acpi_driver_match_device(dev, dev->driver) &&
|
||
!i2c_of_match_device(dev->driver->of_match_table, client)) {
|
||
status = -ENODEV;
|
||
goto put_sync_adapter;
|
||
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
|
||
index 4959f5df21bd0..5141d49a046ba 100644
|
||
--- a/drivers/iommu/dma-iommu.c
|
||
+++ b/drivers/iommu/dma-iommu.c
|
||
@@ -1035,8 +1035,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
|
||
|
||
if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
|
||
!gfpflags_allow_blocking(gfp) && !coherent)
|
||
- cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page,
|
||
- gfp);
|
||
+ page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
|
||
+ gfp, NULL);
|
||
else
|
||
cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
|
||
if (!cpu_addr)
|
||
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
|
||
index 49fc01f2a28d4..45a251da54537 100644
|
||
--- a/drivers/iommu/iova.c
|
||
+++ b/drivers/iommu/iova.c
|
||
@@ -811,7 +811,9 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
|
||
for (i = 0 ; i < mag->size; ++i) {
|
||
struct iova *iova = private_find_iova(iovad, mag->pfns[i]);
|
||
|
||
- BUG_ON(!iova);
|
||
+ if (WARN_ON(!iova))
|
||
+ continue;
|
||
+
|
||
private_free_iova(iovad, iova);
|
||
}
|
||
|
||
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
|
||
index faa8482c8246d..4dd8a5532f893 100644
|
||
--- a/drivers/irqchip/irq-stm32-exti.c
|
||
+++ b/drivers/irqchip/irq-stm32-exti.c
|
||
@@ -431,6 +431,16 @@ static void stm32_irq_ack(struct irq_data *d)
|
||
irq_gc_unlock(gc);
|
||
}
|
||
|
||
+/* directly set the target bit without reading first. */
|
||
+static inline void stm32_exti_write_bit(struct irq_data *d, u32 reg)
|
||
+{
|
||
+ struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
|
||
+ void __iomem *base = chip_data->host_data->base;
|
||
+ u32 val = BIT(d->hwirq % IRQS_PER_BANK);
|
||
+
|
||
+ writel_relaxed(val, base + reg);
|
||
+}
|
||
+
|
||
static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg)
|
||
{
|
||
struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
|
||
@@ -464,9 +474,9 @@ static void stm32_exti_h_eoi(struct irq_data *d)
|
||
|
||
raw_spin_lock(&chip_data->rlock);
|
||
|
||
- stm32_exti_set_bit(d, stm32_bank->rpr_ofst);
|
||
+ stm32_exti_write_bit(d, stm32_bank->rpr_ofst);
|
||
if (stm32_bank->fpr_ofst != UNDEF_REG)
|
||
- stm32_exti_set_bit(d, stm32_bank->fpr_ofst);
|
||
+ stm32_exti_write_bit(d, stm32_bank->fpr_ofst);
|
||
|
||
raw_spin_unlock(&chip_data->rlock);
|
||
|
||
diff --git a/drivers/media/cec/core/cec-api.c b/drivers/media/cec/core/cec-api.c
|
||
index 17d1cb2e5f976..f922a2196b2b7 100644
|
||
--- a/drivers/media/cec/core/cec-api.c
|
||
+++ b/drivers/media/cec/core/cec-api.c
|
||
@@ -147,7 +147,13 @@ static long cec_adap_g_log_addrs(struct cec_adapter *adap,
|
||
struct cec_log_addrs log_addrs;
|
||
|
||
mutex_lock(&adap->lock);
|
||
- log_addrs = adap->log_addrs;
|
||
+ /*
|
||
+ * We use memcpy here instead of assignment since there is a
|
||
+ * hole at the end of struct cec_log_addrs that an assignment
|
||
+ * might ignore. So when we do copy_to_user() we could leak
|
||
+ * one byte of memory.
|
||
+ */
|
||
+ memcpy(&log_addrs, &adap->log_addrs, sizeof(log_addrs));
|
||
if (!adap->is_configured)
|
||
memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
|
||
sizeof(log_addrs.log_addr));
|
||
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
|
||
index f7678e5a5d879..157a0ed0a8856 100644
|
||
--- a/drivers/media/i2c/imx290.c
|
||
+++ b/drivers/media/i2c/imx290.c
|
||
@@ -628,7 +628,7 @@ static int imx290_power_on(struct device *dev)
|
||
}
|
||
|
||
usleep_range(1, 2);
|
||
- gpiod_set_value_cansleep(imx290->rst_gpio, 1);
|
||
+ gpiod_set_value_cansleep(imx290->rst_gpio, 0);
|
||
usleep_range(30000, 31000);
|
||
|
||
return 0;
|
||
@@ -641,7 +641,7 @@ static int imx290_power_off(struct device *dev)
|
||
struct imx290 *imx290 = to_imx290(sd);
|
||
|
||
clk_disable_unprepare(imx290->xclk);
|
||
- gpiod_set_value_cansleep(imx290->rst_gpio, 0);
|
||
+ gpiod_set_value_cansleep(imx290->rst_gpio, 1);
|
||
regulator_bulk_disable(IMX290_NUM_SUPPLIES, imx290->supplies);
|
||
|
||
return 0;
|
||
@@ -760,7 +760,8 @@ static int imx290_probe(struct i2c_client *client)
|
||
goto free_err;
|
||
}
|
||
|
||
- imx290->rst_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
|
||
+ imx290->rst_gpio = devm_gpiod_get_optional(dev, "reset",
|
||
+ GPIOD_OUT_HIGH);
|
||
if (IS_ERR(imx290->rst_gpio)) {
|
||
dev_err(dev, "Cannot get reset gpio\n");
|
||
ret = PTR_ERR(imx290->rst_gpio);
|
||
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
|
||
index d0cdee1c6eb0b..bf36b1e22b635 100644
|
||
--- a/drivers/media/pci/ttpci/av7110.c
|
||
+++ b/drivers/media/pci/ttpci/av7110.c
|
||
@@ -406,14 +406,15 @@ static void debiirq(unsigned long cookie)
|
||
case DATA_CI_GET:
|
||
{
|
||
u8 *data = av7110->debi_virt;
|
||
+ u8 data_0 = data[0];
|
||
|
||
- if ((data[0] < 2) && data[2] == 0xff) {
|
||
+ if (data_0 < 2 && data[2] == 0xff) {
|
||
int flags = 0;
|
||
if (data[5] > 0)
|
||
flags |= CA_CI_MODULE_PRESENT;
|
||
if (data[5] > 5)
|
||
flags |= CA_CI_MODULE_READY;
|
||
- av7110->ci_slot[data[0]].flags = flags;
|
||
+ av7110->ci_slot[data_0].flags = flags;
|
||
} else
|
||
ci_get_data(&av7110->ci_rbuffer,
|
||
av7110->debi_virt,
|
||
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
|
||
index d9ec439faefa6..72a0e94e2e21a 100644
|
||
--- a/drivers/media/platform/davinci/vpif_capture.c
|
||
+++ b/drivers/media/platform/davinci/vpif_capture.c
|
||
@@ -1482,8 +1482,6 @@ probe_out:
|
||
/* Unregister video device */
|
||
video_unregister_device(&ch->video_dev);
|
||
}
|
||
- kfree(vpif_obj.sd);
|
||
- v4l2_device_unregister(&vpif_obj.v4l2_dev);
|
||
|
||
return err;
|
||
}
|
||
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
|
||
index 046222684b8b2..9a58032f818ae 100644
|
||
--- a/drivers/mfd/intel-lpss-pci.c
|
||
+++ b/drivers/mfd/intel-lpss-pci.c
|
||
@@ -201,6 +201,9 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
|
||
{ PCI_VDEVICE(INTEL, 0x1ac4), (kernel_ulong_t)&bxt_info },
|
||
{ PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info },
|
||
{ PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info },
|
||
+ /* EBG */
|
||
+ { PCI_VDEVICE(INTEL, 0x1bad), (kernel_ulong_t)&bxt_uart_info },
|
||
+ { PCI_VDEVICE(INTEL, 0x1bae), (kernel_ulong_t)&bxt_uart_info },
|
||
/* GLK */
|
||
{ PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&glk_i2c_info },
|
||
{ PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&glk_i2c_info },
|
||
@@ -230,6 +233,22 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
|
||
{ PCI_VDEVICE(INTEL, 0x34ea), (kernel_ulong_t)&bxt_i2c_info },
|
||
{ PCI_VDEVICE(INTEL, 0x34eb), (kernel_ulong_t)&bxt_i2c_info },
|
||
{ PCI_VDEVICE(INTEL, 0x34fb), (kernel_ulong_t)&spt_info },
|
||
+ /* TGL-H */
|
||
+ { PCI_VDEVICE(INTEL, 0x43a7), (kernel_ulong_t)&bxt_uart_info },
|
||
+ { PCI_VDEVICE(INTEL, 0x43a8), (kernel_ulong_t)&bxt_uart_info },
|
||
+ { PCI_VDEVICE(INTEL, 0x43a9), (kernel_ulong_t)&bxt_uart_info },
|
||
+ { PCI_VDEVICE(INTEL, 0x43aa), (kernel_ulong_t)&bxt_info },
|
||
+ { PCI_VDEVICE(INTEL, 0x43ab), (kernel_ulong_t)&bxt_info },
|
||
+ { PCI_VDEVICE(INTEL, 0x43ad), (kernel_ulong_t)&bxt_i2c_info },
|
||
+ { PCI_VDEVICE(INTEL, 0x43ae), (kernel_ulong_t)&bxt_i2c_info },
|
||
+ { PCI_VDEVICE(INTEL, 0x43d8), (kernel_ulong_t)&bxt_i2c_info },
|
||
+ { PCI_VDEVICE(INTEL, 0x43da), (kernel_ulong_t)&bxt_uart_info },
|
||
+ { PCI_VDEVICE(INTEL, 0x43e8), (kernel_ulong_t)&bxt_i2c_info },
|
||
+ { PCI_VDEVICE(INTEL, 0x43e9), (kernel_ulong_t)&bxt_i2c_info },
|
||
+ { PCI_VDEVICE(INTEL, 0x43ea), (kernel_ulong_t)&bxt_i2c_info },
|
||
+ { PCI_VDEVICE(INTEL, 0x43eb), (kernel_ulong_t)&bxt_i2c_info },
|
||
+ { PCI_VDEVICE(INTEL, 0x43fb), (kernel_ulong_t)&bxt_info },
|
||
+ { PCI_VDEVICE(INTEL, 0x43fd), (kernel_ulong_t)&bxt_info },
|
||
/* EHL */
|
||
{ PCI_VDEVICE(INTEL, 0x4b28), (kernel_ulong_t)&bxt_uart_info },
|
||
{ PCI_VDEVICE(INTEL, 0x4b29), (kernel_ulong_t)&bxt_uart_info },
|
||
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
|
||
index 0bc036e01ee8d..6c2b9cf45e831 100644
|
||
--- a/drivers/misc/habanalabs/debugfs.c
|
||
+++ b/drivers/misc/habanalabs/debugfs.c
|
||
@@ -19,7 +19,7 @@
|
||
static struct dentry *hl_debug_root;
|
||
|
||
static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
|
||
- u8 i2c_reg, u32 *val)
|
||
+ u8 i2c_reg, long *val)
|
||
{
|
||
struct armcp_packet pkt;
|
||
int rc;
|
||
@@ -36,7 +36,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
|
||
pkt.i2c_reg = i2c_reg;
|
||
|
||
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
|
||
- 0, (long *) val);
|
||
+ 0, val);
|
||
|
||
if (rc)
|
||
dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
|
||
@@ -827,7 +827,7 @@ static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
|
||
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
|
||
struct hl_device *hdev = entry->hdev;
|
||
char tmp_buf[32];
|
||
- u32 val;
|
||
+ long val;
|
||
ssize_t rc;
|
||
|
||
if (*ppos)
|
||
@@ -842,7 +842,7 @@ static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
|
||
return rc;
|
||
}
|
||
|
||
- sprintf(tmp_buf, "0x%02x\n", val);
|
||
+ sprintf(tmp_buf, "0x%02lx\n", val);
|
||
rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
|
||
strlen(tmp_buf));
|
||
|
||
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
|
||
index fb26e743e1fd4..d0a80bfb953b0 100644
|
||
--- a/drivers/mmc/host/sdhci-of-arasan.c
|
||
+++ b/drivers/mmc/host/sdhci-of-arasan.c
|
||
@@ -1025,7 +1025,6 @@ static void arasan_dt_read_clk_phase(struct device *dev,
|
||
static void arasan_dt_parse_clk_phases(struct device *dev,
|
||
struct sdhci_arasan_clk_data *clk_data)
|
||
{
|
||
- int *iclk_phase, *oclk_phase;
|
||
u32 mio_bank = 0;
|
||
int i;
|
||
|
||
@@ -1037,28 +1036,32 @@ static void arasan_dt_parse_clk_phases(struct device *dev,
|
||
clk_data->set_clk_delays = sdhci_arasan_set_clk_delays;
|
||
|
||
if (of_device_is_compatible(dev->of_node, "xlnx,zynqmp-8.9a")) {
|
||
- iclk_phase = (int [MMC_TIMING_MMC_HS400 + 1]) ZYNQMP_ICLK_PHASE;
|
||
- oclk_phase = (int [MMC_TIMING_MMC_HS400 + 1]) ZYNQMP_OCLK_PHASE;
|
||
+ u32 zynqmp_iclk_phase[MMC_TIMING_MMC_HS400 + 1] =
|
||
+ ZYNQMP_ICLK_PHASE;
|
||
+ u32 zynqmp_oclk_phase[MMC_TIMING_MMC_HS400 + 1] =
|
||
+ ZYNQMP_OCLK_PHASE;
|
||
|
||
of_property_read_u32(dev->of_node, "xlnx,mio-bank", &mio_bank);
|
||
if (mio_bank == 2) {
|
||
- oclk_phase[MMC_TIMING_UHS_SDR104] = 90;
|
||
- oclk_phase[MMC_TIMING_MMC_HS200] = 90;
|
||
+ zynqmp_oclk_phase[MMC_TIMING_UHS_SDR104] = 90;
|
||
+ zynqmp_oclk_phase[MMC_TIMING_MMC_HS200] = 90;
|
||
}
|
||
|
||
for (i = 0; i <= MMC_TIMING_MMC_HS400; i++) {
|
||
- clk_data->clk_phase_in[i] = iclk_phase[i];
|
||
- clk_data->clk_phase_out[i] = oclk_phase[i];
|
||
+ clk_data->clk_phase_in[i] = zynqmp_iclk_phase[i];
|
||
+ clk_data->clk_phase_out[i] = zynqmp_oclk_phase[i];
|
||
}
|
||
}
|
||
|
||
if (of_device_is_compatible(dev->of_node, "xlnx,versal-8.9a")) {
|
||
- iclk_phase = (int [MMC_TIMING_MMC_HS400 + 1]) VERSAL_ICLK_PHASE;
|
||
- oclk_phase = (int [MMC_TIMING_MMC_HS400 + 1]) VERSAL_OCLK_PHASE;
|
||
+ u32 versal_iclk_phase[MMC_TIMING_MMC_HS400 + 1] =
|
||
+ VERSAL_ICLK_PHASE;
|
||
+ u32 versal_oclk_phase[MMC_TIMING_MMC_HS400 + 1] =
|
||
+ VERSAL_OCLK_PHASE;
|
||
|
||
for (i = 0; i <= MMC_TIMING_MMC_HS400; i++) {
|
||
- clk_data->clk_phase_in[i] = iclk_phase[i];
|
||
- clk_data->clk_phase_out[i] = oclk_phase[i];
|
||
+ clk_data->clk_phase_in[i] = versal_iclk_phase[i];
|
||
+ clk_data->clk_phase_out[i] = versal_oclk_phase[i];
|
||
}
|
||
}
|
||
|
||
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
|
||
index b513b8c5c3b5e..41dd3d0f34524 100644
|
||
--- a/drivers/net/ethernet/freescale/gianfar.c
|
||
+++ b/drivers/net/ethernet/freescale/gianfar.c
|
||
@@ -750,8 +750,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
|
||
continue;
|
||
|
||
err = gfar_parse_group(child, priv, model);
|
||
- if (err)
|
||
+ if (err) {
|
||
+ of_node_put(child);
|
||
goto err_grp_init;
|
||
+ }
|
||
}
|
||
} else { /* SQ_SG_MODE */
|
||
err = gfar_parse_group(np, priv, model);
|
||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
|
||
index ec7a11d13fdc0..9e70b9a674409 100644
|
||
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
|
||
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
|
||
@@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
|
||
}
|
||
|
||
/* alloc the udl from per cpu ddp pool */
|
||
- ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
|
||
+ ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
|
||
if (!ddp->udl) {
|
||
e_err(drv, "failed allocated ddp context\n");
|
||
goto out_noddp_unmap;
|
||
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
|
||
index 4942f6112e51f..5da04e9979894 100644
|
||
--- a/drivers/net/macvlan.c
|
||
+++ b/drivers/net/macvlan.c
|
||
@@ -1269,6 +1269,9 @@ static void macvlan_port_destroy(struct net_device *dev)
|
||
static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
|
||
struct netlink_ext_ack *extack)
|
||
{
|
||
+ struct nlattr *nla, *head;
|
||
+ int rem, len;
|
||
+
|
||
if (tb[IFLA_ADDRESS]) {
|
||
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
|
||
return -EINVAL;
|
||
@@ -1316,6 +1319,20 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
|
||
return -EADDRNOTAVAIL;
|
||
}
|
||
|
||
+ if (data[IFLA_MACVLAN_MACADDR_DATA]) {
|
||
+ head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]);
|
||
+ len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]);
|
||
+
|
||
+ nla_for_each_attr(nla, head, len, rem) {
|
||
+ if (nla_type(nla) != IFLA_MACVLAN_MACADDR ||
|
||
+ nla_len(nla) != ETH_ALEN)
|
||
+ return -EINVAL;
|
||
+
|
||
+ if (!is_valid_ether_addr(nla_data(nla)))
|
||
+ return -EADDRNOTAVAIL;
|
||
+ }
|
||
+ }
|
||
+
|
||
if (data[IFLA_MACVLAN_MACADDR_COUNT])
|
||
return -EINVAL;
|
||
|
||
@@ -1372,10 +1389,6 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode,
|
||
len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]);
|
||
|
||
nla_for_each_attr(nla, head, len, rem) {
|
||
- if (nla_type(nla) != IFLA_MACVLAN_MACADDR ||
|
||
- nla_len(nla) != ETH_ALEN)
|
||
- continue;
|
||
-
|
||
addr = nla_data(nla);
|
||
ret = macvlan_hash_add_source(vlan, addr);
|
||
if (ret)
|
||
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
|
||
index dfc16770458d8..386ed2aa31fd9 100644
|
||
--- a/drivers/net/wan/hdlc.c
|
||
+++ b/drivers/net/wan/hdlc.c
|
||
@@ -230,6 +230,7 @@ static void hdlc_setup_dev(struct net_device *dev)
|
||
dev->max_mtu = HDLC_MAX_MTU;
|
||
dev->type = ARPHRD_RAWHDLC;
|
||
dev->hard_header_len = 16;
|
||
+ dev->needed_headroom = 0;
|
||
dev->addr_len = 0;
|
||
dev->header_ops = &hdlc_null_ops;
|
||
}
|
||
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
|
||
index f70336bb6f524..f52b9fed05931 100644
|
||
--- a/drivers/net/wan/hdlc_x25.c
|
||
+++ b/drivers/net/wan/hdlc_x25.c
|
||
@@ -107,8 +107,14 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
|
||
{
|
||
int result;
|
||
|
||
+ /* There should be a pseudo header of 1 byte added by upper layers.
|
||
+ * Check to make sure it is there before reading it.
|
||
+ */
|
||
+ if (skb->len < 1) {
|
||
+ kfree_skb(skb);
|
||
+ return NETDEV_TX_OK;
|
||
+ }
|
||
|
||
- /* X.25 to LAPB */
|
||
switch (skb->data[0]) {
|
||
case X25_IFACE_DATA: /* Data to be transmitted */
|
||
skb_pull(skb, 1);
|
||
@@ -294,6 +300,15 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
|
||
return result;
|
||
|
||
memcpy(&state(hdlc)->settings, &new_settings, size);
|
||
+
|
||
+ /* There's no header_ops so hard_header_len should be 0. */
|
||
+ dev->hard_header_len = 0;
|
||
+ /* When transmitting data:
|
||
+ * first we'll remove a pseudo header of 1 byte,
|
||
+ * then we'll prepend an LAPB header of at most 3 bytes.
|
||
+ */
|
||
+ dev->needed_headroom = 3 - 1;
|
||
+
|
||
dev->type = ARPHRD_X25;
|
||
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
|
||
netif_dormant_off(dev);
|
||
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
|
||
index a757abd7a5999..f4db818cccae7 100644
|
||
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
|
||
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
|
||
@@ -84,6 +84,8 @@
|
||
|
||
#define BRCMF_ND_INFO_TIMEOUT msecs_to_jiffies(2000)
|
||
|
||
+#define BRCMF_PS_MAX_TIMEOUT_MS 2000
|
||
+
|
||
#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
|
||
(sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
|
||
|
||
@@ -2941,6 +2943,12 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
|
||
else
|
||
bphy_err(drvr, "error (%d)\n", err);
|
||
}
|
||
+
|
||
+ err = brcmf_fil_iovar_int_set(ifp, "pm2_sleep_ret",
|
||
+ min_t(u32, timeout, BRCMF_PS_MAX_TIMEOUT_MS));
|
||
+ if (err)
|
||
+ bphy_err(drvr, "Unable to set pm timeout, (%d)\n", err);
|
||
+
|
||
done:
|
||
brcmf_dbg(TRACE, "Exit\n");
|
||
return err;
|
||
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
|
||
index c66c6dc003783..bad06939a247c 100644
|
||
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
|
||
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
|
||
@@ -718,8 +718,11 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw)
|
||
|
||
usb_anchor_urb(urb, &rtlusb->rx_submitted);
|
||
err = usb_submit_urb(urb, GFP_KERNEL);
|
||
- if (err)
|
||
+ if (err) {
|
||
+ usb_unanchor_urb(urb);
|
||
+ usb_free_urb(urb);
|
||
goto err_out;
|
||
+ }
|
||
usb_free_urb(urb);
|
||
}
|
||
return 0;
|
||
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
|
||
index 549f5b0fb0b4b..1a2b6910509ca 100644
|
||
--- a/drivers/nvme/host/fc.c
|
||
+++ b/drivers/nvme/host/fc.c
|
||
@@ -2076,7 +2076,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
|
||
if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
|
||
dev_err(ctrl->dev,
|
||
"FCP Op failed - cmdiu dma mapping failed.\n");
|
||
- ret = EFAULT;
|
||
+ ret = -EFAULT;
|
||
goto out_on_error;
|
||
}
|
||
|
||
@@ -2086,7 +2086,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
|
||
if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
|
||
dev_err(ctrl->dev,
|
||
"FCP Op failed - rspiu dma mapping failed.\n");
|
||
- ret = EFAULT;
|
||
+ ret = -EFAULT;
|
||
}
|
||
|
||
atomic_set(&op->state, FCPOP_STATE_IDLE);
|
||
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
|
||
index 2672953233434..041a755f936a6 100644
|
||
--- a/drivers/nvme/host/multipath.c
|
||
+++ b/drivers/nvme/host/multipath.c
|
||
@@ -255,12 +255,17 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
|
||
fallback = ns;
|
||
}
|
||
|
||
- /* No optimized path found, re-check the current path */
|
||
+ /*
|
||
+ * The loop above skips the current path for round-robin semantics.
|
||
+ * Fall back to the current path if either:
|
||
+ * - no other optimized path found and current is optimized,
|
||
+ * - no other usable path found and current is usable.
|
||
+ */
|
||
if (!nvme_path_is_disabled(old) &&
|
||
- old->ana_state == NVME_ANA_OPTIMIZED) {
|
||
- found = old;
|
||
- goto out;
|
||
- }
|
||
+ (old->ana_state == NVME_ANA_OPTIMIZED ||
|
||
+ (!fallback && old->ana_state == NVME_ANA_NONOPTIMIZED)))
|
||
+ return old;
|
||
+
|
||
if (!fallback)
|
||
return NULL;
|
||
found = fallback;
|
||
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
|
||
index 419e0d4ce79b1..d84b935704a3d 100644
|
||
--- a/drivers/nvme/target/configfs.c
|
||
+++ b/drivers/nvme/target/configfs.c
|
||
@@ -1035,6 +1035,7 @@ static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
|
||
up_write(&nvmet_config_sem);
|
||
|
||
kfree_rcu(new_model, rcuhead);
|
||
+ kfree(new_model_number);
|
||
|
||
return count;
|
||
}
|
||
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
|
||
index 5dd1740855770..f38e710de4789 100644
|
||
--- a/drivers/pci/controller/dwc/pcie-qcom.c
|
||
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
|
||
@@ -106,11 +106,14 @@ struct qcom_pcie_resources_2_1_0 {
|
||
struct clk *iface_clk;
|
||
struct clk *core_clk;
|
||
struct clk *phy_clk;
|
||
+ struct clk *aux_clk;
|
||
+ struct clk *ref_clk;
|
||
struct reset_control *pci_reset;
|
||
struct reset_control *axi_reset;
|
||
struct reset_control *ahb_reset;
|
||
struct reset_control *por_reset;
|
||
struct reset_control *phy_reset;
|
||
+ struct reset_control *ext_reset;
|
||
struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
|
||
};
|
||
|
||
@@ -264,6 +267,14 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
|
||
if (IS_ERR(res->phy_clk))
|
||
return PTR_ERR(res->phy_clk);
|
||
|
||
+ res->aux_clk = devm_clk_get_optional(dev, "aux");
|
||
+ if (IS_ERR(res->aux_clk))
|
||
+ return PTR_ERR(res->aux_clk);
|
||
+
|
||
+ res->ref_clk = devm_clk_get_optional(dev, "ref");
|
||
+ if (IS_ERR(res->ref_clk))
|
||
+ return PTR_ERR(res->ref_clk);
|
||
+
|
||
res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
|
||
if (IS_ERR(res->pci_reset))
|
||
return PTR_ERR(res->pci_reset);
|
||
@@ -280,6 +291,10 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
|
||
if (IS_ERR(res->por_reset))
|
||
return PTR_ERR(res->por_reset);
|
||
|
||
+ res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
|
||
+ if (IS_ERR(res->ext_reset))
|
||
+ return PTR_ERR(res->ext_reset);
|
||
+
|
||
res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
|
||
return PTR_ERR_OR_ZERO(res->phy_reset);
|
||
}
|
||
@@ -288,14 +303,17 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
|
||
{
|
||
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
|
||
|
||
+ clk_disable_unprepare(res->phy_clk);
|
||
reset_control_assert(res->pci_reset);
|
||
reset_control_assert(res->axi_reset);
|
||
reset_control_assert(res->ahb_reset);
|
||
reset_control_assert(res->por_reset);
|
||
- reset_control_assert(res->pci_reset);
|
||
+ reset_control_assert(res->ext_reset);
|
||
+ reset_control_assert(res->phy_reset);
|
||
clk_disable_unprepare(res->iface_clk);
|
||
clk_disable_unprepare(res->core_clk);
|
||
- clk_disable_unprepare(res->phy_clk);
|
||
+ clk_disable_unprepare(res->aux_clk);
|
||
+ clk_disable_unprepare(res->ref_clk);
|
||
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
|
||
}
|
||
|
||
@@ -326,24 +344,36 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
|
||
goto err_assert_ahb;
|
||
}
|
||
|
||
- ret = clk_prepare_enable(res->phy_clk);
|
||
- if (ret) {
|
||
- dev_err(dev, "cannot prepare/enable phy clock\n");
|
||
- goto err_clk_phy;
|
||
- }
|
||
-
|
||
ret = clk_prepare_enable(res->core_clk);
|
||
if (ret) {
|
||
dev_err(dev, "cannot prepare/enable core clock\n");
|
||
goto err_clk_core;
|
||
}
|
||
|
||
+ ret = clk_prepare_enable(res->aux_clk);
|
||
+ if (ret) {
|
||
+ dev_err(dev, "cannot prepare/enable aux clock\n");
|
||
+ goto err_clk_aux;
|
||
+ }
|
||
+
|
||
+ ret = clk_prepare_enable(res->ref_clk);
|
||
+ if (ret) {
|
||
+ dev_err(dev, "cannot prepare/enable ref clock\n");
|
||
+ goto err_clk_ref;
|
||
+ }
|
||
+
|
||
ret = reset_control_deassert(res->ahb_reset);
|
||
if (ret) {
|
||
dev_err(dev, "cannot deassert ahb reset\n");
|
||
goto err_deassert_ahb;
|
||
}
|
||
|
||
+ ret = reset_control_deassert(res->ext_reset);
|
||
+ if (ret) {
|
||
+ dev_err(dev, "cannot deassert ext reset\n");
|
||
+ goto err_deassert_ahb;
|
||
+ }
|
||
+
|
||
/* enable PCIe clocks and resets */
|
||
val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
|
||
val &= ~BIT(0);
|
||
@@ -398,6 +428,12 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
|
||
return ret;
|
||
}
|
||
|
||
+ ret = clk_prepare_enable(res->phy_clk);
|
||
+ if (ret) {
|
||
+ dev_err(dev, "cannot prepare/enable phy clock\n");
|
||
+ goto err_deassert_ahb;
|
||
+ }
|
||
+
|
||
/* wait for clock acquisition */
|
||
usleep_range(1000, 1500);
|
||
|
||
@@ -411,10 +447,12 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
|
||
return 0;
|
||
|
||
err_deassert_ahb:
|
||
+ clk_disable_unprepare(res->ref_clk);
|
||
+err_clk_ref:
|
||
+ clk_disable_unprepare(res->aux_clk);
|
||
+err_clk_aux:
|
||
clk_disable_unprepare(res->core_clk);
|
||
err_clk_core:
|
||
- clk_disable_unprepare(res->phy_clk);
|
||
-err_clk_phy:
|
||
clk_disable_unprepare(res->iface_clk);
|
||
err_assert_ahb:
|
||
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
|
||
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
|
||
index cc386ef2fa122..3861505741e6d 100644
|
||
--- a/drivers/pci/slot.c
|
||
+++ b/drivers/pci/slot.c
|
||
@@ -268,13 +268,16 @@ placeholder:
|
||
slot_name = make_slot_name(name);
|
||
if (!slot_name) {
|
||
err = -ENOMEM;
|
||
+ kfree(slot);
|
||
goto err;
|
||
}
|
||
|
||
err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
|
||
"%s", slot_name);
|
||
- if (err)
|
||
+ if (err) {
|
||
+ kobject_put(&slot->kobj);
|
||
goto err;
|
||
+ }
|
||
|
||
INIT_LIST_HEAD(&slot->list);
|
||
list_add(&slot->list, &parent->slots);
|
||
@@ -293,7 +296,6 @@ out:
|
||
mutex_unlock(&pci_slot_mutex);
|
||
return slot;
|
||
err:
|
||
- kfree(slot);
|
||
slot = ERR_PTR(err);
|
||
goto out;
|
||
}
|
||
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
|
||
index b77b18fe5adcf..2f3dfb56c3fa4 100644
|
||
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
|
||
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
|
||
@@ -243,6 +243,29 @@ static int mtk_xt_find_eint_num(struct mtk_pinctrl *hw, unsigned long eint_n)
|
||
return EINT_NA;
|
||
}
|
||
|
||
+/*
|
||
+ * Virtual GPIO only used inside SOC and not being exported to outside SOC.
|
||
+ * Some modules use virtual GPIO as eint (e.g. pmif or usb).
|
||
+ * In MTK platform, external interrupt (EINT) and GPIO is 1-1 mapping
|
||
+ * and we can set GPIO as eint.
|
||
+ * But some modules use specific eint which doesn't have real GPIO pin.
|
||
+ * So we use virtual GPIO to map it.
|
||
+ */
|
||
+
|
||
+bool mtk_is_virt_gpio(struct mtk_pinctrl *hw, unsigned int gpio_n)
|
||
+{
|
||
+ const struct mtk_pin_desc *desc;
|
||
+ bool virt_gpio = false;
|
||
+
|
||
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio_n];
|
||
+
|
||
+ if (desc->funcs && !desc->funcs[desc->eint.eint_m].name)
|
||
+ virt_gpio = true;
|
||
+
|
||
+ return virt_gpio;
|
||
+}
|
||
+EXPORT_SYMBOL_GPL(mtk_is_virt_gpio);
|
||
+
|
||
static int mtk_xt_get_gpio_n(void *data, unsigned long eint_n,
|
||
unsigned int *gpio_n,
|
||
struct gpio_chip **gpio_chip)
|
||
@@ -295,6 +318,9 @@ static int mtk_xt_set_gpio_as_eint(void *data, unsigned long eint_n)
|
||
if (err)
|
||
return err;
|
||
|
||
+ if (mtk_is_virt_gpio(hw, gpio_n))
|
||
+ return 0;
|
||
+
|
||
desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio_n];
|
||
|
||
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_MODE,
|
||
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
|
||
index 27df087363960..bd079f4fb1d6f 100644
|
||
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
|
||
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
|
||
@@ -315,4 +315,5 @@ int mtk_pinconf_adv_drive_set(struct mtk_pinctrl *hw,
|
||
int mtk_pinconf_adv_drive_get(struct mtk_pinctrl *hw,
|
||
const struct mtk_pin_desc *desc, u32 *val);
|
||
|
||
+bool mtk_is_virt_gpio(struct mtk_pinctrl *hw, unsigned int gpio_n);
|
||
#endif /* __PINCTRL_MTK_COMMON_V2_H */
|
||
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
|
||
index 90a432bf9fedc..a23c18251965e 100644
|
||
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
|
||
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
|
||
@@ -769,6 +769,13 @@ static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
|
||
if (gpio >= hw->soc->npins)
|
||
return -EINVAL;
|
||
|
||
+ /*
|
||
+ * "Virtual" GPIOs are always and only used for interrupts
|
||
+ * Since they are only used for interrupts, they are always inputs
|
||
+ */
|
||
+ if (mtk_is_virt_gpio(hw, gpio))
|
||
+ return 1;
|
||
+
|
||
desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
|
||
|
||
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &value);
|
||
diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
|
||
index 24e48d96ed766..b1c641c72f515 100644
|
||
--- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c
|
||
+++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
|
||
@@ -419,9 +419,7 @@ cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub,
|
||
* Disable filtering since we might add more jitter
|
||
* if b is in a random point in time.
|
||
*/
|
||
- new_timestamp = fifo_timestamp -
|
||
- fifo_info->timestamp * 1000 +
|
||
- in->timestamp * 1000;
|
||
+ new_timestamp = c - b * 1000 + a * 1000;
|
||
/*
|
||
* The timestamp can be stale if we had to use the fifo
|
||
* info timestamp.
|
||
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
|
||
index 94edbb33d0d1f..aca022239b333 100644
|
||
--- a/drivers/s390/cio/css.c
|
||
+++ b/drivers/s390/cio/css.c
|
||
@@ -677,6 +677,11 @@ static int slow_eval_known_fn(struct subchannel *sch, void *data)
|
||
rc = css_evaluate_known_subchannel(sch, 1);
|
||
if (rc == -EAGAIN)
|
||
css_schedule_eval(sch->schid);
|
||
+ /*
|
||
+ * The loop might take long time for platforms with lots of
|
||
+ * known devices. Allow scheduling here.
|
||
+ */
|
||
+ cond_resched();
|
||
}
|
||
return 0;
|
||
}
|
||
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
|
||
index 1791a393795da..07a0dadc75bf5 100644
|
||
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
|
||
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
|
||
@@ -255,9 +255,9 @@ static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
|
||
WARN_ON(!fcf_dev);
|
||
new->fcf_dev = NULL;
|
||
fcoe_fcf_device_delete(fcf_dev);
|
||
- kfree(new);
|
||
mutex_unlock(&cdev->lock);
|
||
}
|
||
+ kfree(new);
|
||
}
|
||
|
||
/**
|
||
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
|
||
index b766463579800..d0296f7cf45fc 100644
|
||
--- a/drivers/scsi/lpfc/lpfc_vport.c
|
||
+++ b/drivers/scsi/lpfc/lpfc_vport.c
|
||
@@ -642,27 +642,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
|
||
vport->port_state < LPFC_VPORT_READY)
|
||
return -EAGAIN;
|
||
}
|
||
+
|
||
/*
|
||
- * This is a bit of a mess. We want to ensure the shost doesn't get
|
||
- * torn down until we're done with the embedded lpfc_vport structure.
|
||
- *
|
||
- * Beyond holding a reference for this function, we also need a
|
||
- * reference for outstanding I/O requests we schedule during delete
|
||
- * processing. But once we scsi_remove_host() we can no longer obtain
|
||
- * a reference through scsi_host_get().
|
||
- *
|
||
- * So we take two references here. We release one reference at the
|
||
- * bottom of the function -- after delinking the vport. And we
|
||
- * release the other at the completion of the unreg_vpi that get's
|
||
- * initiated after we've disposed of all other resources associated
|
||
- * with the port.
|
||
+ * Take early refcount for outstanding I/O requests we schedule during
|
||
+ * delete processing for unreg_vpi. Always keep this before
|
||
+ * scsi_remove_host() as we can no longer obtain a reference through
|
||
+ * scsi_host_get() after scsi_host_remove as shost is set to SHOST_DEL.
|
||
*/
|
||
if (!scsi_host_get(shost))
|
||
return VPORT_INVAL;
|
||
- if (!scsi_host_get(shost)) {
|
||
- scsi_host_put(shost);
|
||
- return VPORT_INVAL;
|
||
- }
|
||
+
|
||
lpfc_free_sysfs_attr(vport);
|
||
|
||
lpfc_debugfs_terminate(vport);
|
||
@@ -809,8 +798,9 @@ skip_logo:
|
||
if (!(vport->vpi_state & LPFC_VPI_REGISTERED) ||
|
||
lpfc_mbx_unreg_vpi(vport))
|
||
scsi_host_put(shost);
|
||
- } else
|
||
+ } else {
|
||
scsi_host_put(shost);
|
||
+ }
|
||
|
||
lpfc_free_vpi(phba, vport->vpi);
|
||
vport->work_port_events = 0;
|
||
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
|
||
index df670fba2ab8a..de9fd7f688d01 100644
|
||
--- a/drivers/scsi/qla2xxx/qla_gs.c
|
||
+++ b/drivers/scsi/qla2xxx/qla_gs.c
|
||
@@ -1505,11 +1505,11 @@ qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
|
||
static uint
|
||
qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
|
||
{
|
||
+ uint speeds = 0;
|
||
+
|
||
if (IS_CNA_CAPABLE(ha))
|
||
return FDMI_PORT_SPEED_10GB;
|
||
if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
|
||
- uint speeds = 0;
|
||
-
|
||
if (ha->max_supported_speed == 2) {
|
||
if (ha->min_supported_speed <= 6)
|
||
speeds |= FDMI_PORT_SPEED_64GB;
|
||
@@ -1536,9 +1536,16 @@ qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
|
||
}
|
||
return speeds;
|
||
}
|
||
- if (IS_QLA2031(ha))
|
||
- return FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
|
||
- FDMI_PORT_SPEED_4GB;
|
||
+ if (IS_QLA2031(ha)) {
|
||
+ if ((ha->pdev->subsystem_vendor == 0x103C) &&
|
||
+ (ha->pdev->subsystem_device == 0x8002)) {
|
||
+ speeds = FDMI_PORT_SPEED_16GB;
|
||
+ } else {
|
||
+ speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
|
||
+ FDMI_PORT_SPEED_4GB;
|
||
+ }
|
||
+ return speeds;
|
||
+ }
|
||
if (IS_QLA25XX(ha))
|
||
return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB|
|
||
FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
|
||
@@ -3436,7 +3443,6 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
|
||
list_for_each_entry(fcport, &vha->vp_fcports, list) {
|
||
if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) {
|
||
fcport->scan_state = QLA_FCPORT_SCAN;
|
||
- fcport->logout_on_delete = 0;
|
||
}
|
||
}
|
||
goto login_logout;
|
||
@@ -3532,10 +3538,22 @@ login_logout:
|
||
}
|
||
|
||
if (fcport->scan_state != QLA_FCPORT_FOUND) {
|
||
+ bool do_delete = false;
|
||
+
|
||
+ if (fcport->scan_needed &&
|
||
+ fcport->disc_state == DSC_LOGIN_PEND) {
|
||
+ /* Cable got disconnected after we sent
|
||
+ * a login. Do delete to prevent timeout.
|
||
+ */
|
||
+ fcport->logout_on_delete = 1;
|
||
+ do_delete = true;
|
||
+ }
|
||
+
|
||
fcport->scan_needed = 0;
|
||
- if ((qla_dual_mode_enabled(vha) ||
|
||
- qla_ini_mode_enabled(vha)) &&
|
||
- atomic_read(&fcport->state) == FCS_ONLINE) {
|
||
+ if (((qla_dual_mode_enabled(vha) ||
|
||
+ qla_ini_mode_enabled(vha)) &&
|
||
+ atomic_read(&fcport->state) == FCS_ONLINE) ||
|
||
+ do_delete) {
|
||
if (fcport->loop_id != FC_NO_LOOP_ID) {
|
||
if (fcport->flags & FCF_FCP2_DEVICE)
|
||
fcport->logout_on_delete = 0;
|
||
@@ -3736,6 +3754,18 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
|
||
unsigned long flags;
|
||
const char *name = sp->name;
|
||
|
||
+ if (res == QLA_OS_TIMER_EXPIRED) {
|
||
+ /* switch is ignoring all commands.
|
||
+ * This might be a zone disable behavior.
|
||
+ * This means we hit 64s timeout.
|
||
+ * 22s GPNFT + 44s Abort = 64s
|
||
+ */
|
||
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
|
||
+ "%s: Switch Zone check please .\n",
|
||
+ name);
|
||
+ qla2x00_mark_all_devices_lost(vha);
|
||
+ }
|
||
+
|
||
/*
|
||
* We are in an Interrupt context, queue up this
|
||
* sp for GNNFT_DONE work. This will allow all
|
||
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
|
||
index df31ee0d59b20..fdb2ce7acb912 100644
|
||
--- a/drivers/scsi/qla2xxx/qla_mbx.c
|
||
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
|
||
@@ -333,14 +333,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
|
||
if (time_after(jiffies, wait_time))
|
||
break;
|
||
|
||
- /*
|
||
- * Check if it's UNLOADING, cause we cannot poll in
|
||
- * this case, or else a NULL pointer dereference
|
||
- * is triggered.
|
||
- */
|
||
- if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
|
||
- return QLA_FUNCTION_TIMEOUT;
|
||
-
|
||
/* Check for pending interrupts. */
|
||
qla2x00_poll(ha->rsp_q_map[0]);
|
||
|
||
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
|
||
index fa695a4007f86..262dfd7635a48 100644
|
||
--- a/drivers/scsi/qla2xxx/qla_nvme.c
|
||
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
|
||
@@ -536,6 +536,11 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
|
||
struct nvme_private *priv = fd->private;
|
||
struct qla_nvme_rport *qla_rport = rport->private;
|
||
|
||
+ if (!priv) {
|
||
+ /* nvme association has been torn down */
|
||
+ return rval;
|
||
+ }
|
||
+
|
||
fcport = qla_rport->fcport;
|
||
|
||
if (!qpair || !fcport || (qpair && !qpair->fw_started) ||
|
||
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
|
||
index 5c7c22d0fab4b..8b6803f4f2dc1 100644
|
||
--- a/drivers/scsi/qla2xxx/qla_os.c
|
||
+++ b/drivers/scsi/qla2xxx/qla_os.c
|
||
@@ -2017,6 +2017,11 @@ skip_pio:
|
||
/* Determine queue resources */
|
||
ha->max_req_queues = ha->max_rsp_queues = 1;
|
||
ha->msix_count = QLA_BASE_VECTORS;
|
||
+
|
||
+ /* Check if FW supports MQ or not */
|
||
+ if (!(ha->fw_attributes & BIT_6))
|
||
+ goto mqiobase_exit;
|
||
+
|
||
if (!ql2xmqsupport || !ql2xnvmeenable ||
|
||
(!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
|
||
goto mqiobase_exit;
|
||
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
|
||
index fbb80a043b4fe..90289162dbd4c 100644
|
||
--- a/drivers/scsi/qla2xxx/qla_target.c
|
||
+++ b/drivers/scsi/qla2xxx/qla_target.c
|
||
@@ -1270,7 +1270,7 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
|
||
|
||
qla24xx_chk_fcp_state(sess);
|
||
|
||
- ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
|
||
+ ql_dbg(ql_dbg_disc, sess->vha, 0xe001,
|
||
"Scheduling sess %p for deletion %8phC\n",
|
||
sess, sess->port_name);
|
||
|
||
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
|
||
index b0d93bf79978f..25faad7f8e617 100644
|
||
--- a/drivers/scsi/scsi_debug.c
|
||
+++ b/drivers/scsi/scsi_debug.c
|
||
@@ -5486,9 +5486,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
|
||
u64 d = ktime_get_boottime_ns() - ns_from_boot;
|
||
|
||
if (kt <= d) { /* elapsed duration >= kt */
|
||
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
|
||
sqcp->a_cmnd = NULL;
|
||
atomic_dec(&devip->num_in_q);
|
||
clear_bit(k, sqp->in_use_bm);
|
||
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
|
||
if (new_sd_dp)
|
||
kfree(sd_dp);
|
||
/* call scsi_done() from this thread */
|
||
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
|
||
index 7ae5024e78243..df07ecd94793a 100644
|
||
--- a/drivers/scsi/scsi_transport_iscsi.c
|
||
+++ b/drivers/scsi/scsi_transport_iscsi.c
|
||
@@ -3291,7 +3291,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
|
||
pr_err("%s could not find host no %u\n",
|
||
__func__, ev->u.set_flashnode.host_no);
|
||
err = -ENODEV;
|
||
- goto put_host;
|
||
+ goto exit_set_fnode;
|
||
}
|
||
|
||
idx = ev->u.set_flashnode.flashnode_idx;
|
||
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
|
||
index 136b863bc1d45..8bc8e4e62c045 100644
|
||
--- a/drivers/scsi/ufs/ufshcd.c
|
||
+++ b/drivers/scsi/ufs/ufshcd.c
|
||
@@ -1566,6 +1566,7 @@ unblock_reqs:
|
||
int ufshcd_hold(struct ufs_hba *hba, bool async)
|
||
{
|
||
int rc = 0;
|
||
+ bool flush_result;
|
||
unsigned long flags;
|
||
|
||
if (!ufshcd_is_clkgating_allowed(hba))
|
||
@@ -1597,7 +1598,9 @@ start:
|
||
break;
|
||
}
|
||
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
||
- flush_work(&hba->clk_gating.ungate_work);
|
||
+ flush_result = flush_work(&hba->clk_gating.ungate_work);
|
||
+ if (hba->clk_gating.is_suspended && !flush_result)
|
||
+ goto out;
|
||
spin_lock_irqsave(hba->host->host_lock, flags);
|
||
goto start;
|
||
}
|
||
@@ -5988,7 +5991,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
|
||
*/
|
||
static irqreturn_t ufshcd_intr(int irq, void *__hba)
|
||
{
|
||
- u32 intr_status, enabled_intr_status;
|
||
+ u32 intr_status, enabled_intr_status = 0;
|
||
irqreturn_t retval = IRQ_NONE;
|
||
struct ufs_hba *hba = __hba;
|
||
int retries = hba->nutrs;
|
||
@@ -6002,7 +6005,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
|
||
* read, make sure we handle them by checking the interrupt status
|
||
* again in a loop until we process all of the reqs before returning.
|
||
*/
|
||
- do {
|
||
+ while (intr_status && retries--) {
|
||
enabled_intr_status =
|
||
intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
|
||
if (intr_status)
|
||
@@ -6011,7 +6014,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
|
||
retval |= ufshcd_sl_intr(hba, enabled_intr_status);
|
||
|
||
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
|
||
- } while (intr_status && --retries);
|
||
+ }
|
||
|
||
if (enabled_intr_status && retval == IRQ_NONE) {
|
||
dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
|
||
@@ -6538,7 +6541,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
|
||
/* command completed already */
|
||
dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
|
||
__func__, tag);
|
||
- goto out;
|
||
+ goto cleanup;
|
||
} else {
|
||
dev_err(hba->dev,
|
||
"%s: no response from device. tag = %d, err %d\n",
|
||
@@ -6572,6 +6575,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
|
||
goto out;
|
||
}
|
||
|
||
+cleanup:
|
||
scsi_dma_unmap(cmd);
|
||
|
||
spin_lock_irqsave(host->host_lock, flags);
|
||
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
|
||
index 9672cda2f8031..d4b33b358a31e 100644
|
||
--- a/drivers/spi/spi-stm32.c
|
||
+++ b/drivers/spi/spi-stm32.c
|
||
@@ -442,7 +442,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
|
||
{
|
||
u32 div, mbrdiv;
|
||
|
||
- div = DIV_ROUND_UP(spi->clk_rate, speed_hz);
|
||
+ /* Ensure spi->clk_rate is even */
|
||
+ div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz);
|
||
|
||
/*
|
||
* SPI framework set xfer->speed_hz to master->max_speed_hz if
|
||
@@ -468,20 +469,27 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
|
||
/**
|
||
* stm32h7_spi_prepare_fthlv - Determine FIFO threshold level
|
||
* @spi: pointer to the spi controller data structure
|
||
+ * @xfer_len: length of the message to be transferred
|
||
*/
|
||
-static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi)
|
||
+static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
|
||
{
|
||
- u32 fthlv, half_fifo;
|
||
+ u32 fthlv, half_fifo, packet;
|
||
|
||
/* data packet should not exceed 1/2 of fifo space */
|
||
half_fifo = (spi->fifo_size / 2);
|
||
|
||
+ /* data_packet should not exceed transfer length */
|
||
+ if (half_fifo > xfer_len)
|
||
+ packet = xfer_len;
|
||
+ else
|
||
+ packet = half_fifo;
|
||
+
|
||
if (spi->cur_bpw <= 8)
|
||
- fthlv = half_fifo;
|
||
+ fthlv = packet;
|
||
else if (spi->cur_bpw <= 16)
|
||
- fthlv = half_fifo / 2;
|
||
+ fthlv = packet / 2;
|
||
else
|
||
- fthlv = half_fifo / 4;
|
||
+ fthlv = packet / 4;
|
||
|
||
/* align packet size with data registers access */
|
||
if (spi->cur_bpw > 8)
|
||
@@ -489,6 +497,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi)
|
||
else
|
||
fthlv -= (fthlv % 4); /* multiple of 4 */
|
||
|
||
+ if (!fthlv)
|
||
+ fthlv = 1;
|
||
+
|
||
return fthlv;
|
||
}
|
||
|
||
@@ -967,13 +978,13 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
|
||
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
|
||
stm32h7_spi_read_rxfifo(spi, false);
|
||
|
||
- writel_relaxed(mask, spi->base + STM32H7_SPI_IFCR);
|
||
+ writel_relaxed(sr & mask, spi->base + STM32H7_SPI_IFCR);
|
||
|
||
spin_unlock_irqrestore(&spi->lock, flags);
|
||
|
||
if (end) {
|
||
- spi_finalize_current_transfer(master);
|
||
stm32h7_spi_disable(spi);
|
||
+ spi_finalize_current_transfer(master);
|
||
}
|
||
|
||
return IRQ_HANDLED;
|
||
@@ -1394,7 +1405,7 @@ static void stm32h7_spi_set_bpw(struct stm32_spi *spi)
|
||
cfg1_setb |= (bpw << STM32H7_SPI_CFG1_DSIZE_SHIFT) &
|
||
STM32H7_SPI_CFG1_DSIZE;
|
||
|
||
- spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi);
|
||
+ spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi, spi->cur_xferlen);
|
||
fthlv = spi->cur_fthlv - 1;
|
||
|
||
cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV;
|
||
@@ -1586,39 +1597,33 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
|
||
unsigned long flags;
|
||
unsigned int comm_type;
|
||
int nb_words, ret = 0;
|
||
+ int mbr;
|
||
|
||
spin_lock_irqsave(&spi->lock, flags);
|
||
|
||
- if (spi->cur_bpw != transfer->bits_per_word) {
|
||
- spi->cur_bpw = transfer->bits_per_word;
|
||
- spi->cfg->set_bpw(spi);
|
||
- }
|
||
+ spi->cur_xferlen = transfer->len;
|
||
|
||
- if (spi->cur_speed != transfer->speed_hz) {
|
||
- int mbr;
|
||
+ spi->cur_bpw = transfer->bits_per_word;
|
||
+ spi->cfg->set_bpw(spi);
|
||
|
||
- /* Update spi->cur_speed with real clock speed */
|
||
- mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
|
||
- spi->cfg->baud_rate_div_min,
|
||
- spi->cfg->baud_rate_div_max);
|
||
- if (mbr < 0) {
|
||
- ret = mbr;
|
||
- goto out;
|
||
- }
|
||
-
|
||
- transfer->speed_hz = spi->cur_speed;
|
||
- stm32_spi_set_mbr(spi, mbr);
|
||
+ /* Update spi->cur_speed with real clock speed */
|
||
+ mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
|
||
+ spi->cfg->baud_rate_div_min,
|
||
+ spi->cfg->baud_rate_div_max);
|
||
+ if (mbr < 0) {
|
||
+ ret = mbr;
|
||
+ goto out;
|
||
}
|
||
|
||
- comm_type = stm32_spi_communication_type(spi_dev, transfer);
|
||
- if (spi->cur_comm != comm_type) {
|
||
- ret = spi->cfg->set_mode(spi, comm_type);
|
||
+ transfer->speed_hz = spi->cur_speed;
|
||
+ stm32_spi_set_mbr(spi, mbr);
|
||
|
||
- if (ret < 0)
|
||
- goto out;
|
||
+ comm_type = stm32_spi_communication_type(spi_dev, transfer);
|
||
+ ret = spi->cfg->set_mode(spi, comm_type);
|
||
+ if (ret < 0)
|
||
+ goto out;
|
||
|
||
- spi->cur_comm = comm_type;
|
||
- }
|
||
+ spi->cur_comm = comm_type;
|
||
|
||
if (spi->cfg->set_data_idleness)
|
||
spi->cfg->set_data_idleness(spi, transfer->len);
|
||
@@ -1636,8 +1641,6 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
|
||
goto out;
|
||
}
|
||
|
||
- spi->cur_xferlen = transfer->len;
|
||
-
|
||
dev_dbg(spi->dev, "transfer communication mode set to %d\n",
|
||
spi->cur_comm);
|
||
dev_dbg(spi->dev,
|
||
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
|
||
index be0053c795b7a..937f4e732a75c 100644
|
||
--- a/drivers/staging/rts5208/rtsx.c
|
||
+++ b/drivers/staging/rts5208/rtsx.c
|
||
@@ -972,6 +972,7 @@ ioremap_fail:
|
||
kfree(dev->chip);
|
||
chip_alloc_fail:
|
||
dev_err(&pci->dev, "%s failed\n", __func__);
|
||
+ scsi_host_put(host);
|
||
scsi_host_alloc_fail:
|
||
pci_release_regions(pci);
|
||
return err;
|
||
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
|
||
index 8533444159635..e7b3c6e5d5744 100644
|
||
--- a/drivers/target/target_core_internal.h
|
||
+++ b/drivers/target/target_core_internal.h
|
||
@@ -138,6 +138,7 @@ int init_se_kmem_caches(void);
|
||
void release_se_kmem_caches(void);
|
||
u32 scsi_get_new_index(scsi_index_t);
|
||
void transport_subsystem_check_init(void);
|
||
+void transport_uninit_session(struct se_session *);
|
||
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
|
||
void transport_dump_dev_state(struct se_device *, char *, int *);
|
||
void transport_dump_dev_info(struct se_device *, struct se_lun *,
|
||
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
|
||
index 90ecdd706a017..e6e1fa68de542 100644
|
||
--- a/drivers/target/target_core_transport.c
|
||
+++ b/drivers/target/target_core_transport.c
|
||
@@ -236,6 +236,11 @@ int transport_init_session(struct se_session *se_sess)
|
||
}
|
||
EXPORT_SYMBOL(transport_init_session);
|
||
|
||
+void transport_uninit_session(struct se_session *se_sess)
|
||
+{
|
||
+ percpu_ref_exit(&se_sess->cmd_count);
|
||
+}
|
||
+
|
||
/**
|
||
* transport_alloc_session - allocate a session object and initialize it
|
||
* @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
|
||
@@ -579,7 +584,7 @@ void transport_free_session(struct se_session *se_sess)
|
||
sbitmap_queue_free(&se_sess->sess_tag_pool);
|
||
kvfree(se_sess->sess_cmd_map);
|
||
}
|
||
- percpu_ref_exit(&se_sess->cmd_count);
|
||
+ transport_uninit_session(se_sess);
|
||
kmem_cache_free(se_sess_cache, se_sess);
|
||
}
|
||
EXPORT_SYMBOL(transport_free_session);
|
||
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
|
||
index 63cca0e1e9123..9ab960cc39b6f 100644
|
||
--- a/drivers/target/target_core_user.c
|
||
+++ b/drivers/target/target_core_user.c
|
||
@@ -1220,7 +1220,14 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
|
||
|
||
struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
|
||
|
||
- tcmu_flush_dcache_range(entry, sizeof(*entry));
|
||
+ /*
|
||
+ * Flush max. up to end of cmd ring since current entry might
|
||
+ * be a padding that is shorter than sizeof(*entry)
|
||
+ */
|
||
+ size_t ring_left = head_to_end(udev->cmdr_last_cleaned,
|
||
+ udev->cmdr_size);
|
||
+ tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ?
|
||
+ ring_left : sizeof(*entry));
|
||
|
||
if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
|
||
UPDATE_HEAD(udev->cmdr_last_cleaned,
|
||
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
|
||
index 0d00ccbeb0503..44e15d7fb2f09 100644
|
||
--- a/drivers/target/target_core_xcopy.c
|
||
+++ b/drivers/target/target_core_xcopy.c
|
||
@@ -474,7 +474,7 @@ int target_xcopy_setup_pt(void)
|
||
memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
|
||
ret = transport_init_session(&xcopy_pt_sess);
|
||
if (ret < 0)
|
||
- return ret;
|
||
+ goto destroy_wq;
|
||
|
||
xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
|
||
xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
|
||
@@ -483,12 +483,19 @@ int target_xcopy_setup_pt(void)
|
||
xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;
|
||
|
||
return 0;
|
||
+
|
||
+destroy_wq:
|
||
+ destroy_workqueue(xcopy_wq);
|
||
+ xcopy_wq = NULL;
|
||
+ return ret;
|
||
}
|
||
|
||
void target_xcopy_release_pt(void)
|
||
{
|
||
- if (xcopy_wq)
|
||
+ if (xcopy_wq) {
|
||
destroy_workqueue(xcopy_wq);
|
||
+ transport_uninit_session(&xcopy_pt_sess);
|
||
+ }
|
||
}
|
||
|
||
/*
|
||
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
|
||
index 04b9af7ed9415..2d0e7c7e408dc 100644
|
||
--- a/drivers/tty/serial/8250/8250_exar.c
|
||
+++ b/drivers/tty/serial/8250/8250_exar.c
|
||
@@ -744,6 +744,24 @@ static const struct exar8250_board pbn_exar_XR17V35x = {
|
||
.exit = pci_xr17v35x_exit,
|
||
};
|
||
|
||
+static const struct exar8250_board pbn_fastcom35x_2 = {
|
||
+ .num_ports = 2,
|
||
+ .setup = pci_xr17v35x_setup,
|
||
+ .exit = pci_xr17v35x_exit,
|
||
+};
|
||
+
|
||
+static const struct exar8250_board pbn_fastcom35x_4 = {
|
||
+ .num_ports = 4,
|
||
+ .setup = pci_xr17v35x_setup,
|
||
+ .exit = pci_xr17v35x_exit,
|
||
+};
|
||
+
|
||
+static const struct exar8250_board pbn_fastcom35x_8 = {
|
||
+ .num_ports = 8,
|
||
+ .setup = pci_xr17v35x_setup,
|
||
+ .exit = pci_xr17v35x_exit,
|
||
+};
|
||
+
|
||
static const struct exar8250_board pbn_exar_XR17V4358 = {
|
||
.num_ports = 12,
|
||
.setup = pci_xr17v35x_setup,
|
||
@@ -811,9 +829,9 @@ static const struct pci_device_id exar_pci_tbl[] = {
|
||
EXAR_DEVICE(EXAR, XR17V358, pbn_exar_XR17V35x),
|
||
EXAR_DEVICE(EXAR, XR17V4358, pbn_exar_XR17V4358),
|
||
EXAR_DEVICE(EXAR, XR17V8358, pbn_exar_XR17V8358),
|
||
- EXAR_DEVICE(COMMTECH, 4222PCIE, pbn_exar_XR17V35x),
|
||
- EXAR_DEVICE(COMMTECH, 4224PCIE, pbn_exar_XR17V35x),
|
||
- EXAR_DEVICE(COMMTECH, 4228PCIE, pbn_exar_XR17V35x),
|
||
+ EXAR_DEVICE(COMMTECH, 4222PCIE, pbn_fastcom35x_2),
|
||
+ EXAR_DEVICE(COMMTECH, 4224PCIE, pbn_fastcom35x_4),
|
||
+ EXAR_DEVICE(COMMTECH, 4228PCIE, pbn_fastcom35x_8),
|
||
|
||
EXAR_DEVICE(COMMTECH, 4222PCI335, pbn_fastcom335_2),
|
||
EXAR_DEVICE(COMMTECH, 4224PCI335, pbn_fastcom335_4),
|
||
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
|
||
index 1632f7d25acca..63a6d13f70b80 100644
|
||
--- a/drivers/tty/serial/8250/8250_port.c
|
||
+++ b/drivers/tty/serial/8250/8250_port.c
|
||
@@ -2274,6 +2274,10 @@ int serial8250_do_startup(struct uart_port *port)
|
||
|
||
if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
|
||
unsigned char iir1;
|
||
+
|
||
+ if (port->irqflags & IRQF_SHARED)
|
||
+ disable_irq_nosync(port->irq);
|
||
+
|
||
/*
|
||
* Test for UARTs that do not reassert THRE when the
|
||
* transmitter is idle and the interrupt has already
|
||
@@ -2283,8 +2287,6 @@ int serial8250_do_startup(struct uart_port *port)
|
||
* allow register changes to become visible.
|
||
*/
|
||
spin_lock_irqsave(&port->lock, flags);
|
||
- if (up->port.irqflags & IRQF_SHARED)
|
||
- disable_irq_nosync(port->irq);
|
||
|
||
wait_for_xmitr(up, UART_LSR_THRE);
|
||
serial_port_out_sync(port, UART_IER, UART_IER_THRI);
|
||
@@ -2296,9 +2298,10 @@ int serial8250_do_startup(struct uart_port *port)
|
||
iir = serial_port_in(port, UART_IIR);
|
||
serial_port_out(port, UART_IER, 0);
|
||
|
||
+ spin_unlock_irqrestore(&port->lock, flags);
|
||
+
|
||
if (port->irqflags & IRQF_SHARED)
|
||
enable_irq(port->irq);
|
||
- spin_unlock_irqrestore(&port->lock, flags);
|
||
|
||
/*
|
||
* If the interrupt is not reasserted, or we otherwise
|
||
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
|
||
index 8efd7c2a34fe8..a8d1edcf252c7 100644
|
||
--- a/drivers/tty/serial/amba-pl011.c
|
||
+++ b/drivers/tty/serial/amba-pl011.c
|
||
@@ -2241,9 +2241,8 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
|
||
clk_disable(uap->clk);
|
||
}
|
||
|
||
-static void __init
|
||
-pl011_console_get_options(struct uart_amba_port *uap, int *baud,
|
||
- int *parity, int *bits)
|
||
+static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
|
||
+ int *parity, int *bits)
|
||
{
|
||
if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
|
||
unsigned int lcr_h, ibrd, fbrd;
|
||
@@ -2276,7 +2275,7 @@ pl011_console_get_options(struct uart_amba_port *uap, int *baud,
|
||
}
|
||
}
|
||
|
||
-static int __init pl011_console_setup(struct console *co, char *options)
|
||
+static int pl011_console_setup(struct console *co, char *options)
|
||
{
|
||
struct uart_amba_port *uap;
|
||
int baud = 38400;
|
||
@@ -2344,8 +2343,8 @@ static int __init pl011_console_setup(struct console *co, char *options)
|
||
*
|
||
* Returns 0 if console matches; otherwise non-zero to use default matching
|
||
*/
|
||
-static int __init pl011_console_match(struct console *co, char *name, int idx,
|
||
- char *options)
|
||
+static int pl011_console_match(struct console *co, char *name, int idx,
|
||
+ char *options)
|
||
{
|
||
unsigned char iotype;
|
||
resource_size_t addr;
|
||
@@ -2616,7 +2615,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
|
||
|
||
static int pl011_register_port(struct uart_amba_port *uap)
|
||
{
|
||
- int ret;
|
||
+ int ret, i;
|
||
|
||
/* Ensure interrupts from this UART are masked and cleared */
|
||
pl011_write(0, uap, REG_IMSC);
|
||
@@ -2627,6 +2626,9 @@ static int pl011_register_port(struct uart_amba_port *uap)
|
||
if (ret < 0) {
|
||
dev_err(uap->port.dev,
|
||
"Failed to register AMBA-PL011 driver\n");
|
||
+ for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
|
||
+ if (amba_ports[i] == uap)
|
||
+ amba_ports[i] = NULL;
|
||
return ret;
|
||
}
|
||
}
|
||
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
|
||
index d913d9b2762a6..815da3e78ad1a 100644
|
||
--- a/drivers/tty/serial/samsung_tty.c
|
||
+++ b/drivers/tty/serial/samsung_tty.c
|
||
@@ -1911,9 +1911,11 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
|
||
ourport->tx_irq = ret + 1;
|
||
}
|
||
|
||
- ret = platform_get_irq(platdev, 1);
|
||
- if (ret > 0)
|
||
- ourport->tx_irq = ret;
|
||
+ if (!s3c24xx_serial_has_interrupt_mask(port)) {
|
||
+ ret = platform_get_irq(platdev, 1);
|
||
+ if (ret > 0)
|
||
+ ourport->tx_irq = ret;
|
||
+ }
|
||
/*
|
||
* DMA is currently supported only on DT platforms, if DMA properties
|
||
* are specified.
|
||
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
|
||
index 8602ff3573218..b77b41c768fbf 100644
|
||
--- a/drivers/tty/serial/stm32-usart.c
|
||
+++ b/drivers/tty/serial/stm32-usart.c
|
||
@@ -962,7 +962,7 @@ static int stm32_init_port(struct stm32_port *stm32port,
|
||
return ret;
|
||
|
||
if (stm32port->info->cfg.has_wakeup) {
|
||
- stm32port->wakeirq = platform_get_irq(pdev, 1);
|
||
+ stm32port->wakeirq = platform_get_irq_optional(pdev, 1);
|
||
if (stm32port->wakeirq <= 0 && stm32port->wakeirq != -ENXIO)
|
||
return stm32port->wakeirq ? : -ENODEV;
|
||
}
|
||
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
|
||
index 42d8c67a481f0..c9ee8e9498d5a 100644
|
||
--- a/drivers/tty/vt/vt.c
|
||
+++ b/drivers/tty/vt/vt.c
|
||
@@ -1196,7 +1196,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
|
||
unsigned int old_rows, old_row_size, first_copied_row;
|
||
unsigned int new_cols, new_rows, new_row_size, new_screen_size;
|
||
unsigned int user;
|
||
- unsigned short *newscreen;
|
||
+ unsigned short *oldscreen, *newscreen;
|
||
struct uni_screen *new_uniscr = NULL;
|
||
|
||
WARN_CONSOLE_UNLOCKED();
|
||
@@ -1294,10 +1294,11 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
|
||
if (new_scr_end > new_origin)
|
||
scr_memsetw((void *)new_origin, vc->vc_video_erase_char,
|
||
new_scr_end - new_origin);
|
||
- kfree(vc->vc_screenbuf);
|
||
+ oldscreen = vc->vc_screenbuf;
|
||
vc->vc_screenbuf = newscreen;
|
||
vc->vc_screenbuf_size = new_screen_size;
|
||
set_origin(vc);
|
||
+ kfree(oldscreen);
|
||
|
||
/* do part of a reset_terminal() */
|
||
vc->vc_top = 0;
|
||
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
|
||
index daf61c28ba766..cbc85c995d92d 100644
|
||
--- a/drivers/tty/vt/vt_ioctl.c
|
||
+++ b/drivers/tty/vt/vt_ioctl.c
|
||
@@ -893,12 +893,22 @@ int vt_ioctl(struct tty_struct *tty,
|
||
console_lock();
|
||
vcp = vc_cons[i].d;
|
||
if (vcp) {
|
||
+ int ret;
|
||
+ int save_scan_lines = vcp->vc_scan_lines;
|
||
+ int save_font_height = vcp->vc_font.height;
|
||
+
|
||
if (v.v_vlin)
|
||
vcp->vc_scan_lines = v.v_vlin;
|
||
if (v.v_clin)
|
||
vcp->vc_font.height = v.v_clin;
|
||
vcp->vc_resize_user = 1;
|
||
- vc_resize(vcp, v.v_cols, v.v_rows);
|
||
+ ret = vc_resize(vcp, v.v_cols, v.v_rows);
|
||
+ if (ret) {
|
||
+ vcp->vc_scan_lines = save_scan_lines;
|
||
+ vcp->vc_font.height = save_font_height;
|
||
+ console_unlock();
|
||
+ return ret;
|
||
+ }
|
||
}
|
||
console_unlock();
|
||
}
|
||
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
|
||
index d5187b50fc828..7499ba118665a 100644
|
||
--- a/drivers/usb/class/cdc-acm.c
|
||
+++ b/drivers/usb/class/cdc-acm.c
|
||
@@ -378,21 +378,19 @@ static void acm_ctrl_irq(struct urb *urb)
|
||
if (current_size < expected_size) {
|
||
/* notification is transmitted fragmented, reassemble */
|
||
if (acm->nb_size < expected_size) {
|
||
- if (acm->nb_size) {
|
||
- kfree(acm->notification_buffer);
|
||
- acm->nb_size = 0;
|
||
- }
|
||
+ u8 *new_buffer;
|
||
alloc_size = roundup_pow_of_two(expected_size);
|
||
- /*
|
||
- * kmalloc ensures a valid notification_buffer after a
|
||
- * use of kfree in case the previous allocation was too
|
||
- * small. Final freeing is done on disconnect.
|
||
- */
|
||
- acm->notification_buffer =
|
||
- kmalloc(alloc_size, GFP_ATOMIC);
|
||
- if (!acm->notification_buffer)
|
||
+ /* Final freeing is done on disconnect. */
|
||
+ new_buffer = krealloc(acm->notification_buffer,
|
||
+ alloc_size, GFP_ATOMIC);
|
||
+ if (!new_buffer) {
|
||
+ acm->nb_index = 0;
|
||
goto exit;
|
||
+ }
|
||
+
|
||
+ acm->notification_buffer = new_buffer;
|
||
acm->nb_size = alloc_size;
|
||
+ dr = (struct usb_cdc_notification *)acm->notification_buffer;
|
||
}
|
||
|
||
copy_size = min(current_size,
|
||
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
|
||
index f81606c6a35b0..7e73e989645bd 100644
|
||
--- a/drivers/usb/core/driver.c
|
||
+++ b/drivers/usb/core/driver.c
|
||
@@ -905,6 +905,35 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env)
|
||
return 0;
|
||
}
|
||
|
||
+static bool is_dev_usb_generic_driver(struct device *dev)
|
||
+{
|
||
+ struct usb_device_driver *udd = dev->driver ?
|
||
+ to_usb_device_driver(dev->driver) : NULL;
|
||
+
|
||
+ return udd == &usb_generic_driver;
|
||
+}
|
||
+
|
||
+static int __usb_bus_reprobe_drivers(struct device *dev, void *data)
|
||
+{
|
||
+ struct usb_device_driver *new_udriver = data;
|
||
+ struct usb_device *udev;
|
||
+ int ret;
|
||
+
|
||
+ if (!is_dev_usb_generic_driver(dev))
|
||
+ return 0;
|
||
+
|
||
+ udev = to_usb_device(dev);
|
||
+ if (usb_device_match_id(udev, new_udriver->id_table) == NULL &&
|
||
+ (!new_udriver->match || new_udriver->match(udev) != 0))
|
||
+ return 0;
|
||
+
|
||
+ ret = device_reprobe(dev);
|
||
+ if (ret && ret != -EPROBE_DEFER)
|
||
+ dev_err(dev, "Failed to reprobe device (error %d)\n", ret);
|
||
+
|
||
+ return 0;
|
||
+}
|
||
+
|
||
/**
|
||
* usb_register_device_driver - register a USB device (not interface) driver
|
||
* @new_udriver: USB operations for the device driver
|
||
@@ -934,13 +963,20 @@ int usb_register_device_driver(struct usb_device_driver *new_udriver,
|
||
|
||
retval = driver_register(&new_udriver->drvwrap.driver);
|
||
|
||
- if (!retval)
|
||
+ if (!retval) {
|
||
pr_info("%s: registered new device driver %s\n",
|
||
usbcore_name, new_udriver->name);
|
||
- else
|
||
+ /*
|
||
+ * Check whether any device could be better served with
|
||
+ * this new driver
|
||
+ */
|
||
+ bus_for_each_dev(&usb_bus_type, NULL, new_udriver,
|
||
+ __usb_bus_reprobe_drivers);
|
||
+ } else {
|
||
printk(KERN_ERR "%s: error %d registering device "
|
||
" driver %s\n",
|
||
usbcore_name, retval, new_udriver->name);
|
||
+ }
|
||
|
||
return retval;
|
||
}
|
||
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
|
||
index 4626227a6dd22..cd08a47144bd3 100644
|
||
--- a/drivers/usb/core/generic.c
|
||
+++ b/drivers/usb/core/generic.c
|
||
@@ -207,8 +207,9 @@ static int __check_usb_generic(struct device_driver *drv, void *data)
|
||
return 0;
|
||
if (!udrv->id_table)
|
||
return 0;
|
||
-
|
||
- return usb_device_match_id(udev, udrv->id_table) != NULL;
|
||
+ if (usb_device_match_id(udev, udrv->id_table) != NULL)
|
||
+ return 1;
|
||
+ return (udrv->match && udrv->match(udev));
|
||
}
|
||
|
||
static bool usb_generic_driver_match(struct usb_device *udev)
|
||
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
|
||
index c96c50faccf72..2f068e525a374 100644
|
||
--- a/drivers/usb/core/quirks.c
|
||
+++ b/drivers/usb/core/quirks.c
|
||
@@ -370,6 +370,10 @@ static const struct usb_device_id usb_quirk_list[] = {
|
||
{ USB_DEVICE(0x0926, 0x0202), .driver_info =
|
||
USB_QUIRK_ENDPOINT_BLACKLIST },
|
||
|
||
+ /* Sound Devices MixPre-D */
|
||
+ { USB_DEVICE(0x0926, 0x0208), .driver_info =
|
||
+ USB_QUIRK_ENDPOINT_BLACKLIST },
|
||
+
|
||
/* Keytouch QWERTY Panel keyboard */
|
||
{ USB_DEVICE(0x0926, 0x3333), .driver_info =
|
||
USB_QUIRK_CONFIG_INTF_STRINGS },
|
||
@@ -465,6 +469,8 @@ static const struct usb_device_id usb_quirk_list[] = {
|
||
|
||
{ USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM },
|
||
|
||
+ { USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM },
|
||
+
|
||
/* DJI CineSSD */
|
||
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
|
||
|
||
@@ -509,6 +515,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
|
||
*/
|
||
static const struct usb_device_id usb_endpoint_blacklist[] = {
|
||
{ USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
|
||
+ { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0208, 1), .driver_info = 0x85 },
|
||
{ }
|
||
};
|
||
|
||
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
|
||
index 80c3ef134e41d..1739c5ea93c82 100644
|
||
--- a/drivers/usb/dwc3/gadget.c
|
||
+++ b/drivers/usb/dwc3/gadget.c
|
||
@@ -1054,27 +1054,25 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
|
||
* dwc3_prepare_one_trb - setup one TRB from one request
|
||
* @dep: endpoint for which this request is prepared
|
||
* @req: dwc3_request pointer
|
||
+ * @trb_length: buffer size of the TRB
|
||
* @chain: should this TRB be chained to the next?
|
||
* @node: only for isochronous endpoints. First TRB needs different type.
|
||
*/
|
||
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
|
||
- struct dwc3_request *req, unsigned chain, unsigned node)
|
||
+ struct dwc3_request *req, unsigned int trb_length,
|
||
+ unsigned chain, unsigned node)
|
||
{
|
||
struct dwc3_trb *trb;
-	unsigned int		length;
 	dma_addr_t		dma;
 	unsigned		stream_id = req->request.stream_id;
 	unsigned		short_not_ok = req->request.short_not_ok;
 	unsigned		no_interrupt = req->request.no_interrupt;
 	unsigned		is_last = req->request.is_last;
 
-	if (req->request.num_sgs > 0) {
-		length = sg_dma_len(req->start_sg);
+	if (req->request.num_sgs > 0)
 		dma = sg_dma_address(req->start_sg);
-	} else {
-		length = req->request.length;
+	else
 		dma = req->request.dma;
-	}
 
 	trb = &dep->trb_pool[dep->trb_enqueue];
 
@@ -1086,7 +1084,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
 
 	req->num_trbs++;
 
-	__dwc3_prepare_one_trb(dep, trb, dma, length, chain, node,
+	__dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node,
 			stream_id, short_not_ok, no_interrupt, is_last);
 }
 
@@ -1096,16 +1094,27 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
 	struct scatterlist *sg = req->start_sg;
 	struct scatterlist *s;
 	int		i;
-
+	unsigned int length = req->request.length;
 	unsigned int remaining = req->request.num_mapped_sgs
 		- req->num_queued_sgs;
 
+	/*
+	 * If we resume preparing the request, then get the remaining length of
+	 * the request and resume where we left off.
+	 */
+	for_each_sg(req->request.sg, s, req->num_queued_sgs, i)
+		length -= sg_dma_len(s);
+
 	for_each_sg(sg, s, remaining, i) {
-		unsigned int length = req->request.length;
 		unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
 		unsigned int rem = length % maxp;
+		unsigned int trb_length;
 		unsigned chain = true;
 
+		trb_length = min_t(unsigned int, length, sg_dma_len(s));
+
+		length -= trb_length;
+
 		/*
 		 * IOMMU driver is coalescing the list of sgs which shares a
 		 * page boundary into one and giving it to USB driver. With
@@ -1113,7 +1122,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
 		 * sgs passed. So mark the chain bit to false if it isthe last
 		 * mapped sg.
 		 */
-		if (i == remaining - 1)
+		if ((i == remaining - 1) || !length)
 			chain = false;
 
 		if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
@@ -1123,7 +1132,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
 			req->needs_extra_trb = true;
 
 			/* prepare normal TRB */
-			dwc3_prepare_one_trb(dep, req, true, i);
+			dwc3_prepare_one_trb(dep, req, trb_length, true, i);
 
 			/* Now prepare one extra TRB to align transfer size */
 			trb = &dep->trb_pool[dep->trb_enqueue];
@@ -1134,8 +1143,39 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
 					req->request.short_not_ok,
 					req->request.no_interrupt,
 					req->request.is_last);
+		} else if (req->request.zero && req->request.length &&
+			   !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+			   !rem && !chain) {
+			struct dwc3	*dwc = dep->dwc;
+			struct dwc3_trb	*trb;
+
+			req->needs_extra_trb = true;
+
+			/* Prepare normal TRB */
+			dwc3_prepare_one_trb(dep, req, trb_length, true, i);
+
+			/* Prepare one extra TRB to handle ZLP */
+			trb = &dep->trb_pool[dep->trb_enqueue];
+			req->num_trbs++;
+			__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
+					       !req->direction, 1,
+					       req->request.stream_id,
+					       req->request.short_not_ok,
+					       req->request.no_interrupt,
+					       req->request.is_last);
+
+			/* Prepare one more TRB to handle MPS alignment */
+			if (!req->direction) {
+				trb = &dep->trb_pool[dep->trb_enqueue];
+				req->num_trbs++;
+				__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp,
+						       false, 1, req->request.stream_id,
+						       req->request.short_not_ok,
+						       req->request.no_interrupt,
+						       req->request.is_last);
+			}
 		} else {
-			dwc3_prepare_one_trb(dep, req, chain, i);
+			dwc3_prepare_one_trb(dep, req, trb_length, chain, i);
 		}
 
 		/*
@@ -1150,6 +1190,16 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
 
 		req->num_queued_sgs++;
 
+		/*
+		 * The number of pending SG entries may not correspond to the
+		 * number of mapped SG entries. If all the data are queued, then
+		 * don't include unused SG entries.
+		 */
+		if (length == 0) {
+			req->num_pending_sgs -= req->request.num_mapped_sgs - req->num_queued_sgs;
+			break;
+		}
+
 		if (!dwc3_calc_trbs_left(dep))
 			break;
 	}
 
@@ -1169,7 +1219,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
 		req->needs_extra_trb = true;
 
 		/* prepare normal TRB */
-		dwc3_prepare_one_trb(dep, req, true, 0);
+		dwc3_prepare_one_trb(dep, req, length, true, 0);
 
 		/* Now prepare one extra TRB to align transfer size */
 		trb = &dep->trb_pool[dep->trb_enqueue];
@@ -1180,6 +1230,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
 				req->request.no_interrupt,
 				req->request.is_last);
 	} else if (req->request.zero && req->request.length &&
+		   !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
 		   (IS_ALIGNED(req->request.length, maxp))) {
 		struct dwc3	*dwc = dep->dwc;
 		struct dwc3_trb	*trb;
@@ -1187,18 +1238,29 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
 		req->needs_extra_trb = true;
 
 		/* prepare normal TRB */
-		dwc3_prepare_one_trb(dep, req, true, 0);
+		dwc3_prepare_one_trb(dep, req, length, true, 0);
 
-		/* Now prepare one extra TRB to handle ZLP */
+		/* Prepare one extra TRB to handle ZLP */
 		trb = &dep->trb_pool[dep->trb_enqueue];
 		req->num_trbs++;
 		__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
-				false, 1, req->request.stream_id,
+				!req->direction, 1, req->request.stream_id,
 				req->request.short_not_ok,
 				req->request.no_interrupt,
 				req->request.is_last);
+
+		/* Prepare one more TRB to handle MPS alignment for OUT */
+		if (!req->direction) {
+			trb = &dep->trb_pool[dep->trb_enqueue];
+			req->num_trbs++;
+			__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp,
+					false, 1, req->request.stream_id,
+					req->request.short_not_ok,
+					req->request.no_interrupt,
+					req->request.is_last);
+		}
 	} else {
-		dwc3_prepare_one_trb(dep, req, false, 0);
+		dwc3_prepare_one_trb(dep, req, length, false, 0);
 	}
 }
 
@@ -2649,8 +2711,17 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
 				status);
 
 	if (req->needs_extra_trb) {
+		unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
+
 		ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
 				status);
+
+		/* Reclaim MPS padding TRB for ZLP */
+		if (!req->direction && req->request.zero && req->request.length &&
+		    !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+		    (IS_ALIGNED(req->request.length, maxp)))
+			ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status);
+
 		req->needs_extra_trb = false;
 	}
 
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 1d900081b1f0c..b4206b0dede54 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1181,12 +1181,15 @@ static int ncm_unwrap_ntb(struct gether *port,
 	int		ndp_index;
 	unsigned	dg_len, dg_len2;
 	unsigned	ndp_len;
+	unsigned	block_len;
 	struct sk_buff	*skb2;
 	int		ret = -EINVAL;
-	unsigned	max_size = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
+	unsigned	ntb_max = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
+	unsigned	frame_max = le16_to_cpu(ecm_desc.wMaxSegmentSize);
 	const struct ndp_parser_opts *opts = ncm->parser_opts;
 	unsigned	crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
 	int		dgram_counter;
+	bool		ndp_after_header;
 
 	/* dwSignature */
 	if (get_unaligned_le32(tmp) != opts->nth_sign) {
@@ -1205,25 +1208,37 @@ static int ncm_unwrap_ntb(struct gether *port,
 	}
 	tmp++; /* skip wSequence */
 
+	block_len = get_ncm(&tmp, opts->block_length);
 	/* (d)wBlockLength */
-	if (get_ncm(&tmp, opts->block_length) > max_size) {
+	if (block_len > ntb_max) {
 		INFO(port->func.config->cdev, "OUT size exceeded\n");
 		goto err;
 	}
 
 	ndp_index = get_ncm(&tmp, opts->ndp_index);
+	ndp_after_header = false;
 
 	/* Run through all the NDP's in the NTB */
 	do {
-		/* NCM 3.2 */
-		if (((ndp_index % 4) != 0) &&
-				(ndp_index < opts->nth_size)) {
+		/*
+		 * NCM 3.2
+		 * dwNdpIndex
+		 */
+		if (((ndp_index % 4) != 0) ||
+				(ndp_index < opts->nth_size) ||
+				(ndp_index > (block_len -
+					      opts->ndp_size))) {
 			INFO(port->func.config->cdev, "Bad index: %#X\n",
 			     ndp_index);
 			goto err;
 		}
+		if (ndp_index == opts->nth_size)
+			ndp_after_header = true;
 
-		/* walk through NDP */
+		/*
+		 * walk through NDP
+		 * dwSignature
+		 */
 		tmp = (void *)(skb->data + ndp_index);
 		if (get_unaligned_le32(tmp) != ncm->ndp_sign) {
 			INFO(port->func.config->cdev, "Wrong NDP SIGN\n");
@@ -1234,14 +1249,15 @@ static int ncm_unwrap_ntb(struct gether *port,
 		ndp_len = get_unaligned_le16(tmp++);
 		/*
 		 * NCM 3.3.1
+		 * wLength
 		 * entry is 2 items
 		 * item size is 16/32 bits, opts->dgram_item_len * 2 bytes
 		 * minimal: struct usb_cdc_ncm_ndpX + normal entry + zero entry
 		 * Each entry is a dgram index and a dgram length.
 		 */
 		if ((ndp_len < opts->ndp_size
-				+ 2 * 2 * (opts->dgram_item_len * 2))
-				|| (ndp_len % opts->ndplen_align != 0)) {
+				+ 2 * 2 * (opts->dgram_item_len * 2)) ||
+				(ndp_len % opts->ndplen_align != 0)) {
 			INFO(port->func.config->cdev, "Bad NDP length: %#X\n",
 			     ndp_len);
 			goto err;
@@ -1258,8 +1274,21 @@ static int ncm_unwrap_ntb(struct gether *port,
 
 		do {
 			index = index2;
+			/* wDatagramIndex[0] */
+			if ((index < opts->nth_size) ||
+				(index > block_len - opts->dpe_size)) {
+				INFO(port->func.config->cdev,
+				     "Bad index: %#X\n", index);
+				goto err;
+			}
+
 			dg_len = dg_len2;
-			if (dg_len < 14 + crc_len) { /* ethernet hdr + crc */
+			/*
+			 * wDatagramLength[0]
+			 * ethernet hdr + crc or larger than max frame size
+			 */
+			if ((dg_len < 14 + crc_len) ||
+				(dg_len > frame_max)) {
 				INFO(port->func.config->cdev,
 				     "Bad dgram length: %#X\n", dg_len);
 				goto err;
@@ -1283,6 +1312,37 @@ static int ncm_unwrap_ntb(struct gether *port,
 			index2 = get_ncm(&tmp, opts->dgram_item_len);
 			dg_len2 = get_ncm(&tmp, opts->dgram_item_len);
 
+			if (index2 == 0 || dg_len2 == 0)
+				break;
+
+			/* wDatagramIndex[1] */
+			if (ndp_after_header) {
+				if (index2 < opts->nth_size + opts->ndp_size) {
+					INFO(port->func.config->cdev,
+					     "Bad index: %#X\n", index2);
+					goto err;
+				}
+			} else {
+				if (index2 < opts->nth_size + opts->dpe_size) {
+					INFO(port->func.config->cdev,
+					     "Bad index: %#X\n", index2);
+					goto err;
+				}
+			}
+			if (index2 > block_len - opts->dpe_size) {
+				INFO(port->func.config->cdev,
+				     "Bad index: %#X\n", index2);
+				goto err;
+			}
+
+			/* wDatagramLength[1] */
+			if ((dg_len2 < 14 + crc_len) ||
+				(dg_len2 > frame_max)) {
+				INFO(port->func.config->cdev,
+				     "Bad dgram length: %#X\n", dg_len);
+				goto err;
+			}
+
 			/*
 			 * Copy the data into a new skb.
 			 * This ensures the truesize is correct
@@ -1299,9 +1359,6 @@ static int ncm_unwrap_ntb(struct gether *port,
 			ndp_len -= 2 * (opts->dgram_item_len * 2);
 
 			dgram_counter++;
-
-			if (index2 == 0 || dg_len2 == 0)
-				break;
 		} while (ndp_len > 2 * (opts->dgram_item_len * 2));
 	} while (ndp_index);
 
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index eaf556ceac32b..0a45b4ef66a67 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -753,12 +753,13 @@ static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
 		goto err_sts;
 
 	return 0;
+
 err_sts:
-	usb_ep_free_request(fu->ep_status, stream->req_status);
-	stream->req_status = NULL;
-err_out:
 	usb_ep_free_request(fu->ep_out, stream->req_out);
 	stream->req_out = NULL;
+err_out:
+	usb_ep_free_request(fu->ep_in, stream->req_in);
+	stream->req_in = NULL;
 out:
 	return -ENOMEM;
 }
diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h
index eaa13fd3dc7f3..e313c3b8dcb19 100644
--- a/drivers/usb/gadget/u_f.h
+++ b/drivers/usb/gadget/u_f.h
@@ -14,6 +14,7 @@
 #define __U_F_H__
 
 #include <linux/usb/gadget.h>
+#include <linux/overflow.h>
 
 /* Variable Length Array Macros **********************************************/
 #define vla_group(groupname) size_t groupname##__next = 0
@@ -21,21 +22,36 @@
 
 #define vla_item(groupname, type, name, n) \
 	size_t groupname##_##name##__offset = ({			       \
-		size_t align_mask = __alignof__(type) - 1;		       \
-		size_t offset = (groupname##__next + align_mask) & ~align_mask;\
-		size_t size = (n) * sizeof(type);			       \
-		groupname##__next = offset + size;			       \
+		size_t offset = 0;					       \
+		if (groupname##__next != SIZE_MAX) {			       \
+			size_t align_mask = __alignof__(type) - 1;	       \
+			size_t size = array_size(n, sizeof(type));	       \
+			offset = (groupname##__next + align_mask) &	       \
+				  ~align_mask;				       \
+			if (check_add_overflow(offset, size,		       \
+					       &groupname##__next)) {	       \
+				groupname##__next = SIZE_MAX;		       \
+				offset = 0;				       \
+			}						       \
+		}							       \
 		offset;							       \
 	})
 
 #define vla_item_with_sz(groupname, type, name, n) \
-	size_t groupname##_##name##__sz = (n) * sizeof(type);		       \
-	size_t groupname##_##name##__offset = ({			       \
-		size_t align_mask = __alignof__(type) - 1;		       \
-		size_t offset = (groupname##__next + align_mask) & ~align_mask;\
-		size_t size = groupname##_##name##__sz;			       \
-		groupname##__next = offset + size;			       \
-		offset;							       \
+	size_t groupname##_##name##__sz = array_size(n, sizeof(type));	       \
+	size_t groupname##_##name##__offset = ({			       \
+		size_t offset = 0;					       \
+		if (groupname##__next != SIZE_MAX) {			       \
+			size_t align_mask = __alignof__(type) - 1;	       \
+			offset = (groupname##__next + align_mask) &	       \
+				  ~align_mask;				       \
+			if (check_add_overflow(offset, groupname##_##name##__sz,\
+					       &groupname##__next)) {	       \
+				groupname##__next = SIZE_MAX;		       \
+				offset = 0;				       \
+			}						       \
+		}							       \
+		offset;							       \
 	})
 
 #define vla_ptr(ptr, groupname, name) \
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
|
||
index bd40e597f2566..5f5e8a64c8e2e 100644
|
||
--- a/drivers/usb/host/ohci-exynos.c
|
||
+++ b/drivers/usb/host/ohci-exynos.c
|
||
@@ -171,9 +171,8 @@ static int exynos_ohci_probe(struct platform_device *pdev)
|
||
hcd->rsrc_len = resource_size(res);
|
||
|
||
irq = platform_get_irq(pdev, 0);
|
||
- if (!irq) {
|
||
- dev_err(&pdev->dev, "Failed to get IRQ\n");
|
||
- err = -ENODEV;
|
||
+ if (irq < 0) {
|
||
+ err = irq;
|
||
goto fail_io;
|
||
}
|
||
|
||
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
|
||
index 76c3f29562d2b..448d7b11dec4c 100644
|
||
--- a/drivers/usb/host/xhci-debugfs.c
|
||
+++ b/drivers/usb/host/xhci-debugfs.c
|
||
@@ -273,7 +273,7 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused)
|
||
|
||
static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
|
||
{
|
||
- int dci;
|
||
+ int ep_index;
|
||
dma_addr_t dma;
|
||
struct xhci_hcd *xhci;
|
||
struct xhci_ep_ctx *ep_ctx;
|
||
@@ -282,9 +282,9 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
|
||
|
||
xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
|
||
|
||
- for (dci = 1; dci < 32; dci++) {
|
||
- ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci);
|
||
- dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params);
|
||
+ for (ep_index = 0; ep_index < 31; ep_index++) {
|
||
+ ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
|
||
+ dma = dev->out_ctx->dma + (ep_index + 1) * CTX_SIZE(xhci->hcc_params);
|
||
seq_printf(s, "%pad: %s\n", &dma,
|
||
xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info),
|
||
le32_to_cpu(ep_ctx->ep_info2),
|
||
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
|
||
index f37316d2c8fa4..fa8f7935c2abe 100644
|
||
--- a/drivers/usb/host/xhci-hub.c
|
||
+++ b/drivers/usb/host/xhci-hub.c
|
||
@@ -740,15 +740,6 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
|
||
{
|
||
u32 pls = status_reg & PORT_PLS_MASK;
|
||
|
||
- /* resume state is a xHCI internal state.
|
||
- * Do not report it to usb core, instead, pretend to be U3,
|
||
- * thus usb core knows it's not ready for transfer
|
||
- */
|
||
- if (pls == XDEV_RESUME) {
|
||
- *status |= USB_SS_PORT_LS_U3;
|
||
- return;
|
||
- }
|
||
-
|
||
/* When the CAS bit is set then warm reset
|
||
* should be performed on port
|
||
*/
|
||
@@ -770,6 +761,16 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
|
||
*/
|
||
pls |= USB_PORT_STAT_CONNECTION;
|
||
} else {
|
||
+ /*
|
||
+ * Resume state is an xHCI internal state. Do not report it to
|
||
+ * usb core, instead, pretend to be U3, thus usb core knows
|
||
+ * it's not ready for transfer.
|
||
+ */
|
||
+ if (pls == XDEV_RESUME) {
|
||
+ *status |= USB_SS_PORT_LS_U3;
|
||
+ return;
|
||
+ }
|
||
+
|
||
/*
|
||
* If CAS bit isn't set but the Port is already at
|
||
* Compliance Mode, fake a connection so the USB core
|
||
diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c
|
||
index 59b1965ad0a3f..f97ac9f52bf4d 100644
|
||
--- a/drivers/usb/host/xhci-pci-renesas.c
|
||
+++ b/drivers/usb/host/xhci-pci-renesas.c
|
||
@@ -50,20 +50,6 @@
|
||
#define RENESAS_RETRY 10000
|
||
#define RENESAS_DELAY 10
|
||
|
||
-#define ROM_VALID_01 0x2013
|
||
-#define ROM_VALID_02 0x2026
|
||
-
|
||
-static int renesas_verify_fw_version(struct pci_dev *pdev, u32 version)
|
||
-{
|
||
- switch (version) {
|
||
- case ROM_VALID_01:
|
||
- case ROM_VALID_02:
|
||
- return 0;
|
||
- }
|
||
- dev_err(&pdev->dev, "FW has invalid version :%d\n", version);
|
||
- return -EINVAL;
|
||
-}
|
||
-
|
||
static int renesas_fw_download_image(struct pci_dev *dev,
|
||
const u32 *fw, size_t step, bool rom)
|
||
{
|
||
@@ -202,10 +188,7 @@ static int renesas_check_rom_state(struct pci_dev *pdev)
|
||
|
||
version &= RENESAS_FW_VERSION_FIELD;
|
||
version = version >> RENESAS_FW_VERSION_OFFSET;
|
||
-
|
||
- err = renesas_verify_fw_version(pdev, version);
|
||
- if (err)
|
||
- return err;
|
||
+ dev_dbg(&pdev->dev, "Found ROM version: %x\n", version);
|
||
|
||
/*
|
||
* Test if ROM is present and loaded, if so we can skip everything
|
||
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
|
||
index ee6bf01775bba..545bdecc8f15e 100644
|
||
--- a/drivers/usb/host/xhci-tegra.c
|
||
+++ b/drivers/usb/host/xhci-tegra.c
|
||
@@ -1136,7 +1136,7 @@ static struct phy *tegra_xusb_get_phy(struct tegra_xusb *tegra, char *name,
|
||
unsigned int i, phy_count = 0;
|
||
|
||
for (i = 0; i < tegra->soc->num_types; i++) {
|
||
- if (!strncmp(tegra->soc->phy_types[i].name, "usb2",
|
||
+ if (!strncmp(tegra->soc->phy_types[i].name, name,
|
||
strlen(name)))
|
||
return tegra->phys[phy_count+port];
|
||
|
||
@@ -1258,6 +1258,8 @@ static int tegra_xusb_init_usb_phy(struct tegra_xusb *tegra)
|
||
|
||
INIT_WORK(&tegra->id_work, tegra_xhci_id_work);
|
||
tegra->id_nb.notifier_call = tegra_xhci_id_notify;
|
||
+ tegra->otg_usb2_port = -EINVAL;
|
||
+ tegra->otg_usb3_port = -EINVAL;
|
||
|
||
for (i = 0; i < tegra->num_usb_phys; i++) {
|
||
struct phy *phy = tegra_xusb_get_phy(tegra, "usb2", i);
|
||
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
|
||
index ed468eed299c5..113ab5d3cbfe5 100644
|
||
--- a/drivers/usb/host/xhci.c
|
||
+++ b/drivers/usb/host/xhci.c
|
||
@@ -3236,10 +3236,11 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
|
||
|
||
wait_for_completion(cfg_cmd->completion);
|
||
|
||
- ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
|
||
xhci_free_command(xhci, cfg_cmd);
|
||
cleanup:
|
||
xhci_free_command(xhci, stop_cmd);
|
||
+ if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
|
||
+ ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
|
||
}
|
||
|
||
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
|
||
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
|
||
index 407fe7570f3bc..f8686139d6f39 100644
|
||
--- a/drivers/usb/misc/lvstest.c
|
||
+++ b/drivers/usb/misc/lvstest.c
|
||
@@ -426,7 +426,7 @@ static int lvs_rh_probe(struct usb_interface *intf,
|
||
USB_DT_SS_HUB_SIZE, USB_CTRL_GET_TIMEOUT);
|
||
if (ret < (USB_DT_HUB_NONVAR_SIZE + 2)) {
|
||
dev_err(&hdev->dev, "wrong root hub descriptor read %d\n", ret);
|
||
- return ret;
|
||
+ return ret < 0 ? ret : -EINVAL;
|
||
}
|
||
|
||
/* submit urb to poll interrupt endpoint */
|
||
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
|
||
index fc8a5da4a07c9..0734e6dd93862 100644
|
||
--- a/drivers/usb/misc/sisusbvga/sisusb.c
|
||
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
|
||
@@ -761,7 +761,7 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
|
||
u8 swap8, fromkern = kernbuffer ? 1 : 0;
|
||
u16 swap16;
|
||
u32 swap32, flag = (length >> 28) & 1;
|
||
- char buf[4];
|
||
+ u8 buf[4];
|
||
|
||
/* if neither kernbuffer not userbuffer are given, assume
|
||
* data in obuf
|
||
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
|
||
index be0505b8b5d4e..785080f790738 100644
|
||
--- a/drivers/usb/misc/yurex.c
|
||
+++ b/drivers/usb/misc/yurex.c
|
||
@@ -492,7 +492,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
|
||
prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE);
|
||
dev_dbg(&dev->interface->dev, "%s - submit %c\n", __func__,
|
||
dev->cntl_buffer[0]);
|
||
- retval = usb_submit_urb(dev->cntl_urb, GFP_KERNEL);
|
||
+ retval = usb_submit_urb(dev->cntl_urb, GFP_ATOMIC);
|
||
if (retval >= 0)
|
||
timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
|
||
finish_wait(&dev->waitq, &wait);
|
||
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
|
||
index b6a9a74516201..e5f9557690f9e 100644
|
||
--- a/drivers/usb/storage/unusual_devs.h
|
||
+++ b/drivers/usb/storage/unusual_devs.h
|
||
@@ -2328,7 +2328,7 @@ UNUSUAL_DEV( 0x357d, 0x7788, 0x0114, 0x0114,
|
||
"JMicron",
|
||
"USB to ATA/ATAPI Bridge",
|
||
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
|
||
- US_FL_BROKEN_FUA ),
|
||
+ US_FL_BROKEN_FUA | US_FL_IGNORE_UAS ),
|
||
|
||
/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
|
||
UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
|
||
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
|
||
index 162b09d69f62f..711ab240058c7 100644
|
||
--- a/drivers/usb/storage/unusual_uas.h
|
||
+++ b/drivers/usb/storage/unusual_uas.h
|
||
@@ -28,6 +28,13 @@
|
||
* and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
|
||
*/
|
||
|
||
+/* Reported-by: Till Dörges <doerges@pre-sense.de> */
|
||
+UNUSUAL_DEV(0x054c, 0x087d, 0x0000, 0x9999,
|
||
+ "Sony",
|
||
+ "PSZ-HA*",
|
||
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
|
||
+ US_FL_NO_REPORT_OPCODES),
|
||
+
|
||
/* Reported-by: Julian Groß <julian.g@posteo.de> */
|
||
UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
|
||
"LaCie",
|
||
@@ -80,6 +87,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
|
||
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
|
||
US_FL_BROKEN_FUA),
|
||
|
||
+/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
|
||
+UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
|
||
+ "PNY",
|
||
+ "Pro Elite SSD",
|
||
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
|
||
+ US_FL_NO_ATA_1X),
|
||
+
|
||
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
|
||
UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
|
||
"VIA",
|
||
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
|
||
index 82b19ebd7838e..b2111fe6d140a 100644
|
||
--- a/drivers/usb/typec/tcpm/tcpm.c
|
||
+++ b/drivers/usb/typec/tcpm/tcpm.c
|
||
@@ -3321,13 +3321,31 @@ static void run_state_machine(struct tcpm_port *port)
|
||
tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
|
||
break;
|
||
case SRC_HARD_RESET_VBUS_OFF:
|
||
- tcpm_set_vconn(port, true);
|
||
+ /*
|
||
+ * 7.1.5 Response to Hard Resets
|
||
+ * Hard Reset Signaling indicates a communication failure has occurred and the
|
||
+ * Source Shall stop driving VCONN, Shall remove Rp from the VCONN pin and Shall
|
||
+ * drive VBUS to vSafe0V as shown in Figure 7-9.
|
||
+ */
|
||
+ tcpm_set_vconn(port, false);
|
||
tcpm_set_vbus(port, false);
|
||
tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
|
||
tcpm_data_role_for_source(port));
|
||
- tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
|
||
+ /*
|
||
+ * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V +
|
||
+ * PD_T_SRC_RECOVER before turning vbus back on.
|
||
+ * From Table 7-12 Sequence Description for a Source Initiated Hard Reset:
|
||
+ * 4. Policy Engine waits tPSHardReset after sending Hard Reset Signaling and then
|
||
+ * tells the Device Policy Manager to instruct the power supply to perform a
|
||
+ * Hard Reset. The transition to vSafe0V Shall occur within tSafe0V (t2).
|
||
+ * 5. After tSrcRecover the Source applies power to VBUS in an attempt to
|
||
+ * re-establish communication with the Sink and resume USB Default Operation.
|
||
+ * The transition to vSafe5V Shall occur within tSrcTurnOn(t4).
|
||
+ */
|
||
+ tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SAFE_0V + PD_T_SRC_RECOVER);
|
||
break;
|
||
case SRC_HARD_RESET_VBUS_ON:
|
||
+ tcpm_set_vconn(port, true);
|
||
tcpm_set_vbus(port, true);
|
||
port->tcpc->set_pd_rx(port->tcpc, true);
|
||
tcpm_set_attached_state(port, true);
|
||
@@ -3887,7 +3905,11 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
|
||
tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
|
||
break;
|
||
case SRC_HARD_RESET_VBUS_OFF:
|
||
- tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, 0);
|
||
+ /*
|
||
+ * After establishing the vSafe0V voltage condition on VBUS, the Source Shall wait
|
||
+ * tSrcRecover before re-applying VCONN and restoring VBUS to vSafe5V.
|
||
+ */
|
||
+ tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
|
||
break;
|
||
case HARD_RESET_SEND:
|
||
break;
|
||
diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
|
||
index 048381c058a5b..261131c9e37c6 100644
|
||
--- a/drivers/usb/typec/ucsi/displayport.c
|
||
+++ b/drivers/usb/typec/ucsi/displayport.c
|
||
@@ -288,8 +288,6 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con,
|
||
struct typec_altmode *alt;
|
||
struct ucsi_dp *dp;
|
||
|
||
- mutex_lock(&con->lock);
|
||
-
|
||
/* We can't rely on the firmware with the capabilities. */
|
||
desc->vdo |= DP_CAP_DP_SIGNALING | DP_CAP_RECEPTACLE;
|
||
|
||
@@ -298,15 +296,12 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con,
|
||
desc->vdo |= all_assignments << 16;
|
||
|
||
alt = typec_port_register_altmode(con->port, desc);
|
||
- if (IS_ERR(alt)) {
|
||
- mutex_unlock(&con->lock);
|
||
+ if (IS_ERR(alt))
|
||
return alt;
|
||
- }
|
||
|
||
dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL);
|
||
if (!dp) {
|
||
typec_unregister_altmode(alt);
|
||
- mutex_unlock(&con->lock);
|
||
return ERR_PTR(-ENOMEM);
|
||
}
|
||
|
||
@@ -319,7 +314,5 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con,
|
||
alt->ops = &ucsi_displayport_ops;
|
||
typec_altmode_set_drvdata(alt, dp);
|
||
|
||
- mutex_unlock(&con->lock);
|
||
-
|
||
return alt;
|
||
}
|
||
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
|
||
index d0c63afaf345d..2999217c81090 100644
|
||
--- a/drivers/usb/typec/ucsi/ucsi.c
|
||
+++ b/drivers/usb/typec/ucsi/ucsi.c
|
||
@@ -146,40 +146,33 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
|
||
return UCSI_CCI_LENGTH(cci);
|
||
}
|
||
|
||
-static int ucsi_run_command(struct ucsi *ucsi, u64 command,
|
||
- void *data, size_t size)
|
||
+int ucsi_send_command(struct ucsi *ucsi, u64 command,
|
||
+ void *data, size_t size)
|
||
{
|
||
u8 length;
|
||
int ret;
|
||
|
||
+ mutex_lock(&ucsi->ppm_lock);
|
||
+
|
||
ret = ucsi_exec_command(ucsi, command);
|
||
if (ret < 0)
|
||
- return ret;
|
||
+ goto out;
|
||
|
||
length = ret;
|
||
|
||
if (data) {
|
||
ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, data, size);
|
||
if (ret)
|
||
- return ret;
|
||
+ goto out;
|
||
}
|
||
|
||
ret = ucsi_acknowledge_command(ucsi);
|
||
if (ret)
|
||
- return ret;
|
||
-
|
||
- return length;
|
||
-}
|
||
-
|
||
-int ucsi_send_command(struct ucsi *ucsi, u64 command,
|
||
- void *retval, size_t size)
|
||
-{
|
||
- int ret;
|
||
+ goto out;
|
||
|
||
- mutex_lock(&ucsi->ppm_lock);
|
||
- ret = ucsi_run_command(ucsi, command, retval, size);
|
||
+ ret = length;
|
||
+out:
|
||
mutex_unlock(&ucsi->ppm_lock);
|
||
-
|
||
return ret;
|
||
}
|
||
EXPORT_SYMBOL_GPL(ucsi_send_command);
|
||
@@ -205,7 +198,7 @@ void ucsi_altmode_update_active(struct ucsi_connector *con)
|
||
int i;
|
||
|
||
command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(con->num);
|
||
- ret = ucsi_run_command(con->ucsi, command, &cur, sizeof(cur));
|
||
+ ret = ucsi_send_command(con->ucsi, command, &cur, sizeof(cur));
|
||
if (ret < 0) {
|
||
if (con->ucsi->version > 0x0100) {
|
||
dev_err(con->ucsi->dev,
|
||
@@ -354,7 +347,7 @@ ucsi_register_altmodes_nvidia(struct ucsi_connector *con, u8 recipient)
|
||
command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
|
||
command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
|
||
command |= UCSI_GET_ALTMODE_OFFSET(i);
|
||
- len = ucsi_run_command(con->ucsi, command, &alt, sizeof(alt));
|
||
+ len = ucsi_send_command(con->ucsi, command, &alt, sizeof(alt));
|
||
/*
|
||
* We are collecting all altmodes first and then registering.
|
||
* Some type-C device will return zero length data beyond last
|
||
@@ -431,7 +424,7 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
|
||
command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
|
||
command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
|
||
command |= UCSI_GET_ALTMODE_OFFSET(i);
|
||
- len = ucsi_run_command(con->ucsi, command, alt, sizeof(alt));
|
||
+ len = ucsi_send_command(con->ucsi, command, alt, sizeof(alt));
|
||
if (len <= 0)
|
||
return len;
|
||
|
||
@@ -502,7 +495,7 @@ static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
|
||
command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
|
||
command |= UCSI_GET_PDOS_NUM_PDOS(UCSI_MAX_PDOS - 1);
|
||
command |= UCSI_GET_PDOS_SRC_PDOS;
|
||
- ret = ucsi_run_command(ucsi, command, con->src_pdos,
|
||
+ ret = ucsi_send_command(ucsi, command, con->src_pdos,
|
||
sizeof(con->src_pdos));
|
||
if (ret < 0) {
|
||
dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
|
||
@@ -681,7 +674,7 @@ static void ucsi_handle_connector_change(struct work_struct *work)
|
||
*/
|
||
command = UCSI_GET_CAM_SUPPORTED;
|
||
command |= UCSI_CONNECTOR_NUMBER(con->num);
|
||
- ucsi_run_command(con->ucsi, command, NULL, 0);
|
||
+ ucsi_send_command(con->ucsi, command, NULL, 0);
|
||
}
|
||
|
||
if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE)
|
||
@@ -736,20 +729,24 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
|
||
u32 cci;
|
||
int ret;
|
||
|
||
+ mutex_lock(&ucsi->ppm_lock);
|
||
+
|
||
ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command,
|
||
sizeof(command));
|
||
if (ret < 0)
|
||
- return ret;
|
||
+ goto out;
|
||
|
||
tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
|
||
|
||
do {
|
||
- if (time_is_before_jiffies(tmo))
|
||
- return -ETIMEDOUT;
|
||
+ if (time_is_before_jiffies(tmo)) {
|
||
+ ret = -ETIMEDOUT;
|
||
+ goto out;
|
||
+ }
|
||
|
||
ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
|
||
if (ret)
|
||
- return ret;
|
||
+ goto out;
|
||
|
||
/* If the PPM is still doing something else, reset it again. */
|
||
if (cci & ~UCSI_CCI_RESET_COMPLETE) {
|
||
@@ -757,13 +754,15 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
|
||
&command,
|
||
sizeof(command));
|
||
if (ret < 0)
|
||
- return ret;
|
||
+ goto out;
|
||
}
|
||
|
||
msleep(20);
|
||
} while (!(cci & UCSI_CCI_RESET_COMPLETE));
|
||
|
||
- return 0;
|
||
+out:
|
||
+ mutex_unlock(&ucsi->ppm_lock);
|
||
+ return ret;
|
||
}
|
||
|
||
static int ucsi_role_cmd(struct ucsi_connector *con, u64 command)
|
||
@@ -775,9 +774,7 @@ static int ucsi_role_cmd(struct ucsi_connector *con, u64 command)
|
||
u64 c;
|
||
|
||
/* PPM most likely stopped responding. Resetting everything. */
|
||
- mutex_lock(&con->ucsi->ppm_lock);
|
||
ucsi_reset_ppm(con->ucsi);
|
||
- mutex_unlock(&con->ucsi->ppm_lock);
|
||
|
||
c = UCSI_SET_NOTIFICATION_ENABLE | con->ucsi->ntfy;
|
||
ucsi_send_command(con->ucsi, c, NULL, 0);
|
||
@@ -901,12 +898,15 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
|
||
con->num = index + 1;
|
||
con->ucsi = ucsi;
|
||
|
||
+ /* Delay other interactions with the con until registration is complete */
|
||
+ mutex_lock(&con->lock);
|
||
+
|
||
/* Get connector capability */
|
||
command = UCSI_GET_CONNECTOR_CAPABILITY;
|
||
command |= UCSI_CONNECTOR_NUMBER(con->num);
|
||
- ret = ucsi_run_command(ucsi, command, &con->cap, sizeof(con->cap));
|
||
+ ret = ucsi_send_command(ucsi, command, &con->cap, sizeof(con->cap));
|
||
if (ret < 0)
|
||
- return ret;
|
||
+ goto out;
|
||
|
||
if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DRP)
|
||
cap->data = TYPEC_PORT_DRD;
|
||
@@ -938,27 +938,32 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
|
||
|
||
ret = ucsi_register_port_psy(con);
|
||
if (ret)
|
||
- return ret;
|
||
+ goto out;
|
||
|
||
/* Register the connector */
|
||
con->port = typec_register_port(ucsi->dev, cap);
|
||
- if (IS_ERR(con->port))
|
||
- return PTR_ERR(con->port);
|
||
+ if (IS_ERR(con->port)) {
|
||
+ ret = PTR_ERR(con->port);
|
||
+ goto out;
|
||
+ }
|
||
|
||
/* Alternate modes */
|
||
ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_CON);
|
||
- if (ret)
|
||
+ if (ret) {
|
||
dev_err(ucsi->dev, "con%d: failed to register alt modes\n",
|
||
con->num);
|
||
+ goto out;
|
||
+ }
|
||
|
||
/* Get the status */
|
||
command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
|
||
- ret = ucsi_run_command(ucsi, command, &con->status,
|
||
- sizeof(con->status));
|
||
+ ret = ucsi_send_command(ucsi, command, &con->status, sizeof(con->status));
|
||
if (ret < 0) {
|
||
dev_err(ucsi->dev, "con%d: failed to get status\n", con->num);
|
||
- return 0;
|
||
+ ret = 0;
|
||
+ goto out;
|
||
}
|
||
+ ret = 0; /* ucsi_send_command() returns length on success */
|
||
|
||
switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
|
||
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
|
||
@@ -983,17 +988,21 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
|
||
|
||
if (con->partner) {
|
||
ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP);
|
||
- if (ret)
|
||
+ if (ret) {
|
||
dev_err(ucsi->dev,
|
||
"con%d: failed to register alternate modes\n",
|
||
con->num);
|
||
- else
|
||
+ ret = 0;
|
||
+ } else {
|
||
ucsi_altmode_update_active(con);
|
||
+ }
|
||
}
|
||
|
||
trace_ucsi_register_port(con->num, &con->status);
|
||
|
||
- return 0;
|
||
+out:
|
||
+ mutex_unlock(&con->lock);
|
||
+ return ret;
|
||
}
|
||
|
||
/**
|
||
@@ -1009,8 +1018,6 @@ int ucsi_init(struct ucsi *ucsi)
|
||
int ret;
|
||
int i;
|
||
|
||
- mutex_lock(&ucsi->ppm_lock);
|
||
-
|
||
/* Reset the PPM */
|
||
ret = ucsi_reset_ppm(ucsi);
|
||
if (ret) {
|
||
@@ -1021,13 +1028,13 @@ int ucsi_init(struct ucsi *ucsi)
|
||
/* Enable basic notifications */
|
||
ucsi->ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
|
||
command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
|
||
- ret = ucsi_run_command(ucsi, command, NULL, 0);
|
||
+ ret = ucsi_send_command(ucsi, command, NULL, 0);
|
||
if (ret < 0)
|
||
goto err_reset;
|
||
|
||
/* Get PPM capabilities */
|
||
command = UCSI_GET_CAPABILITY;
|
||
- ret = ucsi_run_command(ucsi, command, &ucsi->cap, sizeof(ucsi->cap));
|
||
+ ret = ucsi_send_command(ucsi, command, &ucsi->cap, sizeof(ucsi->cap));
|
||
if (ret < 0)
|
||
goto err_reset;
|
||
|
||
@@ -1054,12 +1061,10 @@ int ucsi_init(struct ucsi *ucsi)
|
||
/* Enable all notifications */
|
||
ucsi->ntfy = UCSI_ENABLE_NTFY_ALL;
|
||
command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
|
||
- ret = ucsi_run_command(ucsi, command, NULL, 0);
|
||
+ ret = ucsi_send_command(ucsi, command, NULL, 0);
|
||
if (ret < 0)
|
||
goto err_unregister;
|
||
|
||
- mutex_unlock(&ucsi->ppm_lock);
|
||
-
|
||
return 0;
|
||
|
||
err_unregister:
|
||
@@ -1074,8 +1079,6 @@ err_unregister:
|
||
err_reset:
|
||
ucsi_reset_ppm(ucsi);
|
||
err:
|
||
- mutex_unlock(&ucsi->ppm_lock);
|
||
-
|
||
return ret;
|
||
}
|
||
EXPORT_SYMBOL_GPL(ucsi_init);
|
||
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
|
||
index 2305d425e6c9a..9d7d642022d1f 100644
|
||
--- a/drivers/usb/usbip/stub_dev.c
|
||
+++ b/drivers/usb/usbip/stub_dev.c
|
||
@@ -461,6 +461,11 @@ static void stub_disconnect(struct usb_device *udev)
|
||
return;
|
||
}
|
||
|
||
+static bool usbip_match(struct usb_device *udev)
|
||
+{
|
||
+ return true;
|
||
+}
|
||
+
|
||
#ifdef CONFIG_PM
|
||
|
||
/* These functions need usb_port_suspend and usb_port_resume,
|
||
@@ -486,6 +491,7 @@ struct usb_device_driver stub_driver = {
|
||
.name = "usbip-host",
|
||
.probe = stub_probe,
|
||
.disconnect = stub_disconnect,
|
||
+ .match = usbip_match,
|
||
#ifdef CONFIG_PM
|
||
.suspend = stub_suspend,
|
||
.resume = stub_resume,
|
||
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
|
||
index f4554412e607f..29efa75cdfce5 100644
|
||
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
|
||
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
|
||
@@ -84,7 +84,7 @@ struct ifcvf_hw {
|
||
void __iomem * const *base;
|
||
char config_msix_name[256];
|
||
struct vdpa_callback config_cb;
|
||
-
|
||
+ unsigned int config_irq;
|
||
};
|
||
|
||
struct ifcvf_adapter {
|
||
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
|
||
index f5a60c14b9799..7a6d899e541df 100644
|
||
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
|
||
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
|
||
@@ -53,6 +53,7 @@ static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
|
||
for (i = 0; i < queues; i++)
|
||
devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
|
||
|
||
+ devm_free_irq(&pdev->dev, vf->config_irq, vf);
|
||
ifcvf_free_irq_vectors(pdev);
|
||
}
|
||
|
||
@@ -72,10 +73,14 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
|
||
snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
|
||
pci_name(pdev));
|
||
vector = 0;
|
||
- irq = pci_irq_vector(pdev, vector);
|
||
- ret = devm_request_irq(&pdev->dev, irq,
|
||
+ vf->config_irq = pci_irq_vector(pdev, vector);
|
||
+ ret = devm_request_irq(&pdev->dev, vf->config_irq,
|
||
ifcvf_config_changed, 0,
|
||
vf->config_msix_name, vf);
|
||
+ if (ret) {
|
||
+ IFCVF_ERR(pdev, "Failed to request config irq\n");
|
||
+ return ret;
|
||
+ }
|
||
|
||
for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
|
||
snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
|
||
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
|
||
index 9c4f1be856eca..547abeb39f87a 100644
|
||
--- a/drivers/video/fbdev/controlfb.c
|
||
+++ b/drivers/video/fbdev/controlfb.c
|
||
@@ -49,6 +49,8 @@
|
||
#include <linux/cuda.h>
|
||
#ifdef CONFIG_PPC_PMAC
|
||
#include <asm/prom.h>
|
||
+#endif
|
||
+#ifdef CONFIG_BOOTX_TEXT
|
||
#include <asm/btext.h>
|
||
#endif
|
||
|
||
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
|
||
index e2a490c5ae08f..fbf10e62bcde9 100644
|
||
--- a/drivers/video/fbdev/core/fbcon.c
|
||
+++ b/drivers/video/fbdev/core/fbcon.c
|
||
@@ -2191,6 +2191,9 @@ static void updatescrollmode(struct fbcon_display *p,
|
||
}
|
||
}
|
||
|
||
+#define PITCH(w) (((w) + 7) >> 3)
|
||
+#define CALC_FONTSZ(h, p, c) ((h) * (p) * (c)) /* size = height * pitch * charcount */
|
||
+
|
||
static int fbcon_resize(struct vc_data *vc, unsigned int width,
|
||
unsigned int height, unsigned int user)
|
||
{
|
||
@@ -2200,6 +2203,24 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
|
||
struct fb_var_screeninfo var = info->var;
|
||
int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh;
|
||
|
||
+ if (ops->p && ops->p->userfont && FNTSIZE(vc->vc_font.data)) {
|
||
+ int size;
|
||
+ int pitch = PITCH(vc->vc_font.width);
|
||
+
|
||
+ /*
|
||
+ * If user font, ensure that a possible change to user font
|
||
+ * height or width will not allow a font data out-of-bounds access.
|
||
+ * NOTE: must use original charcount in calculation as font
|
||
+ * charcount can change and cannot be used to determine the
|
||
+ * font data allocated size.
|
||
+ */
|
||
+ if (pitch <= 0)
|
||
+ return -EINVAL;
|
||
+ size = CALC_FONTSZ(vc->vc_font.height, pitch, FNTCHARCNT(vc->vc_font.data));
|
||
+ if (size > FNTSIZE(vc->vc_font.data))
|
||
+ return -EINVAL;
|
||
+ }
|
||
+
|
||
virt_w = FBCON_SWAP(ops->rotate, width, height);
|
||
virt_h = FBCON_SWAP(ops->rotate, height, width);
|
||
virt_fw = FBCON_SWAP(ops->rotate, vc->vc_font.width,
|
||
@@ -2652,7 +2673,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
|
||
int size;
|
||
int i, csum;
|
||
u8 *new_data, *data = font->data;
|
||
- int pitch = (font->width+7) >> 3;
|
||
+ int pitch = PITCH(font->width);
|
||
|
||
/* Is there a reason why fbconsole couldn't handle any charcount >256?
|
||
* If not this check should be changed to charcount < 256 */
|
||
@@ -2668,7 +2689,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
|
||
if (fbcon_invalid_charcount(info, charcount))
|
||
return -EINVAL;
|
||
|
||
- size = h * pitch * charcount;
|
||
+ size = CALC_FONTSZ(h, pitch, charcount);
|
||
|
||
new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER);
|
||
|
||
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
|
||
index 30e73ec4ad5c8..da7c88ffaa6a8 100644
|
||
--- a/drivers/video/fbdev/core/fbmem.c
|
||
+++ b/drivers/video/fbdev/core/fbmem.c
|
||
@@ -957,7 +957,6 @@ static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var,
|
||
int
|
||
fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
|
||
{
|
||
- int flags = info->flags;
|
||
int ret = 0;
|
||
u32 activate;
|
||
struct fb_var_screeninfo old_var;
|
||
@@ -1052,9 +1051,6 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
|
||
event.data = &mode;
|
||
fb_notifier_call_chain(FB_EVENT_MODE_CHANGE, &event);
|
||
|
||
- if (flags & FBINFO_MISC_USEREVENT)
|
||
- fbcon_update_vcs(info, activate & FB_ACTIVATE_ALL);
|
||
-
|
||
return 0;
|
||
}
|
||
EXPORT_SYMBOL(fb_set_var);
|
||
@@ -1105,9 +1101,9 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
|
||
return -EFAULT;
|
||
console_lock();
|
||
lock_fb_info(info);
|
||
- info->flags |= FBINFO_MISC_USEREVENT;
|
||
ret = fb_set_var(info, &var);
|
||
- info->flags &= ~FBINFO_MISC_USEREVENT;
|
||
+ if (!ret)
|
||
+ fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL);
|
||
unlock_fb_info(info);
|
||
console_unlock();
|
||
if (!ret && copy_to_user(argp, &var, sizeof(var)))
|
||
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
|
||
index d54c88f88991d..65dae05fff8e6 100644
|
||
--- a/drivers/video/fbdev/core/fbsysfs.c
|
||
+++ b/drivers/video/fbdev/core/fbsysfs.c
|
||
@@ -91,9 +91,9 @@ static int activate(struct fb_info *fb_info, struct fb_var_screeninfo *var)
|
||
|
||
var->activate |= FB_ACTIVATE_FORCE;
|
||
console_lock();
|
||
- fb_info->flags |= FBINFO_MISC_USEREVENT;
|
||
err = fb_set_var(fb_info, var);
|
||
- fb_info->flags &= ~FBINFO_MISC_USEREVENT;
|
||
+ if (!err)
|
||
+ fbcon_update_vcs(fb_info, var->activate & FB_ACTIVATE_ALL);
|
||
console_unlock();
|
||
if (err)
|
||
return err;
|
||
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
|
||
index 4a16798b2ecd8..e2b572761bf61 100644
|
||
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
|
||
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
|
||
@@ -520,8 +520,11 @@ int dispc_runtime_get(void)
|
||
DSSDBG("dispc_runtime_get\n");
|
||
|
||
r = pm_runtime_get_sync(&dispc.pdev->dev);
|
||
- WARN_ON(r < 0);
|
||
- return r < 0 ? r : 0;
|
||
+ if (WARN_ON(r < 0)) {
|
||
+ pm_runtime_put_sync(&dispc.pdev->dev);
|
||
+ return r;
|
||
+ }
|
||
+ return 0;
|
||
}
|
||
EXPORT_SYMBOL(dispc_runtime_get);
|
||
|
||
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
|
||
index d620376216e1d..6f9c25fec9946 100644
|
||
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
|
||
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
|
||
@@ -1137,8 +1137,11 @@ static int dsi_runtime_get(struct platform_device *dsidev)
|
||
DSSDBG("dsi_runtime_get\n");
|
||
|
||
r = pm_runtime_get_sync(&dsi->pdev->dev);
|
||
- WARN_ON(r < 0);
|
||
- return r < 0 ? r : 0;
|
||
+ if (WARN_ON(r < 0)) {
|
||
+ pm_runtime_put_sync(&dsi->pdev->dev);
|
||
+ return r;
|
||
+ }
|
||
+ return 0;
|
||
}
|
||
|
||
static void dsi_runtime_put(struct platform_device *dsidev)
|
||
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
|
||
index bfc5c4c5a26ad..a6b1c1598040d 100644
|
||
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c
|
||
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
|
||
@@ -768,8 +768,11 @@ int dss_runtime_get(void)
|
||
DSSDBG("dss_runtime_get\n");
|
||
|
||
r = pm_runtime_get_sync(&dss.pdev->dev);
|
||
- WARN_ON(r < 0);
|
||
- return r < 0 ? r : 0;
|
||
+ if (WARN_ON(r < 0)) {
|
||
+ pm_runtime_put_sync(&dss.pdev->dev);
|
||
+ return r;
|
||
+ }
|
||
+ return 0;
|
||
}
|
||
|
||
void dss_runtime_put(void)
|
||
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
|
||
index 7060ae56c062c..4804aab342981 100644
|
||
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
|
||
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
|
||
@@ -39,9 +39,10 @@ static int hdmi_runtime_get(void)
|
||
DSSDBG("hdmi_runtime_get\n");
|
||
|
||
r = pm_runtime_get_sync(&hdmi.pdev->dev);
|
||
- WARN_ON(r < 0);
|
||
- if (r < 0)
|
||
+ if (WARN_ON(r < 0)) {
|
||
+ pm_runtime_put_sync(&hdmi.pdev->dev);
|
||
return r;
|
||
+ }
|
||
|
||
return 0;
|
||
}
|
||
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
|
||
index ac49531e47327..a06b6f1355bdb 100644
|
||
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
|
||
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
|
||
@@ -43,9 +43,10 @@ static int hdmi_runtime_get(void)
|
||
DSSDBG("hdmi_runtime_get\n");
|
||
|
||
r = pm_runtime_get_sync(&hdmi.pdev->dev);
|
||
- WARN_ON(r < 0);
|
||
- if (r < 0)
|
||
+ if (WARN_ON(r < 0)) {
|
||
+ pm_runtime_put_sync(&hdmi.pdev->dev);
|
||
return r;
|
||
+ }
|
||
|
||
return 0;
|
||
}
|
||
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
|
||
index d5404d56c922f..0b0ad20afd630 100644
|
||
--- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c
|
||
+++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
|
||
@@ -348,8 +348,11 @@ static int venc_runtime_get(void)
|
||
DSSDBG("venc_runtime_get\n");
|
||
|
||
r = pm_runtime_get_sync(&venc.pdev->dev);
|
||
- WARN_ON(r < 0);
|
||
- return r < 0 ? r : 0;
|
||
+ if (WARN_ON(r < 0)) {
|
||
+ pm_runtime_put_sync(&venc.pdev->dev);
|
||
+ return r;
|
||
+ }
|
||
+ return 0;
|
||
}
|
||
|
||
static void venc_runtime_put(void)
|
||
diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c
|
||
index 9df78fb772672..203c254f8f6cb 100644
|
||
--- a/drivers/video/fbdev/ps3fb.c
|
||
+++ b/drivers/video/fbdev/ps3fb.c
|
||
@@ -29,6 +29,7 @@
|
||
#include <linux/freezer.h>
|
||
#include <linux/uaccess.h>
|
||
#include <linux/fb.h>
|
||
+#include <linux/fbcon.h>
|
||
#include <linux/init.h>
|
||
|
||
#include <asm/cell-regs.h>
|
||
@@ -824,12 +825,12 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
|
||
var = info->var;
|
||
fb_videomode_to_var(&var, vmode);
|
||
console_lock();
|
||
- info->flags |= FBINFO_MISC_USEREVENT;
|
||
/* Force, in case only special bits changed */
|
||
var.activate |= FB_ACTIVATE_FORCE;
|
||
par->new_mode_id = val;
|
||
retval = fb_set_var(info, &var);
|
||
- info->flags &= ~FBINFO_MISC_USEREVENT;
|
||
+ if (!retval)
|
||
+ fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL);
|
||
console_unlock();
|
||
}
|
||
break;
|
||
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
|
||
index 140c7bf33a989..90b8f56fbadb1 100644
|
||
--- a/drivers/xen/events/events_base.c
|
||
+++ b/drivers/xen/events/events_base.c
|
||
@@ -156,7 +156,7 @@ int get_evtchn_to_irq(evtchn_port_t evtchn)
|
||
/* Get info for IRQ */
|
||
struct irq_info *info_for_irq(unsigned irq)
|
||
{
|
||
- return irq_get_handler_data(irq);
|
||
+ return irq_get_chip_data(irq);
|
||
}
|
||
|
||
/* Constructors for packed IRQ information. */
|
||
@@ -377,7 +377,7 @@ static void xen_irq_init(unsigned irq)
|
||
info->type = IRQT_UNBOUND;
|
||
info->refcnt = -1;
|
||
|
||
- irq_set_handler_data(irq, info);
|
||
+ irq_set_chip_data(irq, info);
|
||
|
||
list_add_tail(&info->list, &xen_irq_list_head);
|
||
}
|
||
@@ -426,14 +426,14 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
|
||
|
||
static void xen_free_irq(unsigned irq)
|
||
{
|
||
- struct irq_info *info = irq_get_handler_data(irq);
|
||
+ struct irq_info *info = irq_get_chip_data(irq);
|
||
|
||
if (WARN_ON(!info))
|
||
return;
|
||
|
||
list_del(&info->list);
|
||
|
||
- irq_set_handler_data(irq, NULL);
|
||
+ irq_set_chip_data(irq, NULL);
|
||
|
||
WARN_ON(info->refcnt > 0);
|
||
|
||
@@ -603,7 +603,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
|
||
static void __unbind_from_irq(unsigned int irq)
|
||
{
|
||
evtchn_port_t evtchn = evtchn_from_irq(irq);
|
||
- struct irq_info *info = irq_get_handler_data(irq);
|
||
+ struct irq_info *info = irq_get_chip_data(irq);
|
||
|
||
if (info->refcnt > 0) {
|
||
info->refcnt--;
|
||
@@ -1108,7 +1108,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
|
||
|
||
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
|
||
{
|
||
- struct irq_info *info = irq_get_handler_data(irq);
|
||
+ struct irq_info *info = irq_get_chip_data(irq);
|
||
|
||
if (WARN_ON(!info))
|
||
return;
|
||
@@ -1142,7 +1142,7 @@ int evtchn_make_refcounted(evtchn_port_t evtchn)
|
||
if (irq == -1)
|
||
return -ENOENT;
|
||
|
||
- info = irq_get_handler_data(irq);
|
||
+ info = irq_get_chip_data(irq);
|
||
|
||
if (!info)
|
||
return -ENOENT;
|
||
@@ -1170,7 +1170,7 @@ int evtchn_get(evtchn_port_t evtchn)
|
||
if (irq == -1)
|
||
goto done;
|
||
|
||
- info = irq_get_handler_data(irq);
|
||
+ info = irq_get_chip_data(irq);
|
||
|
||
if (!info)
|
||
goto done;
|
||
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
|
||
index 6fdb3392a06d5..284d9afa900b3 100644
|
||
--- a/fs/btrfs/ctree.h
|
||
+++ b/fs/btrfs/ctree.h
|
||
@@ -2468,7 +2468,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
|
||
u64 bytenr, u64 num_bytes);
|
||
int btrfs_exclude_logged_extents(struct extent_buffer *eb);
|
||
int btrfs_cross_ref_exist(struct btrfs_root *root,
|
||
- u64 objectid, u64 offset, u64 bytenr);
|
||
+ u64 objectid, u64 offset, u64 bytenr, bool strict);
|
||
struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
|
||
struct btrfs_root *root,
|
||
u64 parent, u64 root_objectid,
|
||
@@ -2854,7 +2854,7 @@ struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
|
||
u64 start, u64 len);
|
||
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
|
||
u64 *orig_start, u64 *orig_block_len,
|
||
- u64 *ram_bytes);
|
||
+ u64 *ram_bytes, bool strict);
|
||
|
||
void __btrfs_del_delalloc_inode(struct btrfs_root *root,
|
||
struct btrfs_inode *inode);
|
||
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
|
||
index 66618a1794ea7..983f4d58ae59b 100644
|
||
--- a/fs/btrfs/disk-io.c
|
||
+++ b/fs/btrfs/disk-io.c
|
||
@@ -4574,6 +4574,7 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
|
||
cache->io_ctl.inode = NULL;
|
||
iput(inode);
|
||
}
|
||
+ ASSERT(cache->io_ctl.pages == NULL);
|
||
btrfs_put_block_group(cache);
|
||
}
|
||
|
||
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
|
||
index de6fe176fdfb3..5871ef78edbac 100644
|
||
--- a/fs/btrfs/extent-tree.c
|
||
+++ b/fs/btrfs/extent-tree.c
|
||
@@ -2306,7 +2306,8 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
|
||
|
||
static noinline int check_committed_ref(struct btrfs_root *root,
|
||
struct btrfs_path *path,
|
||
- u64 objectid, u64 offset, u64 bytenr)
|
||
+ u64 objectid, u64 offset, u64 bytenr,
|
||
+ bool strict)
|
||
{
|
||
struct btrfs_fs_info *fs_info = root->fs_info;
|
||
struct btrfs_root *extent_root = fs_info->extent_root;
|
||
@@ -2348,9 +2349,13 @@ static noinline int check_committed_ref(struct btrfs_root *root,
|
||
btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
|
||
goto out;
|
||
|
||
- /* If extent created before last snapshot => it's definitely shared */
|
||
- if (btrfs_extent_generation(leaf, ei) <=
|
||
- btrfs_root_last_snapshot(&root->root_item))
|
||
+ /*
|
||
+ * If extent created before last snapshot => it's shared unless the
|
||
+ * snapshot has been deleted. Use the heuristic if strict is false.
|
||
+ */
|
||
+ if (!strict &&
|
||
+ (btrfs_extent_generation(leaf, ei) <=
|
||
+ btrfs_root_last_snapshot(&root->root_item)))
|
||
goto out;
|
||
|
||
iref = (struct btrfs_extent_inline_ref *)(ei + 1);
|
||
@@ -2375,7 +2380,7 @@ out:
|
||
}
|
||
|
||
int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
|
||
- u64 bytenr)
|
||
+ u64 bytenr, bool strict)
|
||
{
|
||
struct btrfs_path *path;
|
||
int ret;
|
||
@@ -2386,7 +2391,7 @@ int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
|
||
|
||
do {
|
||
ret = check_committed_ref(root, path, objectid,
|
||
- offset, bytenr);
|
||
+ offset, bytenr, strict);
|
||
if (ret && ret != -ENOENT)
|
||
goto out;
|
||
|
||
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
|
||
index 1523aa4eaff07..e485f0275e1a6 100644
|
||
--- a/fs/btrfs/file.c
|
||
+++ b/fs/btrfs/file.c
|
||
@@ -1568,7 +1568,7 @@ int btrfs_check_can_nocow(struct btrfs_inode *inode, loff_t pos,
|
||
}
|
||
|
||
ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
|
||
- NULL, NULL, NULL);
|
||
+ NULL, NULL, NULL, false);
|
||
if (ret <= 0) {
|
||
ret = 0;
|
||
if (!nowait)
|
||
@@ -3176,14 +3176,14 @@ reserve_space:
|
||
if (ret < 0)
|
||
goto out;
|
||
space_reserved = true;
|
||
- ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
|
||
- alloc_start, bytes_to_reserve);
|
||
- if (ret)
|
||
- goto out;
|
||
ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
|
||
&cached_state);
|
||
if (ret)
|
||
goto out;
|
||
+ ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
|
||
+ alloc_start, bytes_to_reserve);
|
||
+ if (ret)
|
||
+ goto out;
|
||
ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
|
||
alloc_end - alloc_start,
|
||
i_blocksize(inode),
|
||
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
|
||
index 6f7b6bca6dc5b..53cfcf017b8db 100644
|
||
--- a/fs/btrfs/free-space-cache.c
|
||
+++ b/fs/btrfs/free-space-cache.c
|
||
@@ -1186,7 +1186,6 @@ static int __btrfs_wait_cache_io(struct btrfs_root *root,
|
||
ret = update_cache_item(trans, root, inode, path, offset,
|
||
io_ctl->entries, io_ctl->bitmaps);
|
||
out:
|
||
- io_ctl_free(io_ctl);
|
||
if (ret) {
|
||
invalidate_inode_pages2(inode->i_mapping);
|
||
BTRFS_I(inode)->generation = 0;
|
||
@@ -1346,6 +1345,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
|
||
* them out later
|
||
*/
|
||
io_ctl_drop_pages(io_ctl);
|
||
+ io_ctl_free(io_ctl);
|
||
|
||
unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
|
||
i_size_read(inode) - 1, &cached_state);
|
||
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
|
||
index 7ba1218b1630e..cb2a6893ec417 100644
|
||
--- a/fs/btrfs/inode.c
|
||
+++ b/fs/btrfs/inode.c
|
||
@@ -1611,7 +1611,7 @@ next_slot:
|
||
goto out_check;
|
||
ret = btrfs_cross_ref_exist(root, ino,
|
||
found_key.offset -
|
||
- extent_offset, disk_bytenr);
|
||
+ extent_offset, disk_bytenr, false);
|
||
if (ret) {
|
||
/*
|
||
* ret could be -EIO if the above fails to read
|
||
@@ -6957,7 +6957,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
|
||
*/
|
||
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
|
||
u64 *orig_start, u64 *orig_block_len,
|
||
- u64 *ram_bytes)
|
||
+ u64 *ram_bytes, bool strict)
|
||
{
|
||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
|
||
struct btrfs_path *path;
|
||
@@ -7035,8 +7035,9 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
|
||
* Do the same check as in btrfs_cross_ref_exist but without the
|
||
* unnecessary search.
|
||
*/
|
||
- if (btrfs_file_extent_generation(leaf, fi) <=
|
||
- btrfs_root_last_snapshot(&root->root_item))
|
||
+ if (!strict &&
|
||
+ (btrfs_file_extent_generation(leaf, fi) <=
|
||
+ btrfs_root_last_snapshot(&root->root_item)))
|
||
goto out;
|
||
|
||
backref_offset = btrfs_file_extent_offset(leaf, fi);
|
||
@@ -7072,7 +7073,8 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
|
||
*/
|
||
|
||
ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)),
|
||
- key.offset - backref_offset, disk_bytenr);
|
||
+ key.offset - backref_offset, disk_bytenr,
|
||
+ strict);
|
||
if (ret) {
|
||
ret = 0;
|
||
goto out;
|
||
@@ -7293,7 +7295,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
|
||
block_start = em->block_start + (start - em->start);
|
||
|
||
if (can_nocow_extent(inode, start, &len, &orig_start,
|
||
- &orig_block_len, &ram_bytes) == 1 &&
|
||
+ &orig_block_len, &ram_bytes, false) == 1 &&
|
||
btrfs_inc_nocow_writers(fs_info, block_start)) {
|
||
struct extent_map *em2;
|
||
|
||
@@ -8640,7 +8642,7 @@ void btrfs_destroy_inode(struct inode *inode)
|
||
btrfs_put_ordered_extent(ordered);
|
||
}
|
||
}
|
||
- btrfs_qgroup_check_reserved_leak(inode);
|
||
+ btrfs_qgroup_check_reserved_leak(BTRFS_I(inode));
|
||
inode_tree_del(inode);
|
||
btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
|
||
btrfs_inode_clear_file_extent_range(BTRFS_I(inode), 0, (u64)-1);
|
||
@@ -10103,7 +10105,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
|
||
free_extent_map(em);
|
||
em = NULL;
|
||
|
||
- ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL);
|
||
+ ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, true);
|
||
if (ret < 0) {
|
||
goto out;
|
||
} else if (ret) {
|
||
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 5bd4089ad0e1a..574a669894774 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -3742,7 +3742,7 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
* Check qgroup reserved space leaking, normally at destroy inode
* time
*/
-void btrfs_qgroup_check_reserved_leak(struct inode *inode)
+void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
{
struct extent_changeset changeset;
struct ulist_node *unode;
@@ -3750,19 +3750,19 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode)
int ret;

extent_changeset_init(&changeset);
- ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
+ ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
EXTENT_QGROUP_RESERVED, &changeset);

WARN_ON(ret < 0);
if (WARN_ON(changeset.bytes_changed)) {
ULIST_ITER_INIT(&iter);
while ((unode = ulist_next(&changeset.range_changed, &iter))) {
- btrfs_warn(BTRFS_I(inode)->root->fs_info,
- "leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu",
- inode->i_ino, unode->val, unode->aux);
+ btrfs_warn(inode->root->fs_info,
+ "leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
+ btrfs_ino(inode), unode->val, unode->aux);
}
- btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
- BTRFS_I(inode)->root->root_key.objectid,
+ btrfs_qgroup_free_refroot(inode->root->fs_info,
+ inode->root->root_key.objectid,
changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);

}
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 1bc6544594690..406366f20cb0a 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -399,7 +399,7 @@ void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root);
*/
void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes);

-void btrfs_qgroup_check_reserved_leak(struct inode *inode);
+void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode);

/* btrfs_qgroup_swapped_blocks related functions */
void btrfs_qgroup_init_swapped_blocks(
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 56cd2cf571588..9eb03b0e0dd43 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -558,6 +558,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
} else if (strncmp(args[0].from, "lzo", 3) == 0) {
compress_type = "lzo";
info->compress_type = BTRFS_COMPRESS_LZO;
+ info->compress_level = 0;
btrfs_set_opt(info->mount_opt, COMPRESS);
btrfs_clear_opt(info->mount_opt, NODATACOW);
btrfs_clear_opt(info->mount_opt, NODATASUM);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index d22ff1e0963c6..065439b4bdda5 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3449,11 +3449,13 @@ fail:
btrfs_free_path(path);
out_unlock:
mutex_unlock(&dir->log_mutex);
- if (ret == -ENOSPC) {
+ if (err == -ENOSPC) {
btrfs_set_log_full_commit(trans);
- ret = 0;
- } else if (ret < 0)
- btrfs_abort_transaction(trans, ret);
+ err = 0;
+ } else if (err < 0 && err != -ENOENT) {
+ /* ENOENT can be returned if the entry hasn't been fsynced yet */
+ btrfs_abort_transaction(trans, err);
+ }

btrfs_end_log_trans(root);

diff --git a/fs/buffer.c b/fs/buffer.c
index 64fe82ec65ff1..75a8849abb5d2 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3160,6 +3160,15 @@ int __sync_dirty_buffer(struct buffer_head *bh, int op_flags)
WARN_ON(atomic_read(&bh->b_count) < 1);
lock_buffer(bh);
if (test_clear_buffer_dirty(bh)) {
+ /*
+ * The bh should be mapped, but it might not be if the
+ * device was hot-removed. Not much we can do but fail the I/O.
+ */
+ if (!buffer_mapped(bh)) {
+ unlock_buffer(bh);
+ return -EIO;
+ }
+
get_bh(bh);
bh->b_end_io = end_buffer_write_sync;
ret = submit_bh(REQ_OP_WRITE, op_flags, bh);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 160644ddaeed7..d51c3f2fdca02 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1538,6 +1538,7 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
struct page *pinned_page = NULL;
+ bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
ssize_t ret;
int want, got = 0;
int retry_op = 0, read = 0;
@@ -1546,7 +1547,7 @@ again:
dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

- if (iocb->ki_flags & IOCB_DIRECT)
+ if (direct_lock)
ceph_start_io_direct(inode);
else
ceph_start_io_read(inode);
@@ -1603,7 +1604,7 @@ again:
}
ceph_put_cap_refs(ci, got);

- if (iocb->ki_flags & IOCB_DIRECT)
+ if (direct_lock)
ceph_end_io_direct(inode);
else
ceph_end_io_read(inode);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 946f9a92658ab..903b6a35b321b 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -4285,6 +4285,9 @@ static void delayed_work(struct work_struct *work)

dout("mdsc delayed_work\n");

+ if (mdsc->stopping)
+ return;
+
mutex_lock(&mdsc->mutex);
renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
renew_caps = time_after_eq(jiffies, HZ*renew_interval +
@@ -4660,7 +4663,16 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
dout("stop\n");
- cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
+ /*
+ * Make sure the delayed work stopped before releasing
+ * the resources.
+ *
+ * Because the cancel_delayed_work_sync() will only
+ * guarantee that the work finishes executing. But the
+ * delayed work will re-arm itself again after that.
+ */
+ flush_delayed_work(&mdsc->delayed_work);
+
if (mdsc->mdsmap)
ceph_mdsmap_destroy(mdsc->mdsmap);
kfree(mdsc->sessions);
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index e830a9d4e10d3..11aa37693e436 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -254,14 +254,6 @@ int ext4_setup_system_zone(struct super_block *sb)
int flex_size = ext4_flex_bg_size(sbi);
int ret;

- if (!test_opt(sb, BLOCK_VALIDITY)) {
- if (sbi->system_blks)
- ext4_release_system_zone(sb);
- return 0;
- }
- if (sbi->system_blks)
- return 0;
-
system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
if (!system_blks)
return -ENOMEM;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 42815304902b8..ff46defc65683 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1054,6 +1054,7 @@ struct ext4_inode_info {
struct timespec64 i_crtime;

/* mballoc */
+ atomic_t i_prealloc_active;
struct list_head i_prealloc_list;
spinlock_t i_prealloc_lock;

@@ -1501,6 +1502,7 @@ struct ext4_sb_info {
unsigned int s_mb_stats;
unsigned int s_mb_order2_reqs;
unsigned int s_mb_group_prealloc;
+ unsigned int s_mb_max_inode_prealloc;
unsigned int s_max_dir_size_kb;
/* where last allocation was done - for stream allocation */
unsigned long s_mb_last_group;
@@ -1585,6 +1587,9 @@ struct ext4_sb_info {
#ifdef CONFIG_EXT4_DEBUG
unsigned long s_simulate_fail;
#endif
+ /* Record the errseq of the backing block device */
+ errseq_t s_bdev_wb_err;
+ spinlock_t s_bdev_wb_lock;
};

static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -2651,7 +2656,7 @@ extern int ext4_mb_release(struct super_block *);
extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *,
struct ext4_allocation_request *, int *);
extern int ext4_mb_reserve_blocks(struct super_block *, int);
-extern void ext4_discard_preallocations(struct inode *);
+extern void ext4_discard_preallocations(struct inode *, unsigned int);
extern int __init ext4_init_mballoc(void);
extern void ext4_exit_mballoc(void);
extern void ext4_free_blocks(handle_t *handle, struct inode *inode,
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 0c76cdd44d90d..760b9ee49dc00 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -195,6 +195,28 @@ static void ext4_journal_abort_handle(const char *caller, unsigned int line,
jbd2_journal_abort_handle(handle);
}

+static void ext4_check_bdev_write_error(struct super_block *sb)
+{
+ struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int err;
+
+ /*
+ * If the block device has write error flag, it may have failed to
+ * async write out metadata buffers in the background. In this case,
+ * we could read old data from disk and write it out again, which
+ * may lead to on-disk filesystem inconsistency.
+ */
+ if (errseq_check(&mapping->wb_err, READ_ONCE(sbi->s_bdev_wb_err))) {
+ spin_lock(&sbi->s_bdev_wb_lock);
+ err = errseq_check_and_advance(&mapping->wb_err, &sbi->s_bdev_wb_err);
+ spin_unlock(&sbi->s_bdev_wb_lock);
+ if (err)
+ ext4_error_err(sb, -err,
+ "Error while async write back metadata");
+ }
+}
+
int __ext4_journal_get_write_access(const char *where, unsigned int line,
handle_t *handle, struct buffer_head *bh)
{
@@ -202,6 +224,9 @@ int __ext4_journal_get_write_access(const char *where, unsigned int line,

might_sleep();

+ if (bh->b_bdev->bd_super)
+ ext4_check_bdev_write_error(bh->b_bdev->bd_super);
+
if (ext4_handle_valid(handle)) {
err = jbd2_journal_get_write_access(handle, bh);
if (err)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index d75054570e44c..11a321dd11e7e 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -100,7 +100,7 @@ static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
* i_mutex. So we can safely drop the i_data_sem here.
*/
BUG_ON(EXT4_JOURNAL(inode) == NULL);
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);
up_write(&EXT4_I(inode)->i_data_sem);
*dropped = 1;
return 0;
@@ -4268,7 +4268,7 @@ got_allocated_blocks:
* not a good idea to call discard here directly,
* but otherwise we'd need to call it every free().
*/
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE;
ext4_free_blocks(handle, inode, NULL, newblock,
@@ -5295,7 +5295,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
}

down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);

ret = ext4_es_remove_extent(inode, punch_start,
EXT_MAX_BLOCKS - punch_start);
@@ -5309,7 +5309,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);

ret = ext4_ext_shift_extents(inode, handle, punch_stop,
punch_stop - punch_start, SHIFT_LEFT);
@@ -5441,7 +5441,7 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
goto out_stop;

down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);

path = ext4_find_extent(inode, offset_lblk, NULL, 0);
if (IS_ERR(path)) {
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 8f742b53f1d40..4ee9a4dc01a88 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -148,7 +148,7 @@ static int ext4_release_file(struct inode *inode, struct file *filp)
!EXT4_I(inode)->i_reserved_data_blocks)
{
down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);
up_write(&EXT4_I(inode)->i_data_sem);
}
if (is_dx(inode) && filp->private_data)
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 4026418257121..e8ca405673923 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -696,7 +696,7 @@ static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode,
* i_mutex. So we can safely drop the i_data_sem here.
*/
BUG_ON(EXT4_JOURNAL(inode) == NULL);
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);
up_write(&EXT4_I(inode)->i_data_sem);
*dropped = 1;
return 0;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 92573f8540ab7..9c0629ffb4261 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -383,7 +383,7 @@ void ext4_da_update_reserve_space(struct inode *inode,
*/
if ((ei->i_reserved_data_blocks == 0) &&
!inode_is_open_for_write(inode))
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);
}

static int __check_block_validity(struct inode *inode, const char *func,
@@ -4055,7 +4055,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
if (stop_block > first_block) {

down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);

ret = ext4_es_remove_extent(inode, first_block,
stop_block - first_block);
@@ -4210,7 +4210,7 @@ int ext4_truncate(struct inode *inode)

down_write(&EXT4_I(inode)->i_data_sem);

- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);

if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
err = ext4_ext_truncate(handle, inode);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 999cf6add39c6..a5fcc238c6693 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -202,7 +202,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
reset_inode_seed(inode);
reset_inode_seed(inode_bl);

- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);

err = ext4_mark_inode_dirty(handle, inode);
if (err < 0) {
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
|
||
index 38719c156573c..e88eff999bd15 100644
|
||
--- a/fs/ext4/mballoc.c
|
||
+++ b/fs/ext4/mballoc.c
|
||
@@ -2177,6 +2177,7 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
|
||
{
|
||
struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
|
||
struct super_block *sb = ac->ac_sb;
|
||
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
|
||
bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
|
||
ext4_grpblk_t free;
|
||
int ret = 0;
|
||
@@ -2195,7 +2196,25 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
|
||
|
||
/* We only do this if the grp has never been initialized */
|
||
if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
|
||
- ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS);
|
||
+ struct ext4_group_desc *gdp =
|
||
+ ext4_get_group_desc(sb, group, NULL);
|
||
+ int ret;
|
||
+
|
||
+ /* cr=0/1 is a very optimistic search to find large
|
||
+ * good chunks almost for free. If buddy data is not
|
||
+ * ready, then this optimization makes no sense. But
|
||
+ * we never skip the first block group in a flex_bg,
|
||
+ * since this gets used for metadata block allocation,
|
||
+ * and we want to make sure we locate metadata blocks
|
||
+ * in the first block group in the flex_bg if possible.
|
||
+ */
|
||
+ if (cr < 2 &&
|
||
+ (!sbi->s_log_groups_per_flex ||
|
||
+ ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
|
||
+ !(ext4_has_group_desc_csum(sb) &&
|
||
+ (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
|
||
+ return 0;
|
||
+ ret = ext4_mb_init_group(sb, group, GFP_NOFS);
|
||
if (ret)
|
||
return ret;
|
||
}
|
||
@@ -2736,6 +2755,7 @@ int ext4_mb_init(struct super_block *sb)
|
||
sbi->s_mb_stats = MB_DEFAULT_STATS;
|
||
sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
|
||
sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
|
||
+ sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
|
||
/*
|
||
* The default group preallocation is 512, which for 4k block
|
||
* sizes translates to 2 megabytes. However for bigalloc file
|
||
@@ -3674,6 +3694,26 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
|
||
mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
|
||
}
|
||
|
||
+static void ext4_mb_mark_pa_deleted(struct super_block *sb,
|
||
+ struct ext4_prealloc_space *pa)
|
||
+{
|
||
+ struct ext4_inode_info *ei;
|
||
+
|
||
+ if (pa->pa_deleted) {
|
||
+ ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
|
||
+ pa->pa_type, pa->pa_pstart, pa->pa_lstart,
|
||
+ pa->pa_len);
|
||
+ return;
|
||
+ }
|
||
+
|
||
+ pa->pa_deleted = 1;
|
||
+
|
||
+ if (pa->pa_type == MB_INODE_PA) {
|
||
+ ei = EXT4_I(pa->pa_inode);
|
||
+ atomic_dec(&ei->i_prealloc_active);
|
||
+ }
|
||
+}
|
||
+
|
||
static void ext4_mb_pa_callback(struct rcu_head *head)
|
||
{
|
||
struct ext4_prealloc_space *pa;
|
||
@@ -3706,7 +3746,7 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
|
||
return;
|
||
}
|
||
|
||
- pa->pa_deleted = 1;
|
||
+ ext4_mb_mark_pa_deleted(sb, pa);
|
||
spin_unlock(&pa->pa_lock);
|
||
|
||
grp_blk = pa->pa_pstart;
|
||
@@ -3830,6 +3870,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
|
||
spin_lock(pa->pa_obj_lock);
|
||
list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
|
||
spin_unlock(pa->pa_obj_lock);
|
||
+ atomic_inc(&ei->i_prealloc_active);
|
||
}
|
||
|
||
/*
|
||
@@ -4040,7 +4081,7 @@ repeat:
|
||
}
|
||
|
||
/* seems this one can be freed ... */
|
||
- pa->pa_deleted = 1;
|
||
+ ext4_mb_mark_pa_deleted(sb, pa);
|
||
|
||
/* we can trust pa_free ... */
|
||
free += pa->pa_free;
|
||
@@ -4103,7 +4144,7 @@ out_dbg:
|
||
*
|
||
* FIXME!! Make sure it is valid at all the call sites
|
||
*/
|
||
-void ext4_discard_preallocations(struct inode *inode)
|
||
+void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
|
||
{
|
||
struct ext4_inode_info *ei = EXT4_I(inode);
|
||
struct super_block *sb = inode->i_sb;
|
||
@@ -4121,15 +4162,19 @@ void ext4_discard_preallocations(struct inode *inode)
|
||
|
||
mb_debug(sb, "discard preallocation for inode %lu\n",
|
||
inode->i_ino);
|
||
- trace_ext4_discard_preallocations(inode);
|
||
+ trace_ext4_discard_preallocations(inode,
|
||
+ atomic_read(&ei->i_prealloc_active), needed);
|
||
|
||
INIT_LIST_HEAD(&list);
|
||
|
||
+ if (needed == 0)
|
||
+ needed = UINT_MAX;
|
||
+
|
||
repeat:
|
||
/* first, collect all pa's in the inode */
|
||
spin_lock(&ei->i_prealloc_lock);
|
||
- while (!list_empty(&ei->i_prealloc_list)) {
|
||
- pa = list_entry(ei->i_prealloc_list.next,
|
||
+ while (!list_empty(&ei->i_prealloc_list) && needed) {
|
||
+ pa = list_entry(ei->i_prealloc_list.prev,
|
||
struct ext4_prealloc_space, pa_inode_list);
|
||
BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
|
||
spin_lock(&pa->pa_lock);
|
||
@@ -4146,10 +4191,11 @@ repeat:
|
||
|
||
}
|
||
if (pa->pa_deleted == 0) {
|
||
- pa->pa_deleted = 1;
|
||
+ ext4_mb_mark_pa_deleted(sb, pa);
|
||
spin_unlock(&pa->pa_lock);
|
||
list_del_rcu(&pa->pa_inode_list);
|
||
list_add(&pa->u.pa_tmp_list, &list);
|
||
+ needed--;
|
||
continue;
|
||
}
|
||
|
||
@@ -4450,7 +4496,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
|
||
BUG_ON(pa->pa_type != MB_GROUP_PA);
|
||
|
||
/* seems this one can be freed ... */
|
||
- pa->pa_deleted = 1;
|
||
+ ext4_mb_mark_pa_deleted(sb, pa);
|
||
spin_unlock(&pa->pa_lock);
|
||
|
||
list_del_rcu(&pa->pa_inode_list);
|
||
@@ -4548,11 +4594,30 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
|
||
return ;
|
||
}
|
||
|
||
+/*
|
||
+ * if per-inode prealloc list is too long, trim some PA
|
||
+ */
|
||
+static void ext4_mb_trim_inode_pa(struct inode *inode)
|
||
+{
|
||
+ struct ext4_inode_info *ei = EXT4_I(inode);
|
||
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
|
||
+ int count, delta;
|
||
+
|
||
+ count = atomic_read(&ei->i_prealloc_active);
|
||
+ delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1;
|
||
+ if (count > sbi->s_mb_max_inode_prealloc + delta) {
|
||
+ count -= sbi->s_mb_max_inode_prealloc;
|
||
+ ext4_discard_preallocations(inode, count);
|
||
+ }
|
||
+}
|
||
+
|
||
/*
|
||
* release all resource we used in allocation
|
||
*/
|
||
static int ext4_mb_release_context(struct ext4_allocation_context *ac)
|
||
{
|
||
+ struct inode *inode = ac->ac_inode;
|
||
+ struct ext4_inode_info *ei = EXT4_I(inode);
|
||
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
|
||
struct ext4_prealloc_space *pa = ac->ac_pa;
|
||
if (pa) {
|
||
@@ -4579,6 +4644,17 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac)
|
||
spin_unlock(pa->pa_obj_lock);
|
||
ext4_mb_add_n_trim(ac);
|
||
}
|
||
+
|
||
+ if (pa->pa_type == MB_INODE_PA) {
|
||
+ /*
|
||
+ * treat per-inode prealloc list as a lru list, then try
|
||
+ * to trim the least recently used PA.
|
||
+ */
|
||
+ spin_lock(pa->pa_obj_lock);
|
||
+ list_move(&pa->pa_inode_list, &ei->i_prealloc_list);
|
||
+ spin_unlock(pa->pa_obj_lock);
|
||
+ }
|
||
+
|
||
ext4_mb_put_pa(ac, ac->ac_sb, pa);
|
||
}
|
||
if (ac->ac_bitmap_page)
|
||
@@ -4588,6 +4664,7 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac)
|
||
if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
|
||
mutex_unlock(&ac->ac_lg->lg_mutex);
|
||
ext4_mb_collect_stats(ac);
|
||
+ ext4_mb_trim_inode_pa(inode);
|
||
return 0;
|
||
}
|
||
|
||
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
|
||
index 6b4d17c2935d6..e75b4749aa1c2 100644
|
||
--- a/fs/ext4/mballoc.h
|
||
+++ b/fs/ext4/mballoc.h
|
||
@@ -73,6 +73,10 @@
|
||
*/
|
||
#define MB_DEFAULT_GROUP_PREALLOC 512
|
||
|
||
+/*
|
||
+ * maximum length of inode prealloc list
|
||
+ */
|
||
+#define MB_DEFAULT_MAX_INODE_PREALLOC 512
|
||
|
||
struct ext4_free_data {
|
||
/* this links the free block information from sb_info */
|
||
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
|
||
index 1ed86fb6c3026..0d601b8228753 100644
|
||
--- a/fs/ext4/move_extent.c
|
||
+++ b/fs/ext4/move_extent.c
|
||
@@ -686,8 +686,8 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
|
||
|
||
out:
|
||
if (*moved_len) {
|
||
- ext4_discard_preallocations(orig_inode);
|
||
- ext4_discard_preallocations(donor_inode);
|
||
+ ext4_discard_preallocations(orig_inode, 0);
|
||
+ ext4_discard_preallocations(donor_inode, 0);
|
||
}
|
||
|
||
ext4_ext_drop_refs(path);
|
||
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
|
||
index 330957ed1f05c..0b38bf29c07e0 100644
|
||
--- a/fs/ext4/super.c
|
||
+++ b/fs/ext4/super.c
|
||
@@ -66,10 +66,10 @@ static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
|
||
unsigned long journal_devnum);
|
||
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
|
||
static int ext4_commit_super(struct super_block *sb, int sync);
|
||
-static void ext4_mark_recovery_complete(struct super_block *sb,
|
||
+static int ext4_mark_recovery_complete(struct super_block *sb,
|
||
struct ext4_super_block *es);
|
||
-static void ext4_clear_journal_err(struct super_block *sb,
|
||
- struct ext4_super_block *es);
|
||
+static int ext4_clear_journal_err(struct super_block *sb,
|
||
+ struct ext4_super_block *es);
|
||
static int ext4_sync_fs(struct super_block *sb, int wait);
|
||
static int ext4_remount(struct super_block *sb, int *flags, char *data);
|
||
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
|
||
@@ -1123,6 +1123,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
|
||
inode_set_iversion(&ei->vfs_inode, 1);
|
||
spin_lock_init(&ei->i_raw_lock);
|
||
INIT_LIST_HEAD(&ei->i_prealloc_list);
|
||
+ atomic_set(&ei->i_prealloc_active, 0);
|
||
spin_lock_init(&ei->i_prealloc_lock);
|
||
ext4_es_init_tree(&ei->i_es_tree);
|
||
rwlock_init(&ei->i_es_lock);
|
||
@@ -1216,7 +1217,7 @@ void ext4_clear_inode(struct inode *inode)
|
||
{
|
||
invalidate_inode_buffers(inode);
|
||
clear_inode(inode);
|
||
- ext4_discard_preallocations(inode);
|
||
+ ext4_discard_preallocations(inode, 0);
|
||
ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
|
||
dquot_drop(inode);
|
||
if (EXT4_I(inode)->jinode) {
|
||
@@ -4698,11 +4699,13 @@ no_journal:
|
||
|
||
ext4_set_resv_clusters(sb);
|
||
|
||
- err = ext4_setup_system_zone(sb);
|
||
- if (err) {
|
||
- ext4_msg(sb, KERN_ERR, "failed to initialize system "
|
||
- "zone (%d)", err);
|
||
- goto failed_mount4a;
|
||
+ if (test_opt(sb, BLOCK_VALIDITY)) {
|
||
+ err = ext4_setup_system_zone(sb);
|
||
+ if (err) {
|
||
+ ext4_msg(sb, KERN_ERR, "failed to initialize system "
|
||
+ "zone (%d)", err);
|
||
+ goto failed_mount4a;
|
||
+ }
|
||
}
|
||
|
||
ext4_ext_init(sb);
|
||
@@ -4765,12 +4768,23 @@ no_journal:
|
||
}
|
||
#endif /* CONFIG_QUOTA */
|
||
|
||
+ /*
|
||
+ * Save the original bdev mapping's wb_err value which could be
|
||
+ * used to detect the metadata async write error.
|
||
+ */
|
||
+ spin_lock_init(&sbi->s_bdev_wb_lock);
|
||
+ if (!sb_rdonly(sb))
|
||
+ errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
|
||
+ &sbi->s_bdev_wb_err);
|
||
+ sb->s_bdev->bd_super = sb;
|
||
EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
|
||
ext4_orphan_cleanup(sb, es);
|
||
EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
|
||
if (needs_recovery) {
|
||
ext4_msg(sb, KERN_INFO, "recovery complete");
|
||
- ext4_mark_recovery_complete(sb, es);
|
||
+ err = ext4_mark_recovery_complete(sb, es);
|
||
+ if (err)
|
||
+ goto failed_mount8;
|
||
}
|
||
if (EXT4_SB(sb)->s_journal) {
|
||
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
|
||
@@ -4813,10 +4827,8 @@ cantfind_ext4:
|
||
ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
|
||
goto failed_mount;
|
||
|
||
-#ifdef CONFIG_QUOTA
|
||
failed_mount8:
|
||
ext4_unregister_sysfs(sb);
|
||
-#endif
|
||
failed_mount7:
|
||
ext4_unregister_li_request(sb);
|
||
failed_mount6:
|
||
@@ -4956,7 +4968,8 @@ static journal_t *ext4_get_journal(struct super_block *sb,
|
||
struct inode *journal_inode;
|
||
journal_t *journal;
|
||
|
||
- BUG_ON(!ext4_has_feature_journal(sb));
|
||
+ if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
|
||
+ return NULL;
|
||
|
||
journal_inode = ext4_get_journal_inode(sb, journal_inum);
|
||
if (!journal_inode)
|
||
@@ -4986,7 +4999,8 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
|
||
struct ext4_super_block *es;
|
||
struct block_device *bdev;
|
||
|
||
- BUG_ON(!ext4_has_feature_journal(sb));
|
||
+ if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
|
||
+ return NULL;
|
||
|
||
bdev = ext4_blkdev_get(j_dev, sb);
|
||
if (bdev == NULL)
|
||
@@ -5077,8 +5091,10 @@ static int ext4_load_journal(struct super_block *sb,
|
||
dev_t journal_dev;
|
||
int err = 0;
|
||
int really_read_only;
|
||
+ int journal_dev_ro;
|
||
|
||
- BUG_ON(!ext4_has_feature_journal(sb));
|
||
+ if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
|
||
+ return -EFSCORRUPTED;
|
||
|
||
if (journal_devnum &&
|
||
journal_devnum != le32_to_cpu(es->s_journal_dev)) {
|
||
@@ -5088,7 +5104,31 @@ static int ext4_load_journal(struct super_block *sb,
|
||
} else
|
||
journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
|
||
|
||
- really_read_only = bdev_read_only(sb->s_bdev);
|
||
+ if (journal_inum && journal_dev) {
|
||
+ ext4_msg(sb, KERN_ERR,
|
||
+ "filesystem has both journal inode and journal device!");
|
||
+ return -EINVAL;
|
||
+ }
|
||
+
|
||
+ if (journal_inum) {
|
||
+ journal = ext4_get_journal(sb, journal_inum);
|
||
+ if (!journal)
|
||
+ return -EINVAL;
|
||
+ } else {
|
||
+ journal = ext4_get_dev_journal(sb, journal_dev);
|
||
+ if (!journal)
|
||
+ return -EINVAL;
|
||
+ }
|
||
+
|
||
+ journal_dev_ro = bdev_read_only(journal->j_dev);
|
||
+ really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro;
|
||
+
|
||
+ if (journal_dev_ro && !sb_rdonly(sb)) {
|
||
+ ext4_msg(sb, KERN_ERR,
|
||
+ "journal device read-only, try mounting with '-o ro'");
|
||
+ err = -EROFS;
|
||
+ goto err_out;
|
||
+ }
|
||
|
||
/*
|
||
* Are we loading a blank journal or performing recovery after a
|
||
@@ -5103,27 +5143,14 @@ static int ext4_load_journal(struct super_block *sb,
|
||
ext4_msg(sb, KERN_ERR, "write access "
|
||
"unavailable, cannot proceed "
|
||
"(try mounting with noload)");
|
||
- return -EROFS;
|
||
+ err = -EROFS;
|
||
+ goto err_out;
|
||
}
|
||
ext4_msg(sb, KERN_INFO, "write access will "
|
||
"be enabled during recovery");
|
||
}
|
||
}
|
||
|
||
- if (journal_inum && journal_dev) {
|
||
- ext4_msg(sb, KERN_ERR, "filesystem has both journal "
|
||
- "and inode journals!");
|
||
- return -EINVAL;
|
||
- }
|
||
-
|
||
- if (journal_inum) {
|
||
- if (!(journal = ext4_get_journal(sb, journal_inum)))
|
||
- return -EINVAL;
|
||
- } else {
|
||
- if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
|
||
- return -EINVAL;
|
||
- }
|
||
-
|
||
if (!(journal->j_flags & JBD2_BARRIER))
|
||
ext4_msg(sb, KERN_INFO, "barriers disabled");
|
||
|
||
@@ -5143,12 +5170,16 @@ static int ext4_load_journal(struct super_block *sb,
|
||
|
||
if (err) {
|
||
ext4_msg(sb, KERN_ERR, "error loading journal");
|
||
- jbd2_journal_destroy(journal);
|
||
- return err;
|
||
+ goto err_out;
|
||
}
|
||
|
||
EXT4_SB(sb)->s_journal = journal;
|
||
- ext4_clear_journal_err(sb, es);
|
||
+ err = ext4_clear_journal_err(sb, es);
|
||
+ if (err) {
|
||
+ EXT4_SB(sb)->s_journal = NULL;
|
||
+ jbd2_journal_destroy(journal);
|
||
+ return err;
|
||
+ }
|
||
|
||
if (!really_read_only && journal_devnum &&
|
||
journal_devnum != le32_to_cpu(es->s_journal_dev)) {
|
||
@@ -5159,6 +5190,10 @@ static int ext4_load_journal(struct super_block *sb,
|
||
}
|
||
|
||
return 0;
|
||
+
|
||
+err_out:
|
||
+ jbd2_journal_destroy(journal);
|
||
+ return err;
|
||
}
|
||
|
||
static int ext4_commit_super(struct super_block *sb, int sync)
|
||
@@ -5170,13 +5205,6 @@ static int ext4_commit_super(struct super_block *sb, int sync)
|
||
if (!sbh || block_device_ejected(sb))
|
||
return error;
|
||
|
||
- /*
|
||
- * The superblock bh should be mapped, but it might not be if the
|
||
- * device was hot-removed. Not much we can do but fail the I/O.
|
||
- */
|
||
- if (!buffer_mapped(sbh))
|
||
- return error;
|
||
-
|
||
/*
|
||
* If the file system is mounted read-only, don't update the
|
||
* superblock write time. This avoids updating the superblock
|
||
@@ -5244,26 +5272,32 @@ static int ext4_commit_super(struct super_block *sb, int sync)
|
||
* remounting) the filesystem readonly, then we will end up with a
|
||
* consistent fs on disk. Record that fact.
|
||
*/
|
||
-static void ext4_mark_recovery_complete(struct super_block *sb,
|
||
- struct ext4_super_block *es)
|
||
+static int ext4_mark_recovery_complete(struct super_block *sb,
|
||
+ struct ext4_super_block *es)
|
||
{
|
||
+ int err;
|
||
journal_t *journal = EXT4_SB(sb)->s_journal;
|
||
|
||
if (!ext4_has_feature_journal(sb)) {
|
||
- BUG_ON(journal != NULL);
|
||
- return;
|
||
+ if (journal != NULL) {
|
||
+ ext4_error(sb, "Journal got removed while the fs was "
|
||
+ "mounted!");
|
||
+ return -EFSCORRUPTED;
|
||
+ }
|
||
+ return 0;
|
||
}
|
||
jbd2_journal_lock_updates(journal);
|
||
- if (jbd2_journal_flush(journal) < 0)
|
||
+ err = jbd2_journal_flush(journal);
|
||
+ if (err < 0)
|
||
goto out;
|
||
|
||
if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
|
||
ext4_clear_feature_journal_needs_recovery(sb);
|
||
ext4_commit_super(sb, 1);
|
||
}
|
||
-
|
||
out:
|
||
jbd2_journal_unlock_updates(journal);
|
||
+ return err;
|
||
}
|
||
|
||
/*
|
||
@@ -5271,14 +5305,17 @@ out:
|
||
* has recorded an error from a previous lifetime, move that error to the
|
||
* main filesystem now.
|
||
*/
|
||
-static void ext4_clear_journal_err(struct super_block *sb,
|
||
+static int ext4_clear_journal_err(struct super_block *sb,
|
||
struct ext4_super_block *es)
|
||
{
|
||
journal_t *journal;
|
||
int j_errno;
|
||
const char *errstr;
|
||
|
||
- BUG_ON(!ext4_has_feature_journal(sb));
|
||
+ if (!ext4_has_feature_journal(sb)) {
|
||
+ ext4_error(sb, "Journal got removed while the fs was mounted!");
|
||
+ return -EFSCORRUPTED;
|
||
+ }
|
||
|
||
journal = EXT4_SB(sb)->s_journal;
|
||
|
||
@@ -5303,6 +5340,7 @@ static void ext4_clear_journal_err(struct super_block *sb,
|
||
jbd2_journal_clear_err(journal);
|
||
jbd2_journal_update_sb_errno(journal);
|
||
}
|
||
+ return 0;
|
||
}
|
||
|
||
/*
|
||
@@ -5445,7 +5483,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
|
||
{
|
||
struct ext4_super_block *es;
|
||
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
||
- unsigned long old_sb_flags;
|
||
+ unsigned long old_sb_flags, vfs_flags;
|
||
struct ext4_mount_options old_opts;
|
||
int enable_quota = 0;
|
||
ext4_group_t g;
|
||
@@ -5488,6 +5526,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
|
||
if (sbi->s_journal && sbi->s_journal->j_task->io_context)
|
||
journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
|
||
|
||
+ /*
|
||
+ * Some options can be enabled by ext4 and/or by VFS mount flag
|
||
+ * either way we need to make sure it matches in both *flags and
|
||
+ * s_flags. Copy those selected flags from *flags to s_flags
|
||
+ */
|
||
+ vfs_flags = SB_LAZYTIME | SB_I_VERSION;
|
||
+ sb->s_flags = (sb->s_flags & ~vfs_flags) | (*flags & vfs_flags);
|
||
+
|
||
if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
|
||
err = -EINVAL;
|
||
goto restore_opts;
|
||
@@ -5541,9 +5587,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
|
||
set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
|
||
}
|
||
|
||
- if (*flags & SB_LAZYTIME)
|
||
- sb->s_flags |= SB_LAZYTIME;
|
||
-
|
||
if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
|
||
if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
|
||
err = -EROFS;
|
||
@@ -5573,8 +5616,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
|
||
(sbi->s_mount_state & EXT4_VALID_FS))
|
||
es->s_state = cpu_to_le16(sbi->s_mount_state);
|
||
|
||
- if (sbi->s_journal)
|
||
+ if (sbi->s_journal) {
|
||
+ /*
|
||
+ * We let remount-ro finish even if marking fs
|
||
+ * as clean failed...
|
||
+ */
|
||
ext4_mark_recovery_complete(sb, es);
|
||
+ }
|
||
if (sbi->s_mmp_tsk)
|
||
kthread_stop(sbi->s_mmp_tsk);
|
||
} else {
|
||
@@ -5616,14 +5664,25 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
|
||
goto restore_opts;
|
||
}
|
||
|
||
+ /*
|
||
+ * Update the original bdev mapping's wb_err value
|
||
+ * which could be used to detect the metadata async
|
||
+ * write error.
|
||
+ */
|
||
+ errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
|
||
+ &sbi->s_bdev_wb_err);
|
||
+
|
||
/*
|
||
* Mounting a RDONLY partition read-write, so reread
|
||
* and store the current valid flag. (It may have
|
||
* been changed by e2fsck since we originally mounted
|
||
* the partition.)
|
||
*/
|
||
- if (sbi->s_journal)
|
||
- ext4_clear_journal_err(sb, es);
|
||
+ if (sbi->s_journal) {
|
||
+ err = ext4_clear_journal_err(sb, es);
|
||
+ if (err)
|
||
+ goto restore_opts;
|
||
+ }
|
||
sbi->s_mount_state = le16_to_cpu(es->s_state);
|
||
|
||
err = ext4_setup_super(sb, es, 0);
|
||
@@ -5653,7 +5712,17 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
|
||
ext4_register_li_request(sb, first_not_zeroed);
|
||
}
|
||
|
||
- ext4_setup_system_zone(sb);
|
||
+ /*
|
||
+ * Handle creation of system zone data early because it can fail.
|
||
+ * Releasing of existing data is done when we are sure remount will
|
||
+ * succeed.
|
||
+ */
|
||
+ if (test_opt(sb, BLOCK_VALIDITY) && !sbi->system_blks) {
|
||
+ err = ext4_setup_system_zone(sb);
|
||
+ if (err)
|
||
+ goto restore_opts;
|
||
+ }
|
||
+
|
||
if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
|
||
err = ext4_commit_super(sb, 1);
|
||
if (err)
|
||
@@ -5674,8 +5743,16 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
|
||
}
|
||
}
|
||
#endif
|
||
+ if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks)
|
||
+ ext4_release_system_zone(sb);
|
||
+
|
||
+ /*
|
||
+ * Some options can be enabled by ext4 and/or by VFS mount flag
|
||
+ * either way we need to make sure it matches in both *flags and
|
||
+ * s_flags. Copy those selected flags from s_flags to *flags
|
||
+ */
|
||
+ *flags = (*flags & ~vfs_flags) | (sb->s_flags & vfs_flags);
|
||
|
||
- *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
|
||
ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
|
||
kfree(orig_data);
|
||
return 0;
|
||
@@ -5689,6 +5766,8 @@ restore_opts:
|
||
sbi->s_commit_interval = old_opts.s_commit_interval;
|
||
sbi->s_min_batch_time = old_opts.s_min_batch_time;
|
||
sbi->s_max_batch_time = old_opts.s_max_batch_time;
|
||
+ if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks)
|
||
+ ext4_release_system_zone(sb);
|
||
#ifdef CONFIG_QUOTA
|
||
sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
|
||
for (i = 0; i < EXT4_MAXQUOTAS; i++) {
|
||
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
|
||
index 6c9fc9e21c138..92f04e9e94413 100644
|
||
--- a/fs/ext4/sysfs.c
|
||
+++ b/fs/ext4/sysfs.c
|
||
@@ -215,6 +215,7 @@ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
|
||
EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
|
||
EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
|
||
EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
|
||
+EXT4_RW_ATTR_SBI_UI(mb_max_inode_prealloc, s_mb_max_inode_prealloc);
|
||
EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
|
||
EXT4_ATTR(trigger_fs_error, 0200, trigger_test_error);
|
||
EXT4_RW_ATTR_SBI_UI(err_ratelimit_interval_ms, s_err_ratelimit_state.interval);
|
||
@@ -257,6 +258,7 @@ static struct attribute *ext4_attrs[] = {
|
||
ATTR_LIST(mb_order2_req),
|
||
ATTR_LIST(mb_stream_req),
|
||
ATTR_LIST(mb_group_prealloc),
|
||
+ ATTR_LIST(mb_max_inode_prealloc),
|
||
ATTR_LIST(max_writeback_mb_bump),
|
||
ATTR_LIST(extent_max_zeroout_kb),
|
||
ATTR_LIST(trigger_fs_error),
|
||
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
|
||
index b35a50f4953c5..7d9afd54e9d8f 100644
|
||
--- a/fs/f2fs/f2fs.h
|
||
+++ b/fs/f2fs/f2fs.h
|
||
@@ -3287,7 +3287,7 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
|
||
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
|
||
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
|
||
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
|
||
-void f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
|
||
+int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
|
||
int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
|
||
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
|
||
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
|
||
@@ -3750,7 +3750,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
|
||
int f2fs_convert_inline_inode(struct inode *inode);
|
||
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
|
||
int f2fs_write_inline_data(struct inode *inode, struct page *page);
|
||
-bool f2fs_recover_inline_data(struct inode *inode, struct page *npage);
|
||
+int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
|
||
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
|
||
const struct f2fs_filename *fname,
|
||
struct page **res_page);
|
||
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
|
||
index dbade310dc792..cf2c347bd7a3d 100644
|
||
--- a/fs/f2fs/inline.c
|
||
+++ b/fs/f2fs/inline.c
|
||
@@ -253,7 +253,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
|
||
return 0;
|
||
}
|
||
|
||
-bool f2fs_recover_inline_data(struct inode *inode, struct page *npage)
|
||
+int f2fs_recover_inline_data(struct inode *inode, struct page *npage)
|
||
{
|
||
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
||
struct f2fs_inode *ri = NULL;
|
||
@@ -275,7 +275,8 @@ bool f2fs_recover_inline_data(struct inode *inode, struct page *npage)
|
||
ri && (ri->i_inline & F2FS_INLINE_DATA)) {
|
||
process_inline:
|
||
ipage = f2fs_get_node_page(sbi, inode->i_ino);
|
||
- f2fs_bug_on(sbi, IS_ERR(ipage));
|
||
+ if (IS_ERR(ipage))
|
||
+ return PTR_ERR(ipage);
|
||
|
||
f2fs_wait_on_page_writeback(ipage, NODE, true, true);
|
||
|
||
@@ -288,21 +289,25 @@ process_inline:
|
||
|
||
set_page_dirty(ipage);
|
||
f2fs_put_page(ipage, 1);
|
||
- return true;
|
||
+ return 1;
|
||
}
|
||
|
||
if (f2fs_has_inline_data(inode)) {
|
||
ipage = f2fs_get_node_page(sbi, inode->i_ino);
|
||
- f2fs_bug_on(sbi, IS_ERR(ipage));
|
||
+ if (IS_ERR(ipage))
|
||
+ return PTR_ERR(ipage);
|
||
f2fs_truncate_inline_inode(inode, ipage, 0);
|
||
clear_inode_flag(inode, FI_INLINE_DATA);
|
||
f2fs_put_page(ipage, 1);
|
||
} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
|
||
- if (f2fs_truncate_blocks(inode, 0, false))
|
||
- return false;
|
||
+ int ret;
|
||
+
|
||
+ ret = f2fs_truncate_blocks(inode, 0, false);
|
||
+ if (ret)
|
||
+ return ret;
|
||
goto process_inline;
|
||
}
|
||
- return false;
|
||
+ return 0;
|
||
}
|
||
|
||
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
|
||
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
|
||
index e61ce7fb0958b..98736d0598b8d 100644
|
||
--- a/fs/f2fs/node.c
|
||
+++ b/fs/f2fs/node.c
|
||
@@ -2576,7 +2576,7 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
|
||
return nr - nr_shrink;
|
||
}
|
||
|
||
-void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
|
||
+int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
|
||
{
|
||
void *src_addr, *dst_addr;
|
||
size_t inline_size;
|
||
@@ -2584,7 +2584,8 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
|
||
struct f2fs_inode *ri;
|
||
|
||
ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
|
||
- f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
|
||
+ if (IS_ERR(ipage))
|
||
+ return PTR_ERR(ipage);
|
||
|
||
ri = F2FS_INODE(page);
|
||
if (ri->i_inline & F2FS_INLINE_XATTR) {
|
||
@@ -2603,6 +2604,7 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
|
||
update_inode:
|
||
f2fs_update_inode(inode, ipage);
|
||
f2fs_put_page(ipage, 1);
|
||
+ return 0;
|
||
}
|
||
|
||
int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
|
||
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
|
||
index ae5310f02e7ff..2807251944668 100644
|
||
--- a/fs/f2fs/recovery.c
|
||
+++ b/fs/f2fs/recovery.c
|
||
@@ -544,7 +544,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
|
||
|
||
/* step 1: recover xattr */
|
||
if (IS_INODE(page)) {
|
||
- f2fs_recover_inline_xattr(inode, page);
|
||
+ err = f2fs_recover_inline_xattr(inode, page);
|
||
+ if (err)
|
||
+ goto out;
|
||
} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
|
||
err = f2fs_recover_xattr_data(inode, page);
|
||
if (!err)
|
||
@@ -553,8 +555,12 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
|
||
}
|
||
|
||
/* step 2: recover inline data */
|
||
- if (f2fs_recover_inline_data(inode, page))
|
||
+ err = f2fs_recover_inline_data(inode, page);
|
||
+ if (err) {
|
||
+ if (err == 1)
|
||
+ err = 0;
|
||
goto out;
|
||
+ }
|
||
|
||
/* step 3: recover data indices */
|
||
start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
|
||
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
|
||
index 20e56b0fa46a9..0deb839da0a03 100644
|
||
--- a/fs/f2fs/super.c
|
||
+++ b/fs/f2fs/super.c
|
||
@@ -1173,6 +1173,9 @@ static void f2fs_put_super(struct super_block *sb)
|
||
int i;
|
||
bool dropped;
|
||
|
||
+ /* unregister procfs/sysfs entries in advance to avoid race case */
|
||
+ f2fs_unregister_sysfs(sbi);
|
||
+
|
||
f2fs_quota_off_umount(sb);
|
||
|
||
/* prevent remaining shrinker jobs */
|
||
@@ -1238,8 +1241,6 @@ static void f2fs_put_super(struct super_block *sb)
|
||
|
||
kvfree(sbi->ckpt);
|
||
|
||
- f2fs_unregister_sysfs(sbi);
|
||
-
|
||
sb->s_fs_info = NULL;
|
||
if (sbi->s_chksum_driver)
|
||
crypto_free_shash(sbi->s_chksum_driver);
|
||
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
|
||
index e877c59b9fdb4..c5e32ceb94827 100644
|
||
--- a/fs/f2fs/sysfs.c
|
||
+++ b/fs/f2fs/sysfs.c
|
||
@@ -223,6 +223,13 @@ static ssize_t avg_vblocks_show(struct f2fs_attr *a,
|
||
}
|
||
#endif
|
||
|
||
+static ssize_t main_blkaddr_show(struct f2fs_attr *a,
|
||
+ struct f2fs_sb_info *sbi, char *buf)
|
||
+{
|
||
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
|
||
+ (unsigned long long)MAIN_BLKADDR(sbi));
|
||
+}
|
||
+
|
||
static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
|
||
struct f2fs_sb_info *sbi, char *buf)
|
||
{
|
||
@@ -522,7 +529,6 @@ F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
|
||
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle, gc_mode);
|
||
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_urgent, gc_mode);
|
||
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
|
||
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, main_blkaddr, main_blkaddr);
|
||
F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_small_discards, max_discards);
|
||
F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, discard_granularity, discard_granularity);
|
||
F2FS_RW_ATTR(RESERVED_BLOCKS, f2fs_sb_info, reserved_blocks, reserved_blocks);
|
||
@@ -565,6 +571,7 @@ F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
|
||
F2FS_GENERAL_RO_ATTR(unusable);
|
||
F2FS_GENERAL_RO_ATTR(encoding);
|
||
F2FS_GENERAL_RO_ATTR(mounted_time_sec);
|
||
+F2FS_GENERAL_RO_ATTR(main_blkaddr);
|
||
#ifdef CONFIG_F2FS_STAT_FS
|
||
F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_foreground_calls, cp_count);
|
||
F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_background_calls, bg_cp_count);
|
||
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
|
||
index a605c3dddabc7..ae17d64a3e189 100644
|
||
--- a/fs/fs-writeback.c
|
||
+++ b/fs/fs-writeback.c
|
||
@@ -42,7 +42,6 @@
|
||
struct wb_writeback_work {
|
||
long nr_pages;
|
||
struct super_block *sb;
|
||
- unsigned long *older_than_this;
|
||
enum writeback_sync_modes sync_mode;
|
||
unsigned int tagged_writepages:1;
|
||
unsigned int for_kupdate:1;
|
||
@@ -144,7 +143,9 @@ static void inode_io_list_del_locked(struct inode *inode,
|
||
struct bdi_writeback *wb)
|
||
{
|
||
assert_spin_locked(&wb->list_lock);
|
||
+ assert_spin_locked(&inode->i_lock);
|
||
|
||
+ inode->i_state &= ~I_SYNC_QUEUED;
|
||
list_del_init(&inode->i_io_list);
|
||
wb_io_lists_depopulated(wb);
|
||
}
|
||
@@ -1122,7 +1123,9 @@ void inode_io_list_del(struct inode *inode)
|
||
struct bdi_writeback *wb;
|
||
|
||
wb = inode_to_wb_and_lock_list(inode);
|
||
+ spin_lock(&inode->i_lock);
|
||
inode_io_list_del_locked(inode, wb);
|
||
+ spin_unlock(&inode->i_lock);
|
||
spin_unlock(&wb->list_lock);
|
||
}
|
||
EXPORT_SYMBOL(inode_io_list_del);
|
||
@@ -1172,8 +1175,10 @@ void sb_clear_inode_writeback(struct inode *inode)
|
||
* the case then the inode must have been redirtied while it was being written
|
||
* out and we don't reset its dirtied_when.
|
||
*/
|
||
-static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
|
||
+static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
|
||
{
|
||
+ assert_spin_locked(&inode->i_lock);
|
||
+
|
||
if (!list_empty(&wb->b_dirty)) {
|
||
struct inode *tail;
|
||
|
||
@@ -1182,6 +1187,14 @@ static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
|
||
inode->dirtied_when = jiffies;
|
||
}
|
||
inode_io_list_move_locked(inode, wb, &wb->b_dirty);
|
||
+ inode->i_state &= ~I_SYNC_QUEUED;
|
||
+}
|
||
+
|
||
+static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
|
||
+{
|
||
+ spin_lock(&inode->i_lock);
|
||
+ redirty_tail_locked(inode, wb);
|
||
+ spin_unlock(&inode->i_lock);
|
||
}
|
||
|
||
/*
|
||
@@ -1220,16 +1233,13 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
|
||
#define EXPIRE_DIRTY_ATIME 0x0001
|
||
|
||
/*
|
||
- * Move expired (dirtied before work->older_than_this) dirty inodes from
|
||
+ * Move expired (dirtied before dirtied_before) dirty inodes from
|
||
* @delaying_queue to @dispatch_queue.
|
||
*/
|
||
static int move_expired_inodes(struct list_head *delaying_queue,
|
||
struct list_head *dispatch_queue,
|
||
- int flags,
|
||
- struct wb_writeback_work *work)
|
||
+ int flags, unsigned long dirtied_before)
|
||
{
|
||
- unsigned long *older_than_this = NULL;
|
||
- unsigned long expire_time;
|
||
LIST_HEAD(tmp);
|
||
struct list_head *pos, *node;
|
||
struct super_block *sb = NULL;
|
||
@@ -1237,21 +1247,17 @@ static int move_expired_inodes(struct list_head *delaying_queue,
|
||
int do_sb_sort = 0;
|
||
int moved = 0;
|
||
|
||
- if ((flags & EXPIRE_DIRTY_ATIME) == 0)
|
||
- older_than_this = work->older_than_this;
|
||
- else if (!work->for_sync) {
|
||
- expire_time = jiffies - (dirtytime_expire_interval * HZ);
|
||
- older_than_this = &expire_time;
|
||
- }
|
||
while (!list_empty(delaying_queue)) {
|
||
inode = wb_inode(delaying_queue->prev);
|
||
- if (older_than_this &&
|
||
- inode_dirtied_after(inode, *older_than_this))
|
||
+ if (inode_dirtied_after(inode, dirtied_before))
|
||
break;
|
||
list_move(&inode->i_io_list, &tmp);
|
||
moved++;
|
||
+ spin_lock(&inode->i_lock);
|
||
if (flags & EXPIRE_DIRTY_ATIME)
|
||
- set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
|
||
+ inode->i_state |= I_DIRTY_TIME_EXPIRED;
|
||
+ inode->i_state |= I_SYNC_QUEUED;
|
||
+ spin_unlock(&inode->i_lock);
|
||
if (sb_is_blkdev_sb(inode->i_sb))
|
||
continue;
|
||
if (sb && sb != inode->i_sb)
|
||
@@ -1289,18 +1295,22 @@ out:
|
||
* |
|
||
* +--> dequeue for IO
|
||
*/
|
||
-static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
|
||
+static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
|
||
+ unsigned long dirtied_before)
|
||
{
|
||
int moved;
|
||
+ unsigned long time_expire_jif = dirtied_before;
|
||
|
||
assert_spin_locked(&wb->list_lock);
|
||
list_splice_init(&wb->b_more_io, &wb->b_io);
|
||
- moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
|
||
+ moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, dirtied_before);
|
||
+ if (!work->for_sync)
|
||
+ time_expire_jif = jiffies - dirtytime_expire_interval * HZ;
|
||
moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
|
||
- EXPIRE_DIRTY_ATIME, work);
|
||
+ EXPIRE_DIRTY_ATIME, time_expire_jif);
|
||
if (moved)
|
||
wb_io_lists_populated(wb);
|
||
- trace_writeback_queue_io(wb, work, moved);
|
||
+ trace_writeback_queue_io(wb, work, dirtied_before, moved);
|
||
}
|
||
|
||
static int write_inode(struct inode *inode, struct writeback_control *wbc)
|
||
@@ -1394,7 +1404,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
|
||
* writeback is not making progress due to locked
|
||
* buffers. Skip this inode for now.
|
||
*/
|
||
- redirty_tail(inode, wb);
|
||
+ redirty_tail_locked(inode, wb);
|
||
return;
|
||
}
|
||
|
||
@@ -1414,7 +1424,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
|
||
* retrying writeback of the dirty page/inode
|
||
* that cannot be performed immediately.
|
||
*/
|
||
- redirty_tail(inode, wb);
|
||
+ redirty_tail_locked(inode, wb);
|
||
}
|
||
} else if (inode->i_state & I_DIRTY) {
|
||
/*
|
||
@@ -1422,10 +1432,11 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
|
||
* such as delayed allocation during submission or metadata
|
||
* updates after data IO completion.
|
||
*/
|
||
- redirty_tail(inode, wb);
|
||
+ redirty_tail_locked(inode, wb);
|
||
} else if (inode->i_state & I_DIRTY_TIME) {
|
||
inode->dirtied_when = jiffies;
|
||
inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
|
||
+ inode->i_state &= ~I_SYNC_QUEUED;
|
||
} else {
|
||
/* The inode is clean. Remove from writeback lists. */
|
||
inode_io_list_del_locked(inode, wb);
|
||
@@ -1669,8 +1680,8 @@ static long writeback_sb_inodes(struct super_block *sb,
|
||
*/
|
||
spin_lock(&inode->i_lock);
|
||
if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
|
||
+ redirty_tail_locked(inode, wb);
|
||
spin_unlock(&inode->i_lock);
|
||
- redirty_tail(inode, wb);
|
||
continue;
|
||
}
|
||
if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
|
||
@@ -1811,7 +1822,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
|
||
blk_start_plug(&plug);
|
||
spin_lock(&wb->list_lock);
|
||
if (list_empty(&wb->b_io))
|
||
- queue_io(wb, &work);
|
||
+ queue_io(wb, &work, jiffies);
|
||
__writeback_inodes_wb(wb, &work);
|
||
spin_unlock(&wb->list_lock);
|
||
blk_finish_plug(&plug);
|
||
@@ -1831,7 +1842,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
|
||
* takes longer than a dirty_writeback_interval interval, then leave a
|
||
* one-second gap.
|
||
*
|
||
- * older_than_this takes precedence over nr_to_write. So we'll only write back
|
||
+ * dirtied_before takes precedence over nr_to_write. So we'll only write back
|
||
* all dirty pages if they are all attached to "old" mappings.
|
||
*/
|
||
static long wb_writeback(struct bdi_writeback *wb,
|
||
@@ -1839,14 +1850,11 @@ static long wb_writeback(struct bdi_writeback *wb,
|
||
{
|
||
unsigned long wb_start = jiffies;
|
||
long nr_pages = work->nr_pages;
|
||
- unsigned long oldest_jif;
|
||
+ unsigned long dirtied_before = jiffies;
|
||
struct inode *inode;
|
||
long progress;
|
||
struct blk_plug plug;
|
||
|
||
- oldest_jif = jiffies;
|
||
- work->older_than_this = &oldest_jif;
|
||
-
|
||
blk_start_plug(&plug);
|
||
spin_lock(&wb->list_lock);
|
||
for (;;) {
|
||
@@ -1880,14 +1888,14 @@ static long wb_writeback(struct bdi_writeback *wb,
|
||
* safe.
|
||
*/
|
||
if (work->for_kupdate) {
|
||
- oldest_jif = jiffies -
|
||
+ dirtied_before = jiffies -
|
||
msecs_to_jiffies(dirty_expire_interval * 10);
|
||
} else if (work->for_background)
|
||
- oldest_jif = jiffies;
|
||
+ dirtied_before = jiffies;
|
||
|
||
trace_writeback_start(wb, work);
|
||
if (list_empty(&wb->b_io))
|
||
- queue_io(wb, work);
|
||
+ queue_io(wb, work, dirtied_before);
|
||
if (work->sb)
|
||
progress = writeback_sb_inodes(work->sb, wb, work);
|
||
else
|
||
@@ -2289,11 +2297,12 @@ void __mark_inode_dirty(struct inode *inode, int flags)
|
||
inode->i_state |= flags;
|
||
|
||
/*
|
||
- * If the inode is being synced, just update its dirty state.
|
||
- * The unlocker will place the inode on the appropriate
|
||
- * superblock list, based upon its state.
|
||
+ * If the inode is queued for writeback by flush worker, just
|
||
+ * update its dirty state. Once the flush worker is done with
|
||
+ * the inode it will place it on the appropriate superblock
|
||
+ * list, based upon its state.
|
||
*/
|
||
- if (inode->i_state & I_SYNC)
|
||
+ if (inode->i_state & I_SYNC_QUEUED)
|
||
goto out_unlock_inode;
|
||
|
||
/*
|
||
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
|
||
index ef5313f9c78fe..f936bcf02cce7 100644
|
||
--- a/fs/hugetlbfs/inode.c
|
||
+++ b/fs/hugetlbfs/inode.c
|
||
@@ -1364,6 +1364,12 @@ hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
|
||
sb->s_magic = HUGETLBFS_MAGIC;
|
||
sb->s_op = &hugetlbfs_ops;
|
||
sb->s_time_gran = 1;
|
||
+
|
||
+ /*
|
||
+ * Due to the special and limited functionality of hugetlbfs, it does
|
||
+ * not work well as a stacking filesystem.
|
||
+ */
|
||
+ sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
|
||
sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
|
||
if (!sb->s_root)
|
||
goto out_free;
|
||
diff --git a/fs/io-wq.c b/fs/io-wq.c
|
||
index 47c5f3aeb4600..cb9e5a444fba7 100644
|
||
--- a/fs/io-wq.c
|
||
+++ b/fs/io-wq.c
|
||
@@ -929,6 +929,24 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
|
||
return match->nr_running && !match->cancel_all;
|
||
}
|
||
|
||
+static inline void io_wqe_remove_pending(struct io_wqe *wqe,
|
||
+ struct io_wq_work *work,
|
||
+ struct io_wq_work_node *prev)
|
||
+{
|
||
+ unsigned int hash = io_get_work_hash(work);
|
||
+ struct io_wq_work *prev_work = NULL;
|
||
+
|
||
+ if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
|
||
+ if (prev)
|
||
+ prev_work = container_of(prev, struct io_wq_work, list);
|
||
+ if (prev_work && io_get_work_hash(prev_work) == hash)
|
||
+ wqe->hash_tail[hash] = prev_work;
|
||
+ else
|
||
+ wqe->hash_tail[hash] = NULL;
|
||
+ }
|
||
+ wq_list_del(&wqe->work_list, &work->list, prev);
|
||
+}
|
||
+
|
||
static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
|
||
struct io_cb_cancel_data *match)
|
||
{
|
||
@@ -942,8 +960,7 @@ retry:
|
||
work = container_of(node, struct io_wq_work, list);
|
||
if (!match->fn(work, match->data))
|
||
continue;
|
||
-
|
||
- wq_list_del(&wqe->work_list, node, prev);
|
||
+ io_wqe_remove_pending(wqe, work, prev);
|
||
spin_unlock_irqrestore(&wqe->lock, flags);
|
||
io_run_cancel(work, wqe);
|
||
match->nr_pending++;
|
||
diff --git a/fs/io_uring.c b/fs/io_uring.c
|
||
index 26978630378e0..4115bfedf15dc 100644
|
||
--- a/fs/io_uring.c
|
||
+++ b/fs/io_uring.c
|
||
@@ -1810,6 +1810,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
|
||
|
||
req = list_first_entry(done, struct io_kiocb, list);
|
||
if (READ_ONCE(req->result) == -EAGAIN) {
|
||
+ req->result = 0;
|
||
req->iopoll_completed = 0;
|
||
list_move_tail(&req->list, &again);
|
||
continue;
|
||
@@ -2517,6 +2518,11 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
|
||
return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
|
||
}
|
||
|
||
+static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
|
||
+{
|
||
+ return kiocb->ki_filp->f_mode & FMODE_STREAM ? NULL : &kiocb->ki_pos;
|
||
+}
|
||
+
|
||
/*
|
||
* For files that don't have ->read_iter() and ->write_iter(), handle them
|
||
* by looping over ->read() or ->write() manually.
|
||
@@ -2552,10 +2558,10 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
|
||
|
||
if (rw == READ) {
|
||
nr = file->f_op->read(file, iovec.iov_base,
|
||
- iovec.iov_len, &kiocb->ki_pos);
|
||
+ iovec.iov_len, io_kiocb_ppos(kiocb));
|
||
} else {
|
||
nr = file->f_op->write(file, iovec.iov_base,
|
||
- iovec.iov_len, &kiocb->ki_pos);
|
||
+ iovec.iov_len, io_kiocb_ppos(kiocb));
|
||
}
|
||
|
||
if (iov_iter_is_bvec(iter))
|
||
@@ -2680,7 +2686,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
|
||
goto copy_iov;
|
||
|
||
iov_count = iov_iter_count(&iter);
|
||
- ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
|
||
+ ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count);
|
||
if (!ret) {
|
||
ssize_t ret2;
|
||
|
||
@@ -2779,7 +2785,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock)
|
||
goto copy_iov;
|
||
|
||
iov_count = iov_iter_count(&iter);
|
||
- ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
|
||
+ ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), iov_count);
|
||
if (!ret) {
|
||
ssize_t ret2;
|
||
|
||
@@ -4113,7 +4119,8 @@ struct io_poll_table {
|
||
int error;
|
||
};
|
||
|
||
-static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
|
||
+static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb,
|
||
+ bool twa_signal_ok)
|
||
{
|
||
struct task_struct *tsk = req->task;
|
||
struct io_ring_ctx *ctx = req->ctx;
|
||
@@ -4126,7 +4133,7 @@ static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
|
||
* will do the job.
|
||
*/
|
||
notify = 0;
|
||
- if (!(ctx->flags & IORING_SETUP_SQPOLL))
|
||
+ if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
|
||
notify = TWA_SIGNAL;
|
||
|
||
ret = task_work_add(tsk, cb, notify);
|
||
@@ -4140,6 +4147,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
|
||
__poll_t mask, task_work_func_t func)
|
||
{
|
||
struct task_struct *tsk;
|
||
+ bool twa_signal_ok;
|
||
int ret;
|
||
|
||
/* for instances that support it check for an event match first: */
|
||
@@ -4155,13 +4163,21 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
|
||
init_task_work(&req->task_work, func);
|
||
percpu_ref_get(&req->ctx->refs);
|
||
|
||
+ /*
|
||
+ * If we using the signalfd wait_queue_head for this wakeup, then
|
||
+ * it's not safe to use TWA_SIGNAL as we could be recursing on the
|
||
+ * tsk->sighand->siglock on doing the wakeup. Should not be needed
|
||
+ * either, as the normal wakeup will suffice.
|
||
+ */
|
||
+ twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh);
|
||
+
|
||
/*
|
||
* If this fails, then the task is exiting. When a task exits, the
|
||
* work gets canceled, so just cancel this request as well instead
|
||
* of executing it. We can't safely execute it anyway, as we may not
|
||
* have the needed state needed for it anyway.
|
||
*/
|
||
- ret = io_req_task_work_add(req, &req->task_work);
|
||
+ ret = io_req_task_work_add(req, &req->task_work, twa_signal_ok);
|
||
if (unlikely(ret)) {
|
||
WRITE_ONCE(poll->canceled, true);
|
||
tsk = io_wq_get_task(req->ctx->io_wq);
|
||
@@ -4492,12 +4508,20 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
|
||
struct async_poll *apoll;
|
||
struct io_poll_table ipt;
|
||
__poll_t mask, ret;
|
||
+ int rw;
|
||
|
||
if (!req->file || !file_can_poll(req->file))
|
||
return false;
|
||
if (req->flags & (REQ_F_MUST_PUNT | REQ_F_POLLED))
|
||
return false;
|
||
- if (!def->pollin && !def->pollout)
|
||
+ if (def->pollin)
|
||
+ rw = READ;
|
||
+ else if (def->pollout)
|
||
+ rw = WRITE;
|
||
+ else
|
||
+ return false;
|
||
+ /* if we can't nonblock try, then no point in arming a poll handler */
|
||
+ if (!io_file_supports_async(req->file, rw))
|
||
return false;
|
||
|
||
apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
|
||
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
|
||
index e91aad3637a23..6250c9faa4cbe 100644
|
||
--- a/fs/jbd2/transaction.c
|
||
+++ b/fs/jbd2/transaction.c
|
||
@@ -2026,6 +2026,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
|
||
*/
|
||
static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
|
||
{
|
||
+ J_ASSERT_JH(jh, jh->b_transaction != NULL);
|
||
+ J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
|
||
+
|
||
__jbd2_journal_temp_unlink_buffer(jh);
|
||
jh->b_transaction = NULL;
|
||
}
|
||
@@ -2117,6 +2120,7 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal,
|
||
{
|
||
struct buffer_head *head;
|
||
struct buffer_head *bh;
|
||
+ bool has_write_io_error = false;
|
||
int ret = 0;
|
||
|
||
J_ASSERT(PageLocked(page));
|
||
@@ -2141,11 +2145,26 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal,
|
||
jbd2_journal_put_journal_head(jh);
|
||
if (buffer_jbd(bh))
|
||
goto busy;
|
||
+
|
||
+ /*
|
||
+ * If we free a metadata buffer which has been failed to
|
||
+ * write out, the jbd2 checkpoint procedure will not detect
|
||
+ * this failure and may lead to filesystem inconsistency
|
||
+ * after cleanup journal tail.
|
||
+ */
|
||
+ if (buffer_write_io_error(bh)) {
|
||
+ pr_err("JBD2: Error while async write back metadata bh %llu.",
|
||
+ (unsigned long long)bh->b_blocknr);
|
||
+ has_write_io_error = true;
|
||
+ }
|
||
} while ((bh = bh->b_this_page) != head);
|
||
|
||
ret = try_to_free_buffers(page);
|
||
|
||
busy:
|
||
+ if (has_write_io_error)
|
||
+ jbd2_journal_abort(journal, -EIO);
|
||
+
|
||
return ret;
|
||
}
|
||
|
||
@@ -2572,6 +2591,13 @@ bool __jbd2_journal_refile_buffer(struct journal_head *jh)
|
||
|
||
was_dirty = test_clear_buffer_jbddirty(bh);
|
||
__jbd2_journal_temp_unlink_buffer(jh);
|
||
+
|
||
+ /*
|
||
+ * b_transaction must be set, otherwise the new b_transaction won't
|
||
+ * be holding jh reference
|
||
+ */
|
||
+ J_ASSERT_JH(jh, jh->b_transaction != NULL);
|
||
+
|
||
/*
|
||
* We set b_transaction here because b_next_transaction will inherit
|
||
* our jh reference and thus __jbd2_journal_file_buffer() must not
|
||
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
|
||
index c9056316a0b35..cea682ce8aa12 100644
|
||
--- a/fs/nfsd/nfs4state.c
|
||
+++ b/fs/nfsd/nfs4state.c
|
||
@@ -4597,6 +4597,8 @@ static bool nfsd_breaker_owns_lease(struct file_lock *fl)
|
||
if (!i_am_nfsd())
|
||
return NULL;
|
||
rqst = kthread_data(current);
|
||
+ if (!rqst->rq_lease_breaker)
|
||
+ return NULL;
|
||
clp = *(rqst->rq_lease_breaker);
|
||
return dl->dl_stid.sc_client == clp;
|
||
}
|
||
diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
|
||
index b5dfb66548422..4504d215cd590 100644
|
||
--- a/fs/xfs/libxfs/xfs_trans_inode.c
|
||
+++ b/fs/xfs/libxfs/xfs_trans_inode.c
|
||
@@ -36,6 +36,7 @@ xfs_trans_ijoin(
|
||
|
||
ASSERT(iip->ili_lock_flags == 0);
|
||
iip->ili_lock_flags = lock_flags;
|
||
+ ASSERT(!xfs_iflags_test(ip, XFS_ISTALE));
|
||
|
||
/*
|
||
* Get a log_item_desc to point at the new item.
|
||
@@ -89,6 +90,7 @@ xfs_trans_log_inode(
|
||
|
||
ASSERT(ip->i_itemp != NULL);
|
||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||
+ ASSERT(!xfs_iflags_test(ip, XFS_ISTALE));
|
||
|
||
/*
|
||
* Don't bother with i_lock for the I_DIRTY_TIME check here, as races
|
||
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
|
||
index 5daef654956cb..59dea8178ae3c 100644
|
||
--- a/fs/xfs/xfs_icache.c
|
||
+++ b/fs/xfs/xfs_icache.c
|
||
@@ -1141,7 +1141,7 @@ restart:
|
||
goto out_ifunlock;
|
||
xfs_iunpin_wait(ip);
|
||
}
|
||
- if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
|
||
+ if (xfs_inode_clean(ip)) {
|
||
xfs_ifunlock(ip);
|
||
goto reclaim;
|
||
}
|
||
@@ -1228,6 +1228,7 @@ reclaim:
|
||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||
xfs_qm_dqdetach(ip);
|
||
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
||
+ ASSERT(xfs_inode_clean(ip));
|
||
|
||
__xfs_inode_free(ip);
|
||
return error;
|
||
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
|
||
index 9aea7d68d8ab9..6d70daf1c92a7 100644
|
||
--- a/fs/xfs/xfs_inode.c
|
||
+++ b/fs/xfs/xfs_inode.c
|
||
@@ -1740,10 +1740,31 @@ xfs_inactive_ifree(
|
||
return error;
|
||
}
|
||
|
||
+ /*
|
||
+ * We do not hold the inode locked across the entire rolling transaction
|
||
+ * here. We only need to hold it for the first transaction that
|
||
+ * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
|
||
+ * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
|
||
+ * here breaks the relationship between cluster buffer invalidation and
|
||
+ * stale inode invalidation on cluster buffer item journal commit
|
||
+ * completion, and can result in leaving dirty stale inodes hanging
|
||
+ * around in memory.
|
||
+ *
|
||
+ * We have no need for serialising this inode operation against other
|
||
+ * operations - we freed the inode and hence reallocation is required
|
||
+ * and that will serialise on reallocating the space the deferops need
|
||
+ * to free. Hence we can unlock the inode on the first commit of
|
||
+ * the transaction rather than roll it right through the deferops. This
|
||
+ * avoids relogging the XFS_ISTALE inode.
|
||
+ *
|
||
+ * We check that xfs_ifree() hasn't grown an internal transaction roll
|
||
+ * by asserting that the inode is still locked when it returns.
|
||
+ */
|
||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||
- xfs_trans_ijoin(tp, ip, 0);
|
||
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
|
||
|
||
error = xfs_ifree(tp, ip);
|
||
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||
if (error) {
|
||
/*
|
||
* If we fail to free the inode, shut down. The cancel
|
||
@@ -1756,7 +1777,6 @@ xfs_inactive_ifree(
|
||
xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
|
||
}
|
||
xfs_trans_cancel(tp);
|
||
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
||
return error;
|
||
}
|
||
|
||
@@ -1774,7 +1794,6 @@ xfs_inactive_ifree(
|
||
xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
|
||
__func__, error);
|
||
|
||
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
||
return 0;
|
||
}
|
||
|
||
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
|
||
index 4fc9a43ac45a8..aafd07388eb7b 100644
|
||
--- a/include/drm/drm_modeset_lock.h
|
||
+++ b/include/drm/drm_modeset_lock.h
|
||
@@ -164,6 +164,8 @@ int drm_modeset_lock_all_ctx(struct drm_device *dev,
|
||
* is 0, so no error checking is necessary
|
||
*/
|
||
#define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret) \
|
||
+ if (!drm_drv_uses_atomic_modeset(dev)) \
|
||
+ mutex_lock(&dev->mode_config.mutex); \
|
||
drm_modeset_acquire_init(&ctx, flags); \
|
||
modeset_lock_retry: \
|
||
ret = drm_modeset_lock_all_ctx(dev, &ctx); \
|
||
@@ -172,6 +174,7 @@ modeset_lock_retry: \
|
||
|
||
/**
|
||
* DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks
|
||
+ * @dev: drm device
|
||
* @ctx: local modeset acquire context, will be dereferenced
|
||
* @ret: local ret/err/etc variable to track error status
|
||
*
|
||
@@ -188,7 +191,7 @@ modeset_lock_retry: \
|
||
* to that failure. In both of these cases the code between BEGIN/END will not
|
||
* be run, so the failure will reflect the inability to grab the locks.
|
||
*/
|
||
-#define DRM_MODESET_LOCK_ALL_END(ctx, ret) \
|
||
+#define DRM_MODESET_LOCK_ALL_END(dev, ctx, ret) \
|
||
modeset_lock_fail: \
|
||
if (ret == -EDEADLK) { \
|
||
ret = drm_modeset_backoff(&ctx); \
|
||
@@ -196,6 +199,8 @@ modeset_lock_fail: \
|
||
goto modeset_lock_retry; \
|
||
} \
|
||
drm_modeset_drop_locks(&ctx); \
|
||
- drm_modeset_acquire_fini(&ctx);
|
||
+ drm_modeset_acquire_fini(&ctx); \
|
||
+ if (!drm_drv_uses_atomic_modeset(dev)) \
|
||
+ mutex_unlock(&dev->mode_config.mutex);
|
||
|
||
#endif /* DRM_MODESET_LOCK_H_ */
|
||
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
|
||
index ab2e20cba9514..ba22952c24e24 100644
|
||
--- a/include/linux/dma-direct.h
|
||
+++ b/include/linux/dma-direct.h
|
||
@@ -67,9 +67,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
|
||
}
|
||
|
||
u64 dma_direct_get_required_mask(struct device *dev);
|
||
-gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
|
||
- u64 *phys_mask);
|
||
-bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
|
||
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
||
gfp_t gfp, unsigned long attrs);
|
||
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
|
||
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
|
||
index a33ed3954ed46..0dc08701d7b7e 100644
|
||
--- a/include/linux/dma-mapping.h
|
||
+++ b/include/linux/dma-mapping.h
|
||
@@ -715,8 +715,9 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
|
||
pgprot_t prot, const void *caller);
|
||
void dma_common_free_remap(void *cpu_addr, size_t size);
|
||
|
||
-void *dma_alloc_from_pool(struct device *dev, size_t size,
|
||
- struct page **ret_page, gfp_t flags);
|
||
+struct page *dma_alloc_from_pool(struct device *dev, size_t size,
|
||
+ void **cpu_addr, gfp_t flags,
|
||
+ bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
|
||
bool dma_free_from_pool(struct device *dev, void *start, size_t size);
|
||
|
||
int
|
||
diff --git a/include/linux/efi.h b/include/linux/efi.h
|
||
index 05c47f857383e..73db1ae04cef8 100644
|
||
--- a/include/linux/efi.h
|
||
+++ b/include/linux/efi.h
|
||
@@ -606,7 +606,11 @@ extern void *efi_get_pal_addr (void);
|
||
extern void efi_map_pal_code (void);
|
||
extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
|
||
extern void efi_gettimeofday (struct timespec64 *ts);
|
||
+#ifdef CONFIG_EFI
|
||
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
|
||
+#else
|
||
+static inline void efi_enter_virtual_mode (void) {}
|
||
+#endif
|
||
#ifdef CONFIG_X86
|
||
extern efi_status_t efi_query_variable_store(u32 attributes,
|
||
unsigned long size,
|
||
diff --git a/include/linux/fb.h b/include/linux/fb.h
|
||
index 3b4b2f0c6994d..b11eb02cad6d3 100644
|
||
--- a/include/linux/fb.h
|
||
+++ b/include/linux/fb.h
|
||
@@ -400,8 +400,6 @@ struct fb_tile_ops {
|
||
#define FBINFO_HWACCEL_YPAN 0x2000 /* optional */
|
||
#define FBINFO_HWACCEL_YWRAP 0x4000 /* optional */
|
||
|
||
-#define FBINFO_MISC_USEREVENT 0x10000 /* event request
|
||
- from userspace */
|
||
#define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */
|
||
|
||
/* A driver may set this flag to indicate that it does want a set_par to be
|
||
diff --git a/include/linux/fs.h b/include/linux/fs.h
|
||
index 2dab217c6047f..ac1e89872db4f 100644
|
||
--- a/include/linux/fs.h
|
||
+++ b/include/linux/fs.h
|
||
@@ -2168,6 +2168,10 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
|
||
*
|
||
* I_DONTCACHE Evict inode as soon as it is not used anymore.
|
||
*
|
||
+ * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists.
|
||
+ * Used to detect that mark_inode_dirty() should not move
|
||
+ * inode between dirty lists.
|
||
+ *
|
||
* Q: What is the difference between I_WILL_FREE and I_FREEING?
|
||
*/
|
||
#define I_DIRTY_SYNC (1 << 0)
|
||
@@ -2185,12 +2189,12 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
|
||
#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
|
||
#define I_LINKABLE (1 << 10)
|
||
#define I_DIRTY_TIME (1 << 11)
|
||
-#define __I_DIRTY_TIME_EXPIRED 12
|
||
-#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED)
|
||
+#define I_DIRTY_TIME_EXPIRED (1 << 12)
|
||
#define I_WB_SWITCH (1 << 13)
|
||
#define I_OVL_INUSE (1 << 14)
|
||
#define I_CREATING (1 << 15)
|
||
#define I_DONTCACHE (1 << 16)
|
||
+#define I_SYNC_QUEUED (1 << 17)
|
||
|
||
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
|
||
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
|
||
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
|
||
index aac42c28fe62d..9b67394471e1c 100644
|
||
--- a/include/linux/netfilter_ipv6.h
|
||
+++ b/include/linux/netfilter_ipv6.h
|
||
@@ -58,7 +58,6 @@ struct nf_ipv6_ops {
|
||
int (*output)(struct net *, struct sock *, struct sk_buff *));
|
||
int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
|
||
#if IS_MODULE(CONFIG_IPV6)
|
||
- int (*br_defrag)(struct net *net, struct sk_buff *skb, u32 user);
|
||
int (*br_fragment)(struct net *net, struct sock *sk,
|
||
struct sk_buff *skb,
|
||
struct nf_bridge_frag_data *data,
|
||
@@ -117,23 +116,6 @@ static inline int nf_ip6_route(struct net *net, struct dst_entry **dst,
|
||
|
||
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
|
||
|
||
-static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb,
|
||
- u32 user)
|
||
-{
|
||
-#if IS_MODULE(CONFIG_IPV6)
|
||
- const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
|
||
-
|
||
- if (!v6_ops)
|
||
- return 1;
|
||
-
|
||
- return v6_ops->br_defrag(net, skb, user);
|
||
-#elif IS_BUILTIN(CONFIG_IPV6)
|
||
- return nf_ct_frag6_gather(net, skb, user);
|
||
-#else
|
||
- return 1;
|
||
-#endif
|
||
-}
|
||
-
|
||
int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
|
||
struct nf_bridge_frag_data *data,
|
||
int (*output)(struct net *, struct sock *sk,
|
||
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
|
||
index cc41d692ae8ed..628db6a07fda0 100644
|
||
--- a/include/trace/events/ext4.h
|
||
+++ b/include/trace/events/ext4.h
|
||
@@ -746,24 +746,29 @@ TRACE_EVENT(ext4_mb_release_group_pa,
|
||
);
|
||
|
||
TRACE_EVENT(ext4_discard_preallocations,
|
||
- TP_PROTO(struct inode *inode),
|
||
+ TP_PROTO(struct inode *inode, unsigned int len, unsigned int needed),
|
||
|
||
- TP_ARGS(inode),
|
||
+ TP_ARGS(inode, len, needed),
|
||
|
||
TP_STRUCT__entry(
|
||
- __field( dev_t, dev )
|
||
- __field( ino_t, ino )
|
||
+ __field( dev_t, dev )
|
||
+ __field( ino_t, ino )
|
||
+ __field( unsigned int, len )
|
||
+ __field( unsigned int, needed )
|
||
|
||
),
|
||
|
||
TP_fast_assign(
|
||
__entry->dev = inode->i_sb->s_dev;
|
||
__entry->ino = inode->i_ino;
|
||
+ __entry->len = len;
|
||
+ __entry->needed = needed;
|
||
),
|
||
|
||
- TP_printk("dev %d,%d ino %lu",
|
||
+ TP_printk("dev %d,%d ino %lu len: %u needed %u",
|
||
MAJOR(__entry->dev), MINOR(__entry->dev),
|
||
- (unsigned long) __entry->ino)
|
||
+ (unsigned long) __entry->ino, __entry->len,
|
||
+ __entry->needed)
|
||
);
|
||
|
||
TRACE_EVENT(ext4_mb_discard_preallocations,
|
||
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
|
||
index 10f5d1fa73476..7565dcd596973 100644
|
||
--- a/include/trace/events/writeback.h
|
||
+++ b/include/trace/events/writeback.h
|
||
@@ -498,8 +498,9 @@ DEFINE_WBC_EVENT(wbc_writepage);
|
||
TRACE_EVENT(writeback_queue_io,
|
||
TP_PROTO(struct bdi_writeback *wb,
|
||
struct wb_writeback_work *work,
|
||
+ unsigned long dirtied_before,
|
||
int moved),
|
||
- TP_ARGS(wb, work, moved),
|
||
+ TP_ARGS(wb, work, dirtied_before, moved),
|
||
TP_STRUCT__entry(
|
||
__array(char, name, 32)
|
||
__field(unsigned long, older)
|
||
@@ -509,19 +510,17 @@ TRACE_EVENT(writeback_queue_io,
|
||
__field(ino_t, cgroup_ino)
|
||
),
|
||
TP_fast_assign(
|
||
- unsigned long *older_than_this = work->older_than_this;
|
||
strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
|
||
- __entry->older = older_than_this ? *older_than_this : 0;
|
||
- __entry->age = older_than_this ?
|
||
- (jiffies - *older_than_this) * 1000 / HZ : -1;
|
||
+ __entry->older = dirtied_before;
|
||
+ __entry->age = (jiffies - dirtied_before) * 1000 / HZ;
|
||
__entry->moved = moved;
|
||
__entry->reason = work->reason;
|
||
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
|
||
),
|
||
TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
|
||
__entry->name,
|
||
- __entry->older, /* older_than_this in jiffies */
|
||
- __entry->age, /* older_than_this in relative milliseconds */
|
||
+ __entry->older, /* dirtied_before in jiffies */
|
||
+ __entry->age, /* dirtied_before in relative milliseconds */
|
||
__entry->moved,
|
||
__print_symbolic(__entry->reason, WB_WORK_REASON),
|
||
(unsigned long)__entry->cgroup_ino
|
||
diff --git a/kernel/Makefile b/kernel/Makefile
|
||
index f3218bc5ec69f..155b5380500ad 100644
|
||
--- a/kernel/Makefile
|
||
+++ b/kernel/Makefile
|
||
@@ -125,6 +125,7 @@ obj-$(CONFIG_WATCH_QUEUE) += watch_queue.o
|
||
|
||
obj-$(CONFIG_SYSCTL_KUNIT_TEST) += sysctl-test.o
|
||
|
||
+CFLAGS_stackleak.o += $(DISABLE_STACKLEAK_PLUGIN)
|
||
obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o
|
||
KASAN_SANITIZE_stackleak.o := n
|
||
KCSAN_SANITIZE_stackleak.o := n
|
||
diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c
|
||
index dd612b80b9fea..3c18090cd73dc 100644
|
||
--- a/kernel/bpf/bpf_iter.c
|
||
+++ b/kernel/bpf/bpf_iter.c
|
||
@@ -64,6 +64,9 @@ static void bpf_iter_done_stop(struct seq_file *seq)
|
||
iter_priv->done_stop = true;
|
||
}
|
||
|
||
+/* maximum visited objects before bailing out */
|
||
+#define MAX_ITER_OBJECTS 1000000
|
||
+
|
||
/* bpf_seq_read, a customized and simpler version for bpf iterator.
|
||
* no_llseek is assumed for this file.
|
||
* The following are differences from seq_read():
|
||
@@ -76,7 +79,7 @@ static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size,
|
||
{
|
||
struct seq_file *seq = file->private_data;
|
||
size_t n, offs, copied = 0;
|
||
- int err = 0;
|
||
+ int err = 0, num_objs = 0;
|
||
void *p;
|
||
|
||
mutex_lock(&seq->lock);
|
||
@@ -132,6 +135,7 @@ static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size,
|
||
while (1) {
|
||
loff_t pos = seq->index;
|
||
|
||
+ num_objs++;
|
||
offs = seq->count;
|
||
p = seq->op->next(seq, p, &seq->index);
|
||
if (pos == seq->index) {
|
||
@@ -150,6 +154,15 @@ static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size,
|
||
if (seq->count >= size)
|
||
break;
|
||
|
||
+ if (num_objs >= MAX_ITER_OBJECTS) {
|
||
+ if (offs == 0) {
|
||
+ err = -EAGAIN;
|
||
+ seq->op->stop(seq, p);
|
||
+ goto done;
|
||
+ }
|
||
+ break;
|
||
+ }
|
||
+
|
||
err = seq->op->show(seq, p);
|
||
if (err > 0) {
|
||
bpf_iter_dec_seq_num(seq);
|
||
diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
|
||
index a4a0fb4f94cc1..323def936be24 100644
|
||
--- a/kernel/bpf/task_iter.c
|
||
+++ b/kernel/bpf/task_iter.c
|
||
@@ -28,8 +28,9 @@ static struct task_struct *task_seq_get_next(struct pid_namespace *ns,
|
||
|
||
rcu_read_lock();
|
||
retry:
|
||
- pid = idr_get_next(&ns->idr, tid);
|
||
+ pid = find_ge_pid(*tid, ns);
|
||
if (pid) {
|
||
+ *tid = pid_nr_ns(pid, ns);
|
||
task = get_pid_task(pid, PIDTYPE_PID);
|
||
if (!task) {
|
||
++*tid;
|
||
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
|
||
index 67f060b86a73f..f17aec9d01f0c 100644
|
||
--- a/kernel/dma/direct.c
|
||
+++ b/kernel/dma/direct.c
|
||
@@ -45,7 +45,7 @@ u64 dma_direct_get_required_mask(struct device *dev)
|
||
return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
|
||
}
|
||
|
||
-gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
|
||
+static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
|
||
u64 *phys_limit)
|
||
{
|
||
u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);
|
||
@@ -70,7 +70,7 @@ gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
|
||
return 0;
|
||
}
|
||
|
||
-bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
|
||
+static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
|
||
{
|
||
return phys_to_dma_direct(dev, phys) + size - 1 <=
|
||
min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
|
||
@@ -163,8 +163,13 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
|
||
size = PAGE_ALIGN(size);
|
||
|
||
if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
|
||
- ret = dma_alloc_from_pool(dev, size, &page, gfp);
|
||
- if (!ret)
|
||
+ u64 phys_mask;
|
||
+
|
||
+ gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
|
||
+ &phys_mask);
|
||
+ page = dma_alloc_from_pool(dev, size, &ret, gfp,
|
||
+ dma_coherent_ok);
|
||
+ if (!page)
|
||
return NULL;
|
||
goto done;
|
||
}
|
||
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
|
||
index 6bc74a2d51273..1281c0f0442bc 100644
|
||
--- a/kernel/dma/pool.c
|
||
+++ b/kernel/dma/pool.c
|
||
@@ -3,7 +3,9 @@
|
||
* Copyright (C) 2012 ARM Ltd.
|
||
* Copyright (C) 2020 Google LLC
|
||
*/
|
||
+#include <linux/cma.h>
|
||
#include <linux/debugfs.h>
|
||
+#include <linux/dma-contiguous.h>
|
||
#include <linux/dma-direct.h>
|
||
#include <linux/dma-noncoherent.h>
|
||
#include <linux/init.h>
|
||
@@ -55,11 +57,34 @@ static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
|
||
pool_size_kernel += size;
|
||
}
|
||
|
||
+static bool cma_in_zone(gfp_t gfp)
|
||
+{
|
||
+ unsigned long size;
|
||
+ phys_addr_t end;
|
||
+ struct cma *cma;
|
||
+
|
||
+ cma = dev_get_cma_area(NULL);
|
||
+ if (!cma)
|
||
+ return false;
|
||
+
|
||
+ size = cma_get_size(cma);
|
||
+ if (!size)
|
||
+ return false;
|
||
+
|
||
+ /* CMA can't cross zone boundaries, see cma_activate_area() */
|
||
+ end = cma_get_base(cma) + size - 1;
|
||
+ if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
|
||
+ return end <= DMA_BIT_MASK(zone_dma_bits);
|
||
+ if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
|
||
+ return end <= DMA_BIT_MASK(32);
|
||
+ return true;
|
||
+}
|
||
+
|
||
static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
|
||
gfp_t gfp)
|
||
{
|
||
unsigned int order;
|
||
- struct page *page;
|
||
+ struct page *page = NULL;
|
||
void *addr;
|
||
int ret = -ENOMEM;
|
||
|
||
@@ -68,7 +93,11 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
|
||
|
||
do {
|
||
pool_size = 1 << (PAGE_SHIFT + order);
|
||
- page = alloc_pages(gfp, order);
|
||
+ if (cma_in_zone(gfp))
|
||
+ page = dma_alloc_from_contiguous(NULL, 1 << order,
|
||
+ order, false);
|
||
+ if (!page)
|
||
+ page = alloc_pages(gfp, order);
|
||
} while (!page && order-- > 0);
|
||
if (!page)
|
||
goto out;
|
||
@@ -196,93 +225,75 @@ static int __init dma_atomic_pool_init(void)
|
||
}
|
||
postcore_initcall(dma_atomic_pool_init);
|
||
|
||
-static inline struct gen_pool *dma_guess_pool_from_device(struct device *dev)
|
||
+static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
|
||
{
|
||
- u64 phys_mask;
|
||
- gfp_t gfp;
|
||
-
|
||
- gfp = dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
|
||
- &phys_mask);
|
||
- if (IS_ENABLED(CONFIG_ZONE_DMA) && gfp == GFP_DMA)
|
||
+ if (prev == NULL) {
|
||
+ if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
|
||
+ return atomic_pool_dma32;
|
||
+ if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
|
||
+ return atomic_pool_dma;
|
||
+ return atomic_pool_kernel;
|
||
+ }
|
||
+ if (prev == atomic_pool_kernel)
|
||
+ return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
|
||
+ if (prev == atomic_pool_dma32)
|
||
return atomic_pool_dma;
|
||
- if (IS_ENABLED(CONFIG_ZONE_DMA32) && gfp == GFP_DMA32)
|
||
- return atomic_pool_dma32;
|
||
- return atomic_pool_kernel;
|
||
+ return NULL;
|
||
}
|
||
|
||
-static inline struct gen_pool *dma_get_safer_pool(struct gen_pool *bad_pool)
|
||
+static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
|
||
+ struct gen_pool *pool, void **cpu_addr,
|
||
+ bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
|
||
{
|
||
- if (bad_pool == atomic_pool_kernel)
|
||
- return atomic_pool_dma32 ? : atomic_pool_dma;
|
||
+ unsigned long addr;
|
||
+ phys_addr_t phys;
|
||
|
||
- if (bad_pool == atomic_pool_dma32)
|
||
- return atomic_pool_dma;
|
||
+ addr = gen_pool_alloc(pool, size);
|
||
+ if (!addr)
|
||
+ return NULL;
|
||
|
||
- return NULL;
|
||
-}
|
||
+ phys = gen_pool_virt_to_phys(pool, addr);
|
||
+ if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
|
||
+ gen_pool_free(pool, addr, size);
|
||
+ return NULL;
|
||
+ }
|
||
|
||
-static inline struct gen_pool *dma_guess_pool(struct device *dev,
|
||
- struct gen_pool *bad_pool)
|
||
-{
|
||
- if (bad_pool)
|
||
- return dma_get_safer_pool(bad_pool);
|
||
+ if (gen_pool_avail(pool) < atomic_pool_size)
|
||
+ schedule_work(&atomic_pool_work);
|
||
|
||
- return dma_guess_pool_from_device(dev);
|
||
+ *cpu_addr = (void *)addr;
|
||
+ memset(*cpu_addr, 0, size);
|
||
+ return pfn_to_page(__phys_to_pfn(phys));
|
||
}
|
||
|
||
-void *dma_alloc_from_pool(struct device *dev, size_t size,
|
||
- struct page **ret_page, gfp_t flags)
|
||
+struct page *dma_alloc_from_pool(struct device *dev, size_t size,
|
||
+ void **cpu_addr, gfp_t gfp,
|
||
+ bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
|
||
{
|
||
struct gen_pool *pool = NULL;
|
||
- unsigned long val = 0;
|
||
- void *ptr = NULL;
|
||
- phys_addr_t phys;
|
||
-
|
||
- while (1) {
|
||
- pool = dma_guess_pool(dev, pool);
|
||
- if (!pool) {
|
||
- WARN(1, "Failed to get suitable pool for %s\n",
|
||
- dev_name(dev));
|
||
- break;
|
||
- }
|
||
-
|
||
- val = gen_pool_alloc(pool, size);
|
||
- if (!val)
|
||
- continue;
|
||
-
|
||
- phys = gen_pool_virt_to_phys(pool, val);
|
||
- if (dma_coherent_ok(dev, phys, size))
|
||
- break;
|
||
-
|
||
- gen_pool_free(pool, val, size);
|
||
- val = 0;
|
||
- }
|
||
-
|
||
-
|
||
- if (val) {
|
||
- *ret_page = pfn_to_page(__phys_to_pfn(phys));
|
||
- ptr = (void *)val;
|
||
- memset(ptr, 0, size);
|
||
+ struct page *page;
|
||
|
||
- if (gen_pool_avail(pool) < atomic_pool_size)
|
||
- schedule_work(&atomic_pool_work);
|
||
+ while ((pool = dma_guess_pool(pool, gfp))) {
|
||
+ page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
|
||
+ phys_addr_ok);
|
||
+ if (page)
|
||
+ return page;
|
||
}
|
||
|
||
- return ptr;
|
||
+ WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
|
||
+ return NULL;
|
||
}
|
||
|
||
bool dma_free_from_pool(struct device *dev, void *start, size_t size)
|
||
{
|
||
struct gen_pool *pool = NULL;
|
||
|
||
- while (1) {
|
||
- pool = dma_guess_pool(dev, pool);
|
||
- if (!pool)
|
||
- return false;
|
||
-
|
||
- if (gen_pool_has_addr(pool, (unsigned long)start, size)) {
|
||
- gen_pool_free(pool, (unsigned long)start, size);
|
||
- return true;
|
||
- }
|
||
+ while ((pool = dma_guess_pool(pool, 0))) {
|
||
+ if (!gen_pool_has_addr(pool, (unsigned long)start, size))
|
||
+ continue;
|
||
+ gen_pool_free(pool, (unsigned long)start, size);
|
||
+ return true;
|
||
}
|
||
+
|
||
+ return false;
|
||
}
|
||
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
|
||
index 30cc217b86318..651a4ad6d711f 100644
|
||
--- a/kernel/irq/matrix.c
|
||
+++ b/kernel/irq/matrix.c
|
||
@@ -380,6 +380,13 @@ int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
|
||
unsigned int cpu, bit;
|
||
struct cpumap *cm;
|
||
|
||
+ /*
|
||
+ * Not required in theory, but matrix_find_best_cpu() uses
|
||
+ * for_each_cpu() which ignores the cpumask on UP .
|
||
+ */
|
||
+ if (cpumask_empty(msk))
|
||
+ return -EINVAL;
|
||
+
|
||
cpu = matrix_find_best_cpu(m, msk);
|
||
if (cpu == UINT_MAX)
|
||
return -ENOSPC;
|
||
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
|
||
index 5525cd3ba0c83..02ef87f50df29 100644
|
||
--- a/kernel/locking/lockdep_proc.c
|
||
+++ b/kernel/locking/lockdep_proc.c
|
||
@@ -423,7 +423,7 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
|
||
seq_time(m, lt->min);
|
||
seq_time(m, lt->max);
|
||
seq_time(m, lt->total);
|
||
- seq_time(m, lt->nr ? div_s64(lt->total, lt->nr) : 0);
|
||
+ seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0);
|
||
}
|
||
|
||
static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
|
||
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
|
||
index 588e8e3960197..1bd6563939e59 100644
|
||
--- a/kernel/trace/blktrace.c
|
||
+++ b/kernel/trace/blktrace.c
|
||
@@ -536,6 +536,18 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
|
||
#endif
|
||
bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
|
||
|
||
+ /*
|
||
+ * As blktrace relies on debugfs for its interface the debugfs directory
|
||
+ * is required, contrary to the usual mantra of not checking for debugfs
|
||
+ * files or directories.
|
||
+ */
|
||
+ if (IS_ERR_OR_NULL(dir)) {
|
||
+ pr_warn("debugfs_dir not present for %s so skipping\n",
|
||
+ buts->name);
|
||
+ ret = -ENOENT;
|
||
+ goto err;
|
||
+ }
|
||
+
|
||
bt->dev = dev;
|
||
atomic_set(&bt->dropped, 0);
|
||
INIT_LIST_HEAD(&bt->running_list);
|
||
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
|
||
index 1d6a9b0b6a9fd..dd592ea9a4a06 100644
|
||
--- a/mm/khugepaged.c
|
||
+++ b/mm/khugepaged.c
|
||
@@ -431,7 +431,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
|
||
|
||
static inline int khugepaged_test_exit(struct mm_struct *mm)
|
||
{
|
||
- return atomic_read(&mm->mm_users) == 0;
|
||
+ return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
|
||
}
|
||
|
||
static bool hugepage_vma_check(struct vm_area_struct *vma,
|
||
@@ -1100,9 +1100,6 @@ static void collapse_huge_page(struct mm_struct *mm,
|
||
* handled by the anon_vma lock + PG_lock.
|
||
*/
|
||
mmap_write_lock(mm);
|
||
- result = SCAN_ANY_PROCESS;
|
||
- if (!mmget_still_valid(mm))
|
||
- goto out;
|
||
result = hugepage_vma_revalidate(mm, address, &vma);
|
||
if (result)
|
||
goto out;
|
||
diff --git a/mm/page_counter.c b/mm/page_counter.c
|
||
index b4663844c9b37..afe22ad335ccc 100644
|
||
--- a/mm/page_counter.c
|
||
+++ b/mm/page_counter.c
|
||
@@ -77,8 +77,8 @@ void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
|
||
* This is indeed racy, but we can live with some
|
||
* inaccuracy in the watermark.
|
||
*/
|
||
- if (new > c->watermark)
|
||
- c->watermark = new;
|
||
+ if (new > READ_ONCE(c->watermark))
|
||
+ WRITE_ONCE(c->watermark, new);
|
||
}
|
||
}
|
||
|
||
@@ -119,9 +119,10 @@ bool page_counter_try_charge(struct page_counter *counter,
|
||
propagate_protected_usage(c, new);
|
||
/*
|
||
* This is racy, but we can live with some
|
||
- * inaccuracy in the failcnt.
|
||
+ * inaccuracy in the failcnt which is only used
|
||
+ * to report stats.
|
||
*/
|
||
- c->failcnt++;
|
||
+ data_race(c->failcnt++);
|
||
*fail = c;
|
||
goto failed;
|
||
}
|
||
@@ -130,8 +131,8 @@ bool page_counter_try_charge(struct page_counter *counter,
|
||
* Just like with failcnt, we can live with some
|
||
* inaccuracy in the watermark.
|
||
*/
|
||
- if (new > c->watermark)
|
||
- c->watermark = new;
|
||
+ if (new > READ_ONCE(c->watermark))
|
||
+ WRITE_ONCE(c->watermark, new);
|
||
}
|
||
return true;
|
||
|
||
diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
|
||
index 8096732223828..8d033a75a766e 100644
|
||
--- a/net/bridge/netfilter/nf_conntrack_bridge.c
|
||
+++ b/net/bridge/netfilter/nf_conntrack_bridge.c
|
||
@@ -168,6 +168,7 @@ static unsigned int nf_ct_br_defrag4(struct sk_buff *skb,
|
||
static unsigned int nf_ct_br_defrag6(struct sk_buff *skb,
|
||
const struct nf_hook_state *state)
|
||
{
|
||
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
|
||
u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
|
||
enum ip_conntrack_info ctinfo;
|
||
struct br_input_skb_cb cb;
|
||
@@ -180,14 +181,17 @@ static unsigned int nf_ct_br_defrag6(struct sk_buff *skb,
|
||
|
||
br_skb_cb_save(skb, &cb, sizeof(struct inet6_skb_parm));
|
||
|
||
- err = nf_ipv6_br_defrag(state->net, skb,
|
||
- IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id);
|
||
+ err = nf_ct_frag6_gather(state->net, skb,
|
||
+ IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id);
|
||
/* queued */
|
||
if (err == -EINPROGRESS)
|
||
return NF_STOLEN;
|
||
|
||
br_skb_cb_restore(skb, &cb, IP6CB(skb)->frag_max_size);
|
||
return err == 0 ? NF_ACCEPT : NF_DROP;
|
||
+#else
|
||
+ return NF_ACCEPT;
|
||
+#endif
|
||
}
|
||
|
||
static int nf_ct_br_ip_check(const struct sk_buff *skb)
|
||
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
|
||
index dbd215cbc53d8..a8dd956b5e8e1 100644
|
||
--- a/net/can/j1939/transport.c
|
||
+++ b/net/can/j1939/transport.c
|
||
@@ -1803,7 +1803,20 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
|
||
}
|
||
|
||
tpdat = se_skb->data;
|
||
- memcpy(&tpdat[offset], &dat[1], nbytes);
|
||
+ if (!session->transmission) {
|
||
+ memcpy(&tpdat[offset], &dat[1], nbytes);
|
||
+ } else {
|
||
+ int err;
|
||
+
|
||
+ err = memcmp(&tpdat[offset], &dat[1], nbytes);
|
||
+ if (err)
|
||
+ netdev_err_once(priv->ndev,
|
||
+ "%s: 0x%p: Data of RX-looped back packet (%*ph) doesn't match TX data (%*ph)!\n",
|
||
+ __func__, session,
|
||
+ nbytes, &dat[1],
|
||
+ nbytes, &tpdat[offset]);
|
||
+ }
|
||
+
|
||
if (packet == session->pkt.rx)
|
||
session->pkt.rx++;
|
||
|
||
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
|
||
index 409e79b84a830..6d0e942d082d4 100644
|
||
--- a/net/ipv6/netfilter.c
|
||
+++ b/net/ipv6/netfilter.c
|
||
@@ -245,9 +245,6 @@ static const struct nf_ipv6_ops ipv6ops = {
|
||
.route_input = ip6_route_input,
|
||
.fragment = ip6_fragment,
|
||
.reroute = nf_ip6_reroute,
|
||
-#if IS_MODULE(CONFIG_IPV6) && IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
|
||
- .br_defrag = nf_ct_frag6_gather,
|
||
-#endif
|
||
#if IS_MODULE(CONFIG_IPV6)
|
||
.br_fragment = br_ip6_fragment,
|
||
#endif
|
||
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
|
||
index 88325b264737f..d31832d32e028 100644
|
||
--- a/net/netfilter/nf_tables_api.c
|
||
+++ b/net/netfilter/nf_tables_api.c
|
||
@@ -2037,7 +2037,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
|
||
|
||
if (nla[NFTA_CHAIN_HOOK]) {
|
||
if (!nft_is_base_chain(chain))
|
||
- return -EBUSY;
|
||
+ return -EEXIST;
|
||
|
||
err = nft_chain_parse_hook(ctx->net, nla, &hook, ctx->family,
|
||
false);
|
||
@@ -2047,21 +2047,21 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
|
||
basechain = nft_base_chain(chain);
|
||
if (basechain->type != hook.type) {
|
||
nft_chain_release_hook(&hook);
|
||
- return -EBUSY;
|
||
+ return -EEXIST;
|
||
}
|
||
|
||
if (ctx->family == NFPROTO_NETDEV) {
|
||
if (!nft_hook_list_equal(&basechain->hook_list,
|
||
&hook.list)) {
|
||
nft_chain_release_hook(&hook);
|
||
- return -EBUSY;
|
||
+ return -EEXIST;
|
||
}
|
||
} else {
|
||
ops = &basechain->ops;
|
||
if (ops->hooknum != hook.num ||
|
||
ops->priority != hook.priority) {
|
||
nft_chain_release_hook(&hook);
|
||
- return -EBUSY;
|
||
+ return -EEXIST;
|
||
}
|
||
}
|
||
nft_chain_release_hook(&hook);
|
||
@@ -5160,10 +5160,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
|
||
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) ^
|
||
nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) ||
|
||
nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) ^
|
||
- nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF)) {
|
||
- err = -EBUSY;
|
||
+ nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF))
|
||
goto err_element_clash;
|
||
- }
|
||
if ((nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
|
||
nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) &&
|
||
memcmp(nft_set_ext_data(ext),
|
||
@@ -5171,7 +5169,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
|
||
(nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) &&
|
||
nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF) &&
|
||
*nft_set_ext_obj(ext) != *nft_set_ext_obj(ext2)))
|
||
- err = -EBUSY;
|
||
+ goto err_element_clash;
|
||
else if (!(nlmsg_flags & NLM_F_EXCL))
|
||
err = 0;
|
||
} else if (err == -ENOTEMPTY) {
|
||
@@ -6308,7 +6306,7 @@ static int nft_register_flowtable_net_hooks(struct net *net,
|
||
list_for_each_entry(hook2, &ft->hook_list, list) {
|
||
if (hook->ops.dev == hook2->ops.dev &&
|
||
hook->ops.pf == hook2->ops.pf) {
|
||
- err = -EBUSY;
|
||
+ err = -EEXIST;
|
||
goto err_unregister_net_hooks;
|
||
}
|
||
}
|
||
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
|
||
index 94b024534987a..03b81aa99975b 100644
|
||
--- a/net/openvswitch/datapath.c
|
||
+++ b/net/openvswitch/datapath.c
|
||
@@ -1736,6 +1736,7 @@ err:
|
||
/* Called with ovs_mutex. */
|
||
static void __dp_destroy(struct datapath *dp)
|
||
{
|
||
+ struct flow_table *table = &dp->table;
|
||
int i;
|
||
|
||
for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
|
||
@@ -1754,7 +1755,14 @@ static void __dp_destroy(struct datapath *dp)
|
||
*/
|
||
ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
|
||
|
||
- /* RCU destroy the flow table */
|
||
+ /* Flush sw_flow in the tables. RCU cb only releases resource
|
||
+ * such as dp, ports and tables. That may avoid some issues
|
||
+ * such as RCU usage warning.
|
||
+ */
|
||
+ table_instance_flow_flush(table, ovsl_dereference(table->ti),
|
||
+ ovsl_dereference(table->ufid_ti));
|
||
+
|
||
+ /* RCU destroy the ports, meters and flow tables. */
|
||
call_rcu(&dp->rcu, destroy_dp_rcu);
|
||
}
|
||
|
||
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
|
||
index 2398d72383005..f198bbb0c517a 100644
|
||
--- a/net/openvswitch/flow_table.c
|
||
+++ b/net/openvswitch/flow_table.c
|
||
@@ -345,19 +345,15 @@ static void table_instance_flow_free(struct flow_table *table,
|
||
flow_mask_remove(table, flow->mask);
|
||
}
|
||
|
||
-static void table_instance_destroy(struct flow_table *table,
|
||
- struct table_instance *ti,
|
||
- struct table_instance *ufid_ti,
|
||
- bool deferred)
|
||
+/* Must be called with OVS mutex held. */
|
||
+void table_instance_flow_flush(struct flow_table *table,
|
||
+ struct table_instance *ti,
|
||
+ struct table_instance *ufid_ti)
|
||
{
|
||
int i;
|
||
|
||
- if (!ti)
|
||
- return;
|
||
-
|
||
- BUG_ON(!ufid_ti);
|
||
if (ti->keep_flows)
|
||
- goto skip_flows;
|
||
+ return;
|
||
|
||
for (i = 0; i < ti->n_buckets; i++) {
|
||
struct sw_flow *flow;
|
||
@@ -369,18 +365,16 @@ static void table_instance_destroy(struct flow_table *table,
|
||
|
||
table_instance_flow_free(table, ti, ufid_ti,
|
||
flow, false);
|
||
- ovs_flow_free(flow, deferred);
|
||
+ ovs_flow_free(flow, true);
|
||
}
|
||
}
|
||
+}
|
||
|
||
-skip_flows:
|
||
- if (deferred) {
|
||
- call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
|
||
- call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
|
||
- } else {
|
||
- __table_instance_destroy(ti);
|
||
- __table_instance_destroy(ufid_ti);
|
||
- }
|
||
+static void table_instance_destroy(struct table_instance *ti,
|
||
+ struct table_instance *ufid_ti)
|
||
+{
|
||
+ call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
|
||
+ call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
|
||
}
|
||
|
||
/* No need for locking this function is called from RCU callback or
|
||
@@ -393,7 +387,7 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
|
||
|
||
free_percpu(table->mask_cache);
|
||
kfree_rcu(rcu_dereference_raw(table->mask_array), rcu);
|
||
- table_instance_destroy(table, ti, ufid_ti, false);
|
||
+ table_instance_destroy(ti, ufid_ti);
|
||
}
|
||
|
||
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
|
||
@@ -511,7 +505,8 @@ int ovs_flow_tbl_flush(struct flow_table *flow_table)
|
||
flow_table->count = 0;
|
||
flow_table->ufid_count = 0;
|
||
|
||
- table_instance_destroy(flow_table, old_ti, old_ufid_ti, true);
|
||
+ table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
|
||
+ table_instance_destroy(old_ti, old_ufid_ti);
|
||
return 0;
|
||
|
||
err_free_ti:
|
||
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
|
||
index 8a5cea6ae1116..8ea8fc9573776 100644
|
||
--- a/net/openvswitch/flow_table.h
|
||
+++ b/net/openvswitch/flow_table.h
|
||
@@ -86,4 +86,7 @@ bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *);
|
||
|
||
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
|
||
bool full, const struct sw_flow_mask *mask);
|
||
+void table_instance_flow_flush(struct flow_table *table,
|
||
+ struct table_instance *ti,
|
||
+ struct table_instance *ufid_ti);
|
||
#endif /* flow_table.h */
|
||
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
|
||
index a080d63a9b456..4490dd7469d99 100644
|
||
--- a/sound/pci/cs46xx/cs46xx_lib.c
|
||
+++ b/sound/pci/cs46xx/cs46xx_lib.c
|
||
@@ -766,7 +766,7 @@ static void snd_cs46xx_set_capture_sample_rate(struct snd_cs46xx *chip, unsigned
|
||
rate = 48000 / 9;
|
||
|
||
/*
|
||
- * We can not capture at at rate greater than the Input Rate (48000).
|
||
+ * We can not capture at a rate greater than the Input Rate (48000).
|
||
* Return an error if an attempt is made to stray outside that limit.
|
||
*/
|
||
if (rate > 48000)
|
||
diff --git a/sound/pci/cs46xx/dsp_spos_scb_lib.c b/sound/pci/cs46xx/dsp_spos_scb_lib.c
|
||
index 6b536fc23ca62..1f90ca723f4df 100644
|
||
--- a/sound/pci/cs46xx/dsp_spos_scb_lib.c
|
||
+++ b/sound/pci/cs46xx/dsp_spos_scb_lib.c
|
||
@@ -1716,7 +1716,7 @@ int cs46xx_iec958_pre_open (struct snd_cs46xx *chip)
|
||
struct dsp_spos_instance * ins = chip->dsp_spos_instance;
|
||
|
||
if ( ins->spdif_status_out & DSP_SPDIF_STATUS_OUTPUT_ENABLED ) {
|
||
- /* remove AsynchFGTxSCB and and PCMSerialInput_II */
|
||
+ /* remove AsynchFGTxSCB and PCMSerialInput_II */
|
||
cs46xx_dsp_disable_spdif_out (chip);
|
||
|
||
/* save state */
|
||
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
|
||
index 803978d69e3c4..ea7f16dd1f73c 100644
|
||
--- a/sound/pci/hda/hda_codec.c
|
||
+++ b/sound/pci/hda/hda_codec.c
|
||
@@ -3427,7 +3427,7 @@ EXPORT_SYMBOL_GPL(snd_hda_set_power_save);
|
||
* @nid: NID to check / update
|
||
*
|
||
* Check whether the given NID is in the amp list. If it's in the list,
|
||
- * check the current AMP status, and update the the power-status according
|
||
+ * check the current AMP status, and update the power-status according
|
||
* to the mute status.
|
||
*
|
||
* This function is supposed to be set or called from the check_power_status
|
||
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
|
||
index f4e9d9445e18f..201a3b6b0b0f6 100644
|
||
--- a/sound/pci/hda/hda_generic.c
|
||
+++ b/sound/pci/hda/hda_generic.c
|
||
@@ -813,7 +813,7 @@ static void activate_amp_in(struct hda_codec *codec, struct nid_path *path,
|
||
}
|
||
}
|
||
|
||
-/* sync power of each widget in the the given path */
|
||
+/* sync power of each widget in the given path */
|
||
static hda_nid_t path_power_update(struct hda_codec *codec,
|
||
struct nid_path *path,
|
||
bool allow_powerdown)
|
||
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
|
||
index 4c23b169ac67e..1a26940a3fd7c 100644
|
||
--- a/sound/pci/hda/hda_intel.c
|
||
+++ b/sound/pci/hda/hda_intel.c
|
||
@@ -2747,6 +2747,8 @@ static const struct pci_device_id azx_ids[] = {
|
||
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI },
|
||
/* Zhaoxin */
|
||
{ PCI_DEVICE(0x1d17, 0x3288), .driver_data = AZX_DRIVER_ZHAOXIN },
|
||
+ /* Loongson */
|
||
+ { PCI_DEVICE(0x0014, 0x7a07), .driver_data = AZX_DRIVER_GENERIC },
|
||
{ 0, }
|
||
};
|
||
MODULE_DEVICE_TABLE(pci, azx_ids);
|
||
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
|
||
index cd46247988e4d..f0c6d2907e396 100644
|
||
--- a/sound/pci/hda/patch_hdmi.c
|
||
+++ b/sound/pci/hda/patch_hdmi.c
|
||
@@ -160,6 +160,7 @@ struct hdmi_spec {
|
||
|
||
bool use_acomp_notifier; /* use eld_notify callback for hotplug */
|
||
bool acomp_registered; /* audio component registered in this driver */
|
||
+ bool force_connect; /* force connectivity */
|
||
struct drm_audio_component_audio_ops drm_audio_ops;
|
||
int (*port2pin)(struct hda_codec *, int); /* reverse port/pin mapping */
|
||
|
||
@@ -1701,7 +1702,8 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
|
||
* all device entries on the same pin
|
||
*/
|
||
config = snd_hda_codec_get_pincfg(codec, pin_nid);
|
||
- if (get_defcfg_connect(config) == AC_JACK_PORT_NONE)
|
||
+ if (get_defcfg_connect(config) == AC_JACK_PORT_NONE &&
|
||
+ !spec->force_connect)
|
||
return 0;
|
||
|
||
/*
|
||
@@ -1803,11 +1805,19 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
|
||
return 0;
|
||
}
|
||
|
||
+static const struct snd_pci_quirk force_connect_list[] = {
|
||
+ SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
|
||
+ SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
|
||
+ {}
|
||
+};
|
||
+
|
||
static int hdmi_parse_codec(struct hda_codec *codec)
|
||
{
|
||
+ struct hdmi_spec *spec = codec->spec;
|
||
hda_nid_t start_nid;
|
||
unsigned int caps;
|
||
int i, nodes;
|
||
+ const struct snd_pci_quirk *q;
|
||
|
||
nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &start_nid);
|
||
if (!start_nid || nodes < 0) {
|
||
@@ -1815,6 +1825,11 @@ static int hdmi_parse_codec(struct hda_codec *codec)
|
||
return -EINVAL;
|
||
}
|
||
|
||
+ q = snd_pci_quirk_lookup(codec->bus->pci, force_connect_list);
|
||
+
|
||
+ if (q && q->value)
|
||
+ spec->force_connect = true;
|
||
+
|
||
/*
|
||
* hdmi_add_pin() assumes total amount of converters to
|
||
* be known, so first discover all converters
|
||
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
|
||
index b10d005786d07..da23c2d4ca51e 100644
|
||
--- a/sound/pci/hda/patch_realtek.c
|
||
+++ b/sound/pci/hda/patch_realtek.c
|
||
@@ -6167,6 +6167,7 @@ enum {
|
||
ALC269_FIXUP_CZC_L101,
|
||
ALC269_FIXUP_LEMOTE_A1802,
|
||
ALC269_FIXUP_LEMOTE_A190X,
|
||
+ ALC256_FIXUP_INTEL_NUC8_RUGGED,
|
||
};
|
||
|
||
static const struct hda_fixup alc269_fixups[] = {
|
||
@@ -7488,6 +7489,15 @@ static const struct hda_fixup alc269_fixups[] = {
|
||
},
|
||
.chain_id = ALC269_FIXUP_DMIC,
|
||
},
|
||
+ [ALC256_FIXUP_INTEL_NUC8_RUGGED] = {
|
||
+ .type = HDA_FIXUP_PINS,
|
||
+ .v.pins = (const struct hda_pintbl[]) {
|
||
+ { 0x1b, 0x01a1913c }, /* use as headset mic, without its own jack detect */
|
||
+ { }
|
||
+ },
|
||
+ .chained = true,
|
||
+ .chain_id = ALC269_FIXUP_HEADSET_MODE
|
||
+ },
|
||
};
|
||
|
||
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
||
@@ -7787,6 +7797,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
||
SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
|
||
SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
|
||
SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
|
||
+ SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
|
||
|
||
#if 0
|
||
/* Below is a quirk table taken from the old code.
|
||
@@ -7958,6 +7969,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
|
||
{.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
|
||
{.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"},
|
||
{.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
|
||
+ {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"},
|
||
{}
|
||
};
|
||
#define ALC225_STANDARD_PINS \
|
||
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
|
||
index a608d0486ae49..2bea11d62d3e9 100644
|
||
--- a/sound/pci/hda/patch_sigmatel.c
|
||
+++ b/sound/pci/hda/patch_sigmatel.c
|
||
@@ -832,7 +832,7 @@ static int stac_auto_create_beep_ctls(struct hda_codec *codec,
|
||
static const struct snd_kcontrol_new beep_vol_ctl =
|
||
HDA_CODEC_VOLUME(NULL, 0, 0, 0);
|
||
|
||
- /* check for mute support for the the amp */
|
||
+ /* check for mute support for the amp */
|
||
if ((caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT) {
|
||
const struct snd_kcontrol_new *temp;
|
||
if (spec->anabeep_nid == nid)
|
||
diff --git a/sound/pci/ice1712/prodigy192.c b/sound/pci/ice1712/prodigy192.c
|
||
index 8df14f63b10df..096ec76f53046 100644
|
||
--- a/sound/pci/ice1712/prodigy192.c
|
||
+++ b/sound/pci/ice1712/prodigy192.c
|
||
@@ -32,7 +32,7 @@
|
||
* Experimentally I found out that only a combination of
|
||
* OCKS0=1, OCKS1=1 (128fs, 64fs output) and ice1724 -
|
||
* VT1724_MT_I2S_MCLK_128X=0 (256fs input) yields correct
|
||
- * sampling rate. That means the the FPGA doubles the
+ * sampling rate. That means that the FPGA doubles the
* MCK01 rate.
*
* Copyright (c) 2003 Takashi Iwai <tiwai@suse.de>
diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c
index c3f8721624cd4..b90421a1d909a 100644
--- a/sound/pci/oxygen/xonar_dg.c
+++ b/sound/pci/oxygen/xonar_dg.c
@@ -29,7 +29,7 @@
* GPIO 4 <- headphone detect
* GPIO 5 -> enable ADC analog circuit for the left channel
* GPIO 6 -> enable ADC analog circuit for the right channel
- * GPIO 7 -> switch green rear output jack between CS4245 and and the first
+ * GPIO 7 -> switch green rear output jack between CS4245 and the first
* channel of CS4361 (mechanical relay)
* GPIO 8 -> enable output to speakers
*
diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
index ca42445b649d4..b471892d84778 100644
--- a/sound/soc/codecs/wm8958-dsp2.c
+++ b/sound/soc/codecs/wm8958-dsp2.c
@@ -412,8 +412,12 @@ int wm8958_aif_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct wm8994 *control = dev_get_drvdata(component->dev->parent);
int i;

+ if (control->type != WM8958)
+ return 0;
+
switch (event) {
case SND_SOC_DAPM_POST_PMU:
case SND_SOC_DAPM_PRE_PMU:
diff --git a/sound/soc/img/img-i2s-in.c b/sound/soc/img/img-i2s-in.c
index e30b66b94bf67..0843235d73c91 100644
--- a/sound/soc/img/img-i2s-in.c
+++ b/sound/soc/img/img-i2s-in.c
@@ -343,8 +343,10 @@ static int img_i2s_in_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
chan_control_mask = IMG_I2S_IN_CH_CTL_CLK_TRANS_MASK;

ret = pm_runtime_get_sync(i2s->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(i2s->dev);
return ret;
+ }

for (i = 0; i < i2s->active_channels; i++)
img_i2s_in_ch_disable(i2s, i);
diff --git a/sound/soc/img/img-parallel-out.c b/sound/soc/img/img-parallel-out.c
index 5ddbe3a31c2e9..4da49a42e8547 100644
--- a/sound/soc/img/img-parallel-out.c
+++ b/sound/soc/img/img-parallel-out.c
@@ -163,8 +163,10 @@ static int img_prl_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
}

ret = pm_runtime_get_sync(prl->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(prl->dev);
return ret;
+ }

reg = img_prl_out_readl(prl, IMG_PRL_OUT_CTL);
reg = (reg & ~IMG_PRL_OUT_CTL_EDGE_MASK) | control_set;
diff --git a/sound/soc/intel/boards/skl_hda_dsp_common.h b/sound/soc/intel/boards/skl_hda_dsp_common.h
index 507750ef67f30..4b0b3959182e5 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_common.h
+++ b/sound/soc/intel/boards/skl_hda_dsp_common.h
@@ -33,6 +33,7 @@ struct skl_hda_private {
int dai_index;
const char *platform_name;
bool common_hdmi_codec_drv;
+ bool idisp_codec;
};

extern struct snd_soc_dai_link skl_hda_be_dai_links[HDA_DSP_MAX_BE_DAI_LINKS];
diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
index 79c8947f840b9..ca4900036ead9 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
+++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
@@ -79,6 +79,9 @@ skl_hda_add_dai_link(struct snd_soc_card *card, struct snd_soc_dai_link *link)
link->platforms->name = ctx->platform_name;
link->nonatomic = 1;

+ if (!ctx->idisp_codec)
+ return 0;
+
if (strstr(link->name, "HDMI")) {
ret = skl_hda_hdmi_add_pcm(card, ctx->pcm_count);

@@ -118,19 +121,20 @@ static char hda_soc_components[30];
static int skl_hda_fill_card_info(struct snd_soc_acpi_mach_params *mach_params)
{
struct snd_soc_card *card = &hda_soc_card;
+ struct skl_hda_private *ctx = snd_soc_card_get_drvdata(card);
struct snd_soc_dai_link *dai_link;
- u32 codec_count, codec_mask, idisp_mask;
+ u32 codec_count, codec_mask;
int i, num_links, num_route;

codec_mask = mach_params->codec_mask;
codec_count = hweight_long(codec_mask);
- idisp_mask = codec_mask & IDISP_CODEC_MASK;
+ ctx->idisp_codec = !!(codec_mask & IDISP_CODEC_MASK);

if (!codec_count || codec_count > 2 ||
- (codec_count == 2 && !idisp_mask))
+ (codec_count == 2 && !ctx->idisp_codec))
return -EINVAL;

- if (codec_mask == idisp_mask) {
+ if (codec_mask == IDISP_CODEC_MASK) {
/* topology with iDisp as the only HDA codec */
num_links = IDISP_DAI_COUNT + DMIC_DAI_COUNT;
num_route = IDISP_ROUTE_COUNT;
@@ -152,7 +156,7 @@ static int skl_hda_fill_card_info(struct snd_soc_acpi_mach_params *mach_params)
num_route = ARRAY_SIZE(skl_hda_map);
card->dapm_widgets = skl_hda_widgets;
card->num_dapm_widgets = ARRAY_SIZE(skl_hda_widgets);
- if (!idisp_mask) {
+ if (!ctx->idisp_codec) {
for (i = 0; i < IDISP_DAI_COUNT; i++) {
skl_hda_be_dai_links[i].codecs = dummy_codec;
skl_hda_be_dai_links[i].num_codecs =
@@ -211,6 +215,8 @@ static int skl_hda_audio_probe(struct platform_device *pdev)
if (!mach)
return -EINVAL;

+ snd_soc_card_set_drvdata(&hda_soc_card, ctx);
+
ret = skl_hda_fill_card_info(&mach->mach_params);
if (ret < 0) {
dev_err(&pdev->dev, "Unsupported HDAudio/iDisp configuration found\n");
@@ -223,7 +229,6 @@ static int skl_hda_audio_probe(struct platform_device *pdev)
ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;

hda_soc_card.dev = &pdev->dev;
- snd_soc_card_set_drvdata(&hda_soc_card, ctx);

if (mach->mach_params.dmic_num > 0) {
snprintf(hda_soc_components, sizeof(hda_soc_components),
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index 1bfd9613449e9..95a119a2d354e 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -184,6 +184,7 @@ static struct sof_sdw_codec_info codec_info_list[] = {
.direction = {true, true},
.dai_name = "rt711-aif1",
.init = sof_sdw_rt711_init,
+ .exit = sof_sdw_rt711_exit,
},
{
.id = 0x1308,
diff --git a/sound/soc/intel/boards/sof_sdw_common.h b/sound/soc/intel/boards/sof_sdw_common.h
index 69b363b8a6869..fdd2385049e1e 100644
--- a/sound/soc/intel/boards/sof_sdw_common.h
+++ b/sound/soc/intel/boards/sof_sdw_common.h
@@ -84,6 +84,7 @@ int sof_sdw_rt711_init(const struct snd_soc_acpi_link_adr *link,
struct snd_soc_dai_link *dai_links,
struct sof_sdw_codec_info *info,
bool playback);
+int sof_sdw_rt711_exit(struct device *dev, struct snd_soc_dai_link *dai_link);

/* RT700 support */
int sof_sdw_rt700_init(const struct snd_soc_acpi_link_adr *link,
diff --git a/sound/soc/intel/boards/sof_sdw_rt711.c b/sound/soc/intel/boards/sof_sdw_rt711.c
index d4d75c8dc6b78..0cb9f1c1f8676 100644
--- a/sound/soc/intel/boards/sof_sdw_rt711.c
+++ b/sound/soc/intel/boards/sof_sdw_rt711.c
@@ -133,6 +133,21 @@ static int rt711_rtd_init(struct snd_soc_pcm_runtime *rtd)
return ret;
}

+int sof_sdw_rt711_exit(struct device *dev, struct snd_soc_dai_link *dai_link)
+{
+ struct device *sdw_dev;
+
+ sdw_dev = bus_find_device_by_name(&sdw_bus_type, NULL,
+ dai_link->codecs[0].name);
+ if (!sdw_dev)
+ return -EINVAL;
+
+ device_remove_properties(sdw_dev);
+ put_device(sdw_dev);
+
+ return 0;
+}
+
int sof_sdw_rt711_init(const struct snd_soc_acpi_link_adr *link,
struct snd_soc_dai_link *dai_links,
struct sof_sdw_codec_info *info,
diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c
index 635eacbd28d47..156e3b9d613c6 100644
--- a/sound/soc/tegra/tegra30_ahub.c
+++ b/sound/soc/tegra/tegra30_ahub.c
@@ -643,8 +643,10 @@ static int tegra30_ahub_resume(struct device *dev)
int ret;

ret = pm_runtime_get_sync(dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put(dev);
return ret;
+ }
ret = regcache_sync(ahub->regmap_ahub);
ret |= regcache_sync(ahub->regmap_apbif);
pm_runtime_put(dev);
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index d59882ec48f16..db5a8587bfa4c 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -567,8 +567,10 @@ static int tegra30_i2s_resume(struct device *dev)
int ret;

ret = pm_runtime_get_sync(dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put(dev);
return ret;
+ }
ret = regcache_sync(i2s->regmap);
pm_runtime_put(dev);

diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index eab0fd4fd7c33..e0b7174c10430 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -2367,7 +2367,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
int num_ins;
struct usb_mixer_elem_info *cval;
struct snd_kcontrol *kctl;
- int i, err, nameid, type, len;
+ int i, err, nameid, type, len, val;
const struct procunit_info *info;
const struct procunit_value_info *valinfo;
const struct usbmix_name_map *map;
@@ -2470,6 +2470,12 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
break;
}

+ err = get_cur_ctl_value(cval, cval->control << 8, &val);
+ if (err < 0) {
+ usb_mixer_elem_info_free(cval);
+ return -EINVAL;
+ }
+
kctl = snd_ctl_new1(&mixer_procunit_ctl, cval);
if (!kctl) {
usb_mixer_elem_info_free(cval);
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index a53eb67ad4bd8..366faaa4ba82c 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2678,6 +2678,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
.ifnum = QUIRK_ANY_INTERFACE,
.type = QUIRK_COMPOSITE,
.data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
+ },
{
.ifnum = 0,
.type = QUIRK_AUDIO_FIXED_ENDPOINT,
@@ -2690,6 +2694,32 @@ YAMAHA_DEVICE(0x7010, "UB99"),
.attributes = UAC_EP_CS_ATTR_SAMPLE_RATE,
.endpoint = 0x01,
.ep_attr = USB_ENDPOINT_XFER_ISOC,
+ .datainterval = 1,
+ .maxpacksize = 0x024c,
+ .rates = SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000,
+ .rate_min = 44100,
+ .rate_max = 48000,
+ .nr_rates = 2,
+ .rate_table = (unsigned int[]) {
+ 44100, 48000
+ }
+ }
+ },
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 2,
+ .iface = 0,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = 0,
+ .endpoint = 0x82,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC,
+ .datainterval = 1,
+ .maxpacksize = 0x0126,
.rates = SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000,
.rate_min = 44100,
@@ -3697,8 +3727,8 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
* they pretend to be 96kHz mono as a workaround for stereo being broken
* by that...
*
- * They also have swapped L-R channels, but that's for userspace to deal
- * with.
+ * They also have an issue with initial stream alignment that causes the
+ * channels to be swapped and out of phase, which is dealt with in quirks.c.
*/
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE |
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index ede162f83eea0..0e9310727281a 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -67,7 +67,7 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
if (!info->btf_id || !info->nr_func_info ||
btf__get_from_id(info->btf_id, &prog_btf))
goto print;
- finfo = (struct bpf_func_info *)info->func_info;
+ finfo = u64_to_ptr(info->func_info);
func_type = btf__type_by_id(prog_btf, finfo->type_id);
if (!func_type || !btf_is_func(func_type))
goto print;
diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
index fca57ee8fafe4..dea691c83afca 100644
--- a/tools/bpf/bpftool/link.c
+++ b/tools/bpf/bpftool/link.c
@@ -101,7 +101,7 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
switch (info->type) {
case BPF_LINK_TYPE_RAW_TRACEPOINT:
jsonw_string_field(json_wtr, "tp_name",
- (const char *)info->raw_tracepoint.tp_name);
+ u64_to_ptr(info->raw_tracepoint.tp_name));
break;
case BPF_LINK_TYPE_TRACING:
err = get_prog_info(info->prog_id, &prog_info);
@@ -177,7 +177,7 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
switch (info->type) {
case BPF_LINK_TYPE_RAW_TRACEPOINT:
printf("\n\ttp '%s' ",
- (const char *)info->raw_tracepoint.tp_name);
+ (const char *)u64_to_ptr(info->raw_tracepoint.tp_name));
break;
case BPF_LINK_TYPE_TRACING:
err = get_prog_info(info->prog_id, &prog_info);
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 5cdf0bc049bd9..5917484c2e027 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -21,7 +21,15 @@
/* Make sure we do not use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64

-#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+ return (__u64)(unsigned long)ptr;
+}
+
+static inline void *u64_to_ptr(__u64 ptr)
+{
+ return (void *)(unsigned long)ptr;
+}

#define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); })
#define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index a5eff83496f2d..2c6f7e160b248 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -537,14 +537,14 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
p_info("no instructions returned");
return -1;
}
- buf = (unsigned char *)(info->jited_prog_insns);
+ buf = u64_to_ptr(info->jited_prog_insns);
member_len = info->jited_prog_len;
} else { /* DUMP_XLATED */
if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
p_err("error retrieving insn dump: kernel.kptr_restrict set?");
return -1;
}
- buf = (unsigned char *)info->xlated_prog_insns;
+ buf = u64_to_ptr(info->xlated_prog_insns);
member_len = info->xlated_prog_len;
}

@@ -553,7 +553,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
return -1;
}

- func_info = (void *)info->func_info;
+ func_info = u64_to_ptr(info->func_info);

if (info->nr_line_info) {
prog_linfo = bpf_prog_linfo__new(info);
@@ -571,7 +571,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,

n = write(fd, buf, member_len);
close(fd);
- if (n != member_len) {
+ if (n != (ssize_t)member_len) {
p_err("error writing output file: %s",
n < 0 ? strerror(errno) : "short write");
return -1;
@@ -601,13 +601,13 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
__u32 i;
if (info->nr_jited_ksyms) {
kernel_syms_load(&dd);
- ksyms = (__u64 *) info->jited_ksyms;
+ ksyms = u64_to_ptr(info->jited_ksyms);
}

if (json_output)
jsonw_start_array(json_wtr);

- lens = (__u32 *) info->jited_func_lens;
+ lens = u64_to_ptr(info->jited_func_lens);
for (i = 0; i < info->nr_jited_func_lens; i++) {
if (ksyms) {
sym = kernel_syms_search(&dd, ksyms[i]);
@@ -668,7 +668,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
} else {
kernel_syms_load(&dd);
dd.nr_jited_ksyms = info->nr_jited_ksyms;
- dd.jited_ksyms = (__u64 *) info->jited_ksyms;
+ dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
dd.btf = btf;
dd.func_info = func_info;
dd.finfo_rec_size = info->func_info_rec_size;
@@ -1790,7 +1790,7 @@ static char *profile_target_name(int tgt_fd)
goto out;
}

- func_info = (struct bpf_func_info *)(info_linear->info.func_info);
+ func_info = u64_to_ptr(info_linear->info.func_info);
t = btf__type_by_id(btf, func_info[0].type_id);
if (!t) {
p_err("btf %d doesn't have type %d",
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index e7642a6e39f9e..3ac0094706b81 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2237,7 +2237,7 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
data = elf_getdata(scn, NULL);
if (!scn || !data) {
pr_warn("failed to get Elf_Data from map section %d (%s)\n",
- obj->efile.maps_shndx, MAPS_ELF_SEC);
+ obj->efile.btf_maps_shndx, MAPS_ELF_SEC);
return -EINVAL;
}

@@ -3319,10 +3319,11 @@ bpf_object__probe_global_data(struct bpf_object *obj)

map = bpf_create_map_xattr(&map_attr);
if (map < 0) {
- cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+ ret = -errno;
+ cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
- __func__, cp, errno);
- return -errno;
+ __func__, cp, -ret);
+ return ret;
}

insns[0].imm = map;
@@ -5779,9 +5780,10 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
}

if (bpf_obj_pin(prog->instances.fds[instance], path)) {
- cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+ err = -errno;
+ cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
pr_warn("failed to pin program: %s\n", cp);
- return -errno;
+ return err;
}
pr_debug("pinned program '%s'\n", path);

diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
index 7afa4160416f6..284d5921c3458 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
@@ -159,15 +159,15 @@ void test_bpf_obj_id(void)
/* Check getting link info */
info_len = sizeof(struct bpf_link_info) * 2;
bzero(&link_infos[i], info_len);
- link_infos[i].raw_tracepoint.tp_name = (__u64)&tp_name;
+ link_infos[i].raw_tracepoint.tp_name = ptr_to_u64(&tp_name);
link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name);
err = bpf_obj_get_info_by_fd(bpf_link__fd(links[i]),
&link_infos[i], &info_len);
if (CHECK(err ||
link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT ||
link_infos[i].prog_id != prog_infos[i].id ||
- link_infos[i].raw_tracepoint.tp_name != (__u64)&tp_name ||
- strcmp((char *)link_infos[i].raw_tracepoint.tp_name,
+ link_infos[i].raw_tracepoint.tp_name != ptr_to_u64(&tp_name) ||
+ strcmp(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
"sys_enter") ||
info_len != sizeof(struct bpf_link_info),
"get-link-info(fd)",
@@ -178,7 +178,7 @@ void test_bpf_obj_id(void)
link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT,
link_infos[i].id,
link_infos[i].prog_id, prog_infos[i].id,
- (char *)link_infos[i].raw_tracepoint.tp_name,
+ (const char *)u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
"sys_enter"))
goto done;

diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
index cb33a7ee4e04f..39fb81d9daeb5 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -12,15 +12,16 @@ void btf_dump_printf(void *ctx, const char *fmt, va_list args)
static struct btf_dump_test_case {
const char *name;
const char *file;
+ bool known_ptr_sz;
struct btf_dump_opts opts;
} btf_dump_test_cases[] = {
- {"btf_dump: syntax", "btf_dump_test_case_syntax", {}},
- {"btf_dump: ordering", "btf_dump_test_case_ordering", {}},
- {"btf_dump: padding", "btf_dump_test_case_padding", {}},
- {"btf_dump: packing", "btf_dump_test_case_packing", {}},
- {"btf_dump: bitfields", "btf_dump_test_case_bitfields", {}},
- {"btf_dump: multidim", "btf_dump_test_case_multidim", {}},
- {"btf_dump: namespacing", "btf_dump_test_case_namespacing", {}},
+ {"btf_dump: syntax", "btf_dump_test_case_syntax", true, {}},
+ {"btf_dump: ordering", "btf_dump_test_case_ordering", false, {}},
+ {"btf_dump: padding", "btf_dump_test_case_padding", true, {}},
+ {"btf_dump: packing", "btf_dump_test_case_packing", true, {}},
+ {"btf_dump: bitfields", "btf_dump_test_case_bitfields", true, {}},
+ {"btf_dump: multidim", "btf_dump_test_case_multidim", false, {}},
+ {"btf_dump: namespacing", "btf_dump_test_case_namespacing", false, {}},
};

static int btf_dump_all_types(const struct btf *btf,
@@ -62,6 +63,18 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
goto done;
}

+ /* tests with t->known_ptr_sz have no "long" or "unsigned long" type,
+ * so it's impossible to determine correct pointer size; but if they
+ * do, it should be 8 regardless of host architecture, becaues BPF
+ * target is always 64-bit
+ */
+ if (!t->known_ptr_sz) {
+ btf__set_pointer_size(btf, 8);
+ } else {
+ CHECK(btf__pointer_size(btf) != 8, "ptr_sz", "exp %d, got %zu\n",
+ 8, btf__pointer_size(btf));
+ }
+
snprintf(out_file, sizeof(out_file), "/tmp/%s.output.XXXXXX", t->file);
fd = mkstemp(out_file);
if (CHECK(fd < 0, "create_tmp", "failed to create file: %d\n", fd)) {
diff --git a/tools/testing/selftests/bpf/prog_tests/core_extern.c b/tools/testing/selftests/bpf/prog_tests/core_extern.c
index b093787e94489..1931a158510e0 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_extern.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_extern.c
@@ -159,8 +159,8 @@ void test_core_extern(void)
exp = (uint64_t *)&t->data;
for (j = 0; j < n; j++) {
CHECK(got[j] != exp[j], "check_res",
- "result #%d: expected %lx, but got %lx\n",
- j, exp[j], got[j]);
+ "result #%d: expected %llx, but got %llx\n",
+ j, (__u64)exp[j], (__u64)got[j]);
}
cleanup:
test_core_extern__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
index 084ed26a7d78c..a54eafc5e4b31 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
@@ -237,8 +237,8 @@
.union_sz = sizeof(((type *)0)->union_field), \
.arr_sz = sizeof(((type *)0)->arr_field), \
.arr_elem_sz = sizeof(((type *)0)->arr_field[0]), \
- .ptr_sz = sizeof(((type *)0)->ptr_field), \
- .enum_sz = sizeof(((type *)0)->enum_field), \
+ .ptr_sz = 8, /* always 8-byte pointer for BPF */ \
+ .enum_sz = sizeof(((type *)0)->enum_field), \
}

#define SIZE_CASE(name) { \
@@ -432,20 +432,20 @@ static struct core_reloc_test_case test_cases[] = {
.sb4 = -1,
.sb20 = -0x17654321,
.u32 = 0xBEEF,
- .s32 = -0x3FEDCBA987654321,
+ .s32 = -0x3FEDCBA987654321LL,
}),
BITFIELDS_CASE(bitfields___bitfield_vs_int, {
- .ub1 = 0xFEDCBA9876543210,
+ .ub1 = 0xFEDCBA9876543210LL,
.ub2 = 0xA6,
- .ub7 = -0x7EDCBA987654321,
- .sb4 = -0x6123456789ABCDE,
- .sb20 = 0xD00D,
+ .ub7 = -0x7EDCBA987654321LL,
+ .sb4 = -0x6123456789ABCDELL,
+ .sb20 = 0xD00DLL,
.u32 = -0x76543,
- .s32 = 0x0ADEADBEEFBADB0B,
+ .s32 = 0x0ADEADBEEFBADB0BLL,
}),
BITFIELDS_CASE(bitfields___just_big_enough, {
- .ub1 = 0xF,
- .ub2 = 0x0812345678FEDCBA,
+ .ub1 = 0xFLL,
+ .ub2 = 0x0812345678FEDCBALL,
}),
BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield),

diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
index a895bfed55db0..197d0d217b56b 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
@@ -16,7 +16,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
__u32 duration = 0, retval;
struct bpf_map *data_map;
const int zero = 0;
- u64 *result = NULL;
+ __u64 *result = NULL;

err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
&pkt_obj, &pkt_fd);
@@ -29,7 +29,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,

link = calloc(sizeof(struct bpf_link *), prog_cnt);
prog = calloc(sizeof(struct bpf_program *), prog_cnt);
- result = malloc((prog_cnt + 32 /* spare */) * sizeof(u64));
+ result = malloc((prog_cnt + 32 /* spare */) * sizeof(__u64));
if (CHECK(!link || !prog || !result, "alloc_memory",
"failed to alloc memory"))
goto close_prog;
@@ -72,7 +72,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
goto close_prog;

for (i = 0; i < prog_cnt; i++)
- if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %ld\n",
+ if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %llu\n",
result[i]))
goto close_prog;

diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
index f11f187990e95..cd6dc80edf18e 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -591,7 +591,7 @@ void test_flow_dissector(void)
CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
err || tattr.retval != 1,
tests[i].name,
- "err %d errno %d retval %d duration %d size %u/%lu\n",
+ "err %d errno %d retval %d duration %d size %u/%zu\n",
err, errno, tattr.retval, tattr.duration,
tattr.data_size_out, sizeof(flow_keys));
CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
diff --git a/tools/testing/selftests/bpf/prog_tests/global_data.c b/tools/testing/selftests/bpf/prog_tests/global_data.c
index e3cb62b0a110e..9efa7e50eab27 100644
--- a/tools/testing/selftests/bpf/prog_tests/global_data.c
+++ b/tools/testing/selftests/bpf/prog_tests/global_data.c
@@ -5,7 +5,7 @@
static void test_global_data_number(struct bpf_object *obj, __u32 duration)
{
int i, err, map_fd;
- uint64_t num;
+ __u64 num;

map_fd = bpf_find_map(__func__, obj, "result_number");
if (CHECK_FAIL(map_fd < 0))
@@ -14,7 +14,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration)
struct {
char *name;
uint32_t key;
- uint64_t num;
+ __u64 num;
} tests[] = {
{ "relocate .bss reference", 0, 0 },
{ "relocate .data reference", 1, 42 },
@@ -32,7 +32,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration)
for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num);
CHECK(err || num != tests[i].num, tests[i].name,
- "err %d result %lx expected %lx\n",
+ "err %d result %llx expected %llx\n",
err, num, tests[i].num);
}
}
diff --git a/tools/testing/selftests/bpf/prog_tests/mmap.c b/tools/testing/selftests/bpf/prog_tests/mmap.c
index 43d0b5578f461..9c3c5c0f068fb 100644
--- a/tools/testing/selftests/bpf/prog_tests/mmap.c
+++ b/tools/testing/selftests/bpf/prog_tests/mmap.c
@@ -21,7 +21,7 @@ void test_mmap(void)
const long page_size = sysconf(_SC_PAGE_SIZE);
int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd, rdmap_fd;
struct bpf_map *data_map, *bss_map;
- void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2;
+ void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp0, *tmp1, *tmp2;
struct test_mmap__bss *bss_data;
struct bpf_map_info map_info;
__u32 map_info_sz = sizeof(map_info);
@@ -183,16 +183,23 @@ void test_mmap(void)

/* check some more advanced mmap() manipulations */

+ tmp0 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS,
+ -1, 0);
+ if (CHECK(tmp0 == MAP_FAILED, "adv_mmap0", "errno %d\n", errno))
+ goto cleanup;
+
/* map all but last page: pages 1-3 mapped */
- tmp1 = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED,
+ tmp1 = mmap(tmp0, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
data_map_fd, 0);
- if (CHECK(tmp1 == MAP_FAILED, "adv_mmap1", "errno %d\n", errno))
+ if (CHECK(tmp0 != tmp1, "adv_mmap1", "tmp0: %p, tmp1: %p\n", tmp0, tmp1)) {
+ munmap(tmp0, 4 * page_size);
goto cleanup;
+ }

/* unmap second page: pages 1, 3 mapped */
err = munmap(tmp1 + page_size, page_size);
if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) {
- munmap(tmp1, map_sz);
+ munmap(tmp1, 4 * page_size);
goto cleanup;
}

@@ -201,7 +208,7 @@ void test_mmap(void)
MAP_SHARED | MAP_FIXED, data_map_fd, 0);
if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) {
munmap(tmp1, page_size);
- munmap(tmp1 + 2*page_size, page_size);
+ munmap(tmp1 + 2*page_size, 2 * page_size);
goto cleanup;
}
CHECK(tmp1 + page_size != tmp2, "adv_mmap4",
@@ -211,7 +218,7 @@ void test_mmap(void)
tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
data_map_fd, 0);
if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) {
- munmap(tmp1, 3 * page_size); /* unmap page 1 */
+ munmap(tmp1, 4 * page_size); /* unmap page 1 */
goto cleanup;
}
CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2);
diff --git a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
index dde2b7ae7bc9e..935a294f049a2 100644
--- a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
+++ b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
@@ -28,7 +28,7 @@ void test_prog_run_xattr(void)
"err %d errno %d retval %d\n", err, errno, tattr.retval);

CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out",
- "incorrect output size, want %lu have %u\n",
+ "incorrect output size, want %zu have %u\n",
sizeof(pkt_v4), tattr.data_size_out);

CHECK_ATTR(buf[5] != 0, "overflow",
diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
index 7021b92af3134..c61b2b69710a9 100644
--- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
+++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
@@ -80,7 +80,7 @@ void test_skb_ctx(void)

CHECK_ATTR(tattr.ctx_size_out != sizeof(skb),
"ctx_size_out",
- "incorrect output size, want %lu have %u\n",
+ "incorrect output size, want %zu have %u\n",
sizeof(skb), tattr.ctx_size_out);

for (i = 0; i < 5; i++)
diff --git a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c
index 25b068591e9a4..193002b14d7f6 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c
@@ -19,7 +19,7 @@ static int libbpf_debug_print(enum libbpf_print_level level,
log_buf = va_arg(args, char *);
if (!log_buf)
goto out;
- if (strstr(log_buf, err_str) == 0)
+ if (err_str && strstr(log_buf, err_str) == 0)
found = true;
out:
printf(format, log_buf);
diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h
index 34d84717c9464..69139ed662164 100644
--- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
+++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
@@ -1,5 +1,10 @@
#include <stdint.h>
#include <stdbool.h>
+
+void preserce_ptr_sz_fn(long x) {}
+
+#define __bpf_aligned __attribute__((aligned(8)))
+
/*
* KERNEL
*/
@@ -444,51 +449,51 @@ struct core_reloc_primitives {
char a;
int b;
enum core_reloc_primitives_enum c;
- void *d;
- int (*f)(const char *);
+ void *d __bpf_aligned;
+ int (*f)(const char *) __bpf_aligned;
};

struct core_reloc_primitives___diff_enum_def {
char a;
int b;
- void *d;
- int (*f)(const char *);
+ void *d __bpf_aligned;
+ int (*f)(const char *) __bpf_aligned;
enum {
X = 100,
Y = 200,
- } c; /* inline enum def with differing set of values */
+ } c __bpf_aligned; /* inline enum def with differing set of values */
};

struct core_reloc_primitives___diff_func_proto {
- void (*f)(int); /* incompatible function prototype */
- void *d;
- enum core_reloc_primitives_enum c;
+ void (*f)(int) __bpf_aligned; /* incompatible function prototype */
+ void *d __bpf_aligned;
+ enum core_reloc_primitives_enum c __bpf_aligned;
int b;
char a;
};

struct core_reloc_primitives___diff_ptr_type {
- const char * const d; /* different pointee type + modifiers */
- char a;
+ const char * const d __bpf_aligned; /* different pointee type + modifiers */
+ char a __bpf_aligned;
int b;
enum core_reloc_primitives_enum c;
- int (*f)(const char *);
+ int (*f)(const char *) __bpf_aligned;
};

struct core_reloc_primitives___err_non_enum {
char a[1];
int b;
int c; /* int instead of enum */
- void *d;
- int (*f)(const char *);
+ void *d __bpf_aligned;
+ int (*f)(const char *) __bpf_aligned;
};

struct core_reloc_primitives___err_non_int {
char a[1];
- int *b; /* ptr instead of int */
- enum core_reloc_primitives_enum c;
- void *d;
- int (*f)(const char *);
+ int *b __bpf_aligned; /* ptr instead of int */
+ enum core_reloc_primitives_enum c __bpf_aligned;
+ void *d __bpf_aligned;
+ int (*f)(const char *) __bpf_aligned;
};

struct core_reloc_primitives___err_non_ptr {
@@ -496,7 +501,7 @@ struct core_reloc_primitives___err_non_ptr {
int b;
enum core_reloc_primitives_enum c;
int d; /* int instead of ptr */
- int (*f)(const char *);
+ int (*f)(const char *) __bpf_aligned;
};

/*
@@ -507,7 +512,7 @@ struct core_reloc_mods_output {
};

typedef const int int_t;
-typedef const char *char_ptr_t;
+typedef const char *char_ptr_t __bpf_aligned;
typedef const int arr_t[7];

struct core_reloc_mods_substruct {
@@ -523,9 +528,9 @@ typedef struct {
struct core_reloc_mods {
int a;
int_t b;
- char *c;
+ char *c __bpf_aligned;
char_ptr_t d;
- int e[3];
+ int e[3] __bpf_aligned;
arr_t f;
struct core_reloc_mods_substruct g;
core_reloc_mods_substruct_t h;
@@ -535,9 +540,9 @@ struct core_reloc_mods {
struct core_reloc_mods___mod_swap {
int b;
int_t a;
- char *d;
+ char *d __bpf_aligned;
char_ptr_t c;
- int f[3];
+ int f[3] __bpf_aligned;
arr_t e;
struct {
int y;
@@ -555,7 +560,7 @@ typedef arr1_t arr2_t;
typedef arr2_t arr3_t;
typedef arr3_t arr4_t;

-typedef const char * const volatile fancy_char_ptr_t;
+typedef const char * const volatile fancy_char_ptr_t __bpf_aligned;

typedef core_reloc_mods_substruct_t core_reloc_mods_substruct_tt;

@@ -567,7 +572,7 @@ struct core_reloc_mods___typedefs {
arr4_t e;
fancy_char_ptr_t d;
fancy_char_ptr_t c;
- int3_t b;
+ int3_t b __bpf_aligned;
int3_t a;
};

@@ -739,19 +744,19 @@ struct core_reloc_bitfields___bit_sz_change {
int8_t sb4: 1; /* 4 -> 1 */
int32_t sb20: 30; /* 20 -> 30 */
/* non-bitfields */
- uint16_t u32; /* 32 -> 16 */
- int64_t s32; /* 32 -> 64 */
+ uint16_t u32; /* 32 -> 16 */
+ int64_t s32 __bpf_aligned; /* 32 -> 64 */
};

/* turn bitfield into non-bitfield and vice versa */
struct core_reloc_bitfields___bitfield_vs_int {
uint64_t ub1; /* 3 -> 64 non-bitfield */
uint8_t ub2; /* 20 -> 8 non-bitfield */
- int64_t ub7; /* 7 -> 64 non-bitfield signed */
- int64_t sb4; /* 4 -> 64 non-bitfield signed */
- uint64_t sb20; /* 20 -> 16 non-bitfield unsigned */
- int32_t u32: 20; /* 32 non-bitfield -> 20 bitfield */
- uint64_t s32: 60; /* 32 non-bitfield -> 60 bitfield */
+ int64_t ub7 __bpf_aligned; /* 7 -> 64 non-bitfield signed */
+ int64_t sb4 __bpf_aligned; /* 4 -> 64 non-bitfield signed */
+ uint64_t sb20 __bpf_aligned; /* 20 -> 16 non-bitfield unsigned */
+ int32_t u32: 20; /* 32 non-bitfield -> 20 bitfield */
+ uint64_t s32: 60 __bpf_aligned; /* 32 non-bitfield -> 60 bitfield */
};

struct core_reloc_bitfields___just_big_enough {
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index 305fae8f80a98..c75fc6447186a 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -3883,7 +3883,7 @@ static int test_big_btf_info(unsigned int test_num)
info_garbage.garbage = 0;
err = bpf_obj_get_info_by_fd(btf_fd, info, &info_len);
if (CHECK(err || info_len != sizeof(*info),
- "err:%d errno:%d info_len:%u sizeof(*info):%lu",
+ "err:%d errno:%d info_len:%u sizeof(*info):%zu",
err, errno, info_len, sizeof(*info))) {
err = -1;
goto done;
@@ -4094,7 +4094,7 @@ static int do_test_get_info(unsigned int test_num)
if (CHECK(err || !info.id || info_len != sizeof(info) ||
info.btf_size != raw_btf_size ||
(ret = memcmp(raw_btf, user_btf, expected_nbytes)),
- "err:%d errno:%d info.id:%u info_len:%u sizeof(info):%lu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d",
+ "err:%d errno:%d info.id:%u info_len:%u sizeof(info):%zu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d",
err, errno, info.id, info_len, sizeof(info),
raw_btf_size, info.btf_size, expected_nbytes, ret)) {
err = -1;
@@ -4730,7 +4730,7 @@ ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind,

nexpected_line = snprintf(expected_line, line_size,
"%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
- "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
+ "{%llu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
"%u,0x%x,[[%d,%d],[%d,%d]]}\n",
percpu_map ? "\tcpu" : "",
percpu_map ? cpu : next_key,
@@ -4738,7 +4738,7 @@ ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind,
v->unused_bits2a,
v->bits28,
v->unused_bits2b,
- v->ui64,
+ (__u64)v->ui64,
v->ui8a[0], v->ui8a[1],
v->ui8a[2], v->ui8a[3],
v->ui8a[4], v->ui8a[5],
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index b809246039181..b5670350e3263 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -133,6 +133,11 @@ static inline __u64 ptr_to_u64(const void *ptr)
return (__u64) (unsigned long) ptr;
}

+static inline void *u64_to_ptr(__u64 ptr)
+{
+ return (void *) (unsigned long) ptr;
+}
+
int bpf_find_map(const char *test, struct bpf_object *obj, const char *name);
int compare_map_keys(int map1_fd, int map2_fd);
int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);
diff --git a/tools/testing/selftests/net/icmp_redirect.sh b/tools/testing/selftests/net/icmp_redirect.sh
index 18c5de53558af..bf361f30d6ef9 100755
--- a/tools/testing/selftests/net/icmp_redirect.sh
+++ b/tools/testing/selftests/net/icmp_redirect.sh
@@ -180,6 +180,8 @@ setup()
;;
r[12]) ip netns exec $ns sysctl -q -w net.ipv4.ip_forward=1
ip netns exec $ns sysctl -q -w net.ipv4.conf.all.send_redirects=1
+ ip netns exec $ns sysctl -q -w net.ipv4.conf.default.rp_filter=0
+ ip netns exec $ns sysctl -q -w net.ipv4.conf.all.rp_filter=0

ip netns exec $ns sysctl -q -w net.ipv6.conf.all.forwarding=1
ip netns exec $ns sysctl -q -w net.ipv6.route.mtu_expires=10
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
index a2d7b0e3dca97..a26ac122c759f 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
@@ -91,8 +91,6 @@ int back_to_back_ebbs(void)
ebb_global_disable();
ebb_freeze_pmcs();

- count_pmc(1, sample_period);
-
dump_ebb_state();

event_close(&event);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
index bc893813483ee..bb9f587fa76e8 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
@@ -42,8 +42,6 @@ int cycles(void)
ebb_global_disable();
ebb_freeze_pmcs();

- count_pmc(1, sample_period);
-
dump_ebb_state();

event_close(&event);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
index dcd351d203289..9ae795ce314e6 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
@@ -99,8 +99,6 @@ int cycles_with_freeze(void)
ebb_global_disable();
ebb_freeze_pmcs();

- count_pmc(1, sample_period);
-
dump_ebb_state();

printf("EBBs while frozen %d\n", ebbs_while_frozen);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
index 94c99c12c0f23..4b45a2e70f62b 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
@@ -71,8 +71,6 @@ int cycles_with_mmcr2(void)
ebb_global_disable();
ebb_freeze_pmcs();

- count_pmc(1, sample_period);
-
dump_ebb_state();

event_close(&event);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
index dfbc5c3ad52d7..21537d6eb6b7d 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
@@ -396,8 +396,6 @@ int ebb_child(union pipe read_pipe, union pipe write_pipe)
ebb_global_disable();
ebb_freeze_pmcs();

- count_pmc(1, sample_period);
-
dump_ebb_state();

event_close(&event);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c
index ca2f7d729155b..b208bf6ad58d3 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c
@@ -38,8 +38,6 @@ static int victim_child(union pipe read_pipe, union pipe write_pipe)
ebb_global_disable();
ebb_freeze_pmcs();

- count_pmc(1, sample_period);
-
dump_ebb_state();

FAIL_IF(ebb_state.stats.ebb_count == 0);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
index ac3e6e182614a..ba2681a12cc7b 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
@@ -75,7 +75,6 @@ static int test_body(void)
ebb_freeze_pmcs();
ebb_global_disable();

- count_pmc(4, sample_period);
mtspr(SPRN_PMC4, 0xdead);

dump_summary_ebb_state();
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c
index b8242e9d97d2d..791d37ba327b5 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c
@@ -70,13 +70,6 @@ int multi_counter(void)
ebb_global_disable();
ebb_freeze_pmcs();

- count_pmc(1, sample_period);
- count_pmc(2, sample_period);
- count_pmc(3, sample_period);
- count_pmc(4, sample_period);
- count_pmc(5, sample_period);
- count_pmc(6, sample_period);
-
dump_ebb_state();

for (i = 0; i < 6; i++)
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c
index a05c0e18ded63..9b0f70d597020 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c
@@ -61,8 +61,6 @@ static int cycles_child(void)
ebb_global_disable();
ebb_freeze_pmcs();

- count_pmc(1, sample_period);
-
dump_summary_ebb_state();

event_close(&event);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c
index 153ebc92234fd..2904c741e04e5 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c
@@ -82,8 +82,6 @@ static int test_body(void)
ebb_global_disable();
ebb_freeze_pmcs();

- count_pmc(1, sample_period);
-
dump_ebb_state();

if (mmcr0_mismatch)
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
index eadad75ed7e6f..b29f8ba22d1e6 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
@@ -76,8 +76,6 @@ int pmc56_overflow(void)
ebb_global_disable();
ebb_freeze_pmcs();

- count_pmc(2, sample_period);
-
dump_ebb_state();

printf("PMC5/6 overflow %d\n", pmc56_overflowed);