mirror of https://github.com/Fishwaldo/build.git, synced 2025-03-30 02:31:46 +00:00
Diff: 7351 lines, 229 KiB
diff --git a/Makefile b/Makefile
index 94673d2a6a27..fcfef30ca9a6 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
-SUBLEVEL = 112
+SUBLEVEL = 113
EXTRAVERSION =
NAME = Petit Gorille
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 083560e9e571..4dac1169f528 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -9,6 +9,7 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_RAM=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 1f945d0f40da..208bf2c9e7b0 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -107,6 +107,7 @@ ENTRY(stext)
; r2 = pointer to uboot provided cmdline or external DTB in mem
; These are handled later in handle_uboot_args()
st r0, [@uboot_tag]
+ st r1, [@uboot_magic]
st r2, [@uboot_arg]
#endif
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 709649e5f9bc..6b8d106e0d53 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -35,6 +35,7 @@ unsigned int intr_to_DE_cnt;

/* Part of U-boot ABI: see head.S */
int __initdata uboot_tag;
+int __initdata uboot_magic;
char __initdata *uboot_arg;

const struct machine_desc *machine_desc;
@@ -433,6 +434,8 @@ static inline bool uboot_arg_invalid(unsigned long addr)
#define UBOOT_TAG_NONE 0
#define UBOOT_TAG_CMDLINE 1
#define UBOOT_TAG_DTB 2
+/* We always pass 0 as magic from U-boot */
+#define UBOOT_MAGIC_VALUE 0

void __init handle_uboot_args(void)
{
@@ -448,6 +451,11 @@ void __init handle_uboot_args(void)
goto ignore_uboot_args;
}

+ if (uboot_magic != UBOOT_MAGIC_VALUE) {
+ pr_warn(IGNORE_ARGS "non zero uboot magic\n");
+ goto ignore_uboot_args;
+ }
+
if (uboot_tag != UBOOT_TAG_NONE &&
uboot_arg_invalid((unsigned long)uboot_arg)) {
pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl
index fac0533ea633..f64e8413ab9a 100644
--- a/arch/arm/crypto/sha256-armv4.pl
+++ b/arch/arm/crypto/sha256-armv4.pl
@@ -205,10 +205,11 @@ K256:
.global sha256_block_data_order
.type sha256_block_data_order,%function
sha256_block_data_order:
+.Lsha256_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha256_block_data_order
#else
- adr r3,sha256_block_data_order
+ adr r3,.Lsha256_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped
index 555a1a8eec90..72c248081d27 100644
--- a/arch/arm/crypto/sha256-core.S_shipped
+++ b/arch/arm/crypto/sha256-core.S_shipped
@@ -86,10 +86,11 @@ K256:
.global sha256_block_data_order
.type sha256_block_data_order,%function
sha256_block_data_order:
+.Lsha256_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha256_block_data_order
#else
- adr r3,sha256_block_data_order
+ adr r3,.Lsha256_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl
index a2b11a844357..5fe336420bcf 100644
--- a/arch/arm/crypto/sha512-armv4.pl
+++ b/arch/arm/crypto/sha512-armv4.pl
@@ -267,10 +267,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.global sha512_block_data_order
.type sha512_block_data_order,%function
sha512_block_data_order:
+.Lsha512_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha512_block_data_order
#else
- adr r3,sha512_block_data_order
+ adr r3,.Lsha512_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped
index 3694c4d4ca2b..de9bd7f55242 100644
--- a/arch/arm/crypto/sha512-core.S_shipped
+++ b/arch/arm/crypto/sha512-core.S_shipped
@@ -134,10 +134,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.global sha512_block_data_order
.type sha512_block_data_order,%function
sha512_block_data_order:
+.Lsha512_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha512_block_data_order
#else
- adr r3,sha512_block_data_order
+ adr r3,.Lsha512_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
index a50dc00d79a2..d0a05a3bdb96 100644
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
@@ -16,7 +16,7 @@ struct patch {
unsigned int insn;
};

-static DEFINE_SPINLOCK(patch_lock);
+static DEFINE_RAW_SPINLOCK(patch_lock);

static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
__acquires(&patch_lock)
@@ -33,7 +33,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
return addr;

if (flags)
- spin_lock_irqsave(&patch_lock, *flags);
+ raw_spin_lock_irqsave(&patch_lock, *flags);
else
__acquire(&patch_lock);

@@ -48,7 +48,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
clear_fixmap(fixmap);

if (flags)
- spin_unlock_irqrestore(&patch_lock, *flags);
+ raw_spin_unlock_irqrestore(&patch_lock, *flags);
else
__release(&patch_lock);
}
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index e8229b9fee4a..3265b8f86069 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -258,7 +258,7 @@ config S3C_PM_DEBUG_LED_SMDK

config SAMSUNG_PM_CHECK
bool "S3C2410 PM Suspend Memory CRC"
- depends on PM
+ depends on PM && (PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210)
select CRC32
help
Enable the PM code's memory area checksum over sleep. This option
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 0f0b1b2f3b60..7caeae73348d 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -274,27 +274,16 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
}

#ifdef CONFIG_PPC_PSERIES
-static s32 prrn_update_scope;
-
-static void prrn_work_fn(struct work_struct *work)
+static void handle_prrn_event(s32 scope)
{
/*
* For PRRN, we must pass the negative of the scope value in
* the RTAS event.
*/
- pseries_devicetree_update(-prrn_update_scope);
+ pseries_devicetree_update(-scope);
numa_update_cpu_topology(false);
}

-static DECLARE_WORK(prrn_work, prrn_work_fn);
-
-static void prrn_schedule_update(u32 scope)
-{
- flush_work(&prrn_work);
- prrn_update_scope = scope;
- schedule_work(&prrn_work);
-}
-
static void handle_rtas_event(const struct rtas_error_log *log)
{
if (rtas_error_type(log) != RTAS_TYPE_PRRN || !prrn_is_enabled())
@@ -303,7 +292,7 @@ static void handle_rtas_event(const struct rtas_error_log *log)
/* For PRRN Events the extended log length is used to denote
* the scope for calling rtas update-nodes.
*/
- prrn_schedule_update(rtas_error_extended_log_length(log));
+ handle_prrn_event(rtas_error_extended_log_length(log));
}

#else
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 8949b7ae6d92..fa61c870ada9 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -124,7 +124,7 @@ static void set_cx86_reorder(void)
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */

/* Load/Store Serialize to mem access disable (=reorder it) */
- setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80);
+ setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
/* set load/store serialize from 1GB to 4GB */
ccr3 |= 0xe0;
setCx86(CX86_CCR3, ccr3);
@@ -135,11 +135,11 @@ static void set_cx86_memwb(void)
pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n");

/* CCR2 bit 2: unlock NW bit */
- setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
+ setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
/* set 'Not Write-through' */
write_cr0(read_cr0() | X86_CR0_NW);
/* CCR2 bit 2: lock NW bit and set WT1 */
- setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14);
+ setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
}

/*
@@ -153,14 +153,14 @@ static void geode_configure(void)
local_irq_save(flags);

/* Suspend on halt power saving and enable #SUSP pin */
- setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88);
+ setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);

ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */

/* FPU fast, DTE cache, Mem bypass */
- setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38);
+ setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
setCx86(CX86_CCR3, ccr3); /* disable MAPEN */

set_cx86_memwb();
@@ -296,7 +296,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
/* GXm supports extended cpuid levels 'ala' AMD */
if (c->cpuid_level == 2) {
/* Enable cxMMX extensions (GX1 Datasheet 54) */
- setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1);
+ setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);

/*
* GXm : 0x30 ... 0x5f GXm datasheet 51
@@ -319,7 +319,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
if (dir1 > 7) {
dir0_msn++; /* M II */
/* Enable MMX extensions (App note 108) */
- setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
+ setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
} else {
/* A 6x86MX - it has the bug. */
set_cpu_bug(c, X86_BUG_COMA);
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index afa1a204bc6d..df767e6de8dd 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -909,6 +909,8 @@ int __init hpet_enable(void)
return 0;

hpet_set_mapping();
+ if (!hpet_virt_address)
+ return 0;

/*
* Read the period and check for a sane value:
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 8771766d46b6..9954a604a822 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -352,6 +352,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
#endif
default:
WARN_ON_ONCE(1);
+ return -EINVAL;
}

/*
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index bc6bc6689e68..1c52acaa5bec 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -596,8 +596,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
mpf_base = base;
mpf_found = true;

- pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n",
- base, base + sizeof(*mpf) - 1, mpf);
+ pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
+ base, base + sizeof(*mpf) - 1);

memblock_reserve(base, sizeof(*mpf));
if (mpf->physptr)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
|
|
index 4bd878c9f7d2..90b7eee6d0f9 100644
|
|
--- a/arch/x86/kvm/vmx.c
|
|
+++ b/arch/x86/kvm/vmx.c
|
|
@@ -11846,24 +11846,6 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
|
|
kvm_clear_interrupt_queue(vcpu);
|
|
}
|
|
|
|
-static void load_vmcs12_mmu_host_state(struct kvm_vcpu *vcpu,
|
|
- struct vmcs12 *vmcs12)
|
|
-{
|
|
- u32 entry_failure_code;
|
|
-
|
|
- nested_ept_uninit_mmu_context(vcpu);
|
|
-
|
|
- /*
|
|
- * Only PDPTE load can fail as the value of cr3 was checked on entry and
|
|
- * couldn't have changed.
|
|
- */
|
|
- if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
|
|
- nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
|
|
-
|
|
- if (!enable_ept)
|
|
- vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
|
|
-}
|
|
-
|
|
/*
|
|
* A part of what we need to when the nested L2 guest exits and we want to
|
|
* run its L1 parent, is to reset L1's guest state to the host state specified
|
|
@@ -11877,6 +11859,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
|
|
struct vmcs12 *vmcs12)
|
|
{
|
|
struct kvm_segment seg;
|
|
+ u32 entry_failure_code;
|
|
|
|
if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
|
|
vcpu->arch.efer = vmcs12->host_ia32_efer;
|
|
@@ -11903,7 +11886,17 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
|
|
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
|
|
vmx_set_cr4(vcpu, vmcs12->host_cr4);
|
|
|
|
- load_vmcs12_mmu_host_state(vcpu, vmcs12);
|
|
+ nested_ept_uninit_mmu_context(vcpu);
|
|
+
|
|
+ /*
|
|
+ * Only PDPTE load can fail as the value of cr3 was checked on entry and
|
|
+ * couldn't have changed.
|
|
+ */
|
|
+ if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
|
|
+ nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
|
|
+
|
|
+ if (!enable_ept)
|
|
+ vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
|
|
|
|
if (enable_vpid) {
|
|
/*
|
|
@@ -11994,6 +11987,140 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
|
|
nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
|
|
}
|
|
|
|
+static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
|
|
+{
|
|
+ struct shared_msr_entry *efer_msr;
|
|
+ unsigned int i;
|
|
+
|
|
+ if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
|
|
+ return vmcs_read64(GUEST_IA32_EFER);
|
|
+
|
|
+ if (cpu_has_load_ia32_efer)
|
|
+ return host_efer;
|
|
+
|
|
+ for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
|
|
+ if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
|
|
+ return vmx->msr_autoload.guest.val[i].value;
|
|
+ }
|
|
+
|
|
+ efer_msr = find_msr_entry(vmx, MSR_EFER);
|
|
+ if (efer_msr)
|
|
+ return efer_msr->data;
|
|
+
|
|
+ return host_efer;
|
|
+}
|
|
+
|
|
+static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
|
|
+{
|
|
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
|
|
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
+ struct vmx_msr_entry g, h;
|
|
+ struct msr_data msr;
|
|
+ gpa_t gpa;
|
|
+ u32 i, j;
|
|
+
|
|
+ vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
|
|
+
|
|
+ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
|
|
+ /*
|
|
+ * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
|
|
+ * as vmcs01.GUEST_DR7 contains a userspace defined value
|
|
+ * and vcpu->arch.dr7 is not squirreled away before the
|
|
+ * nested VMENTER (not worth adding a variable in nested_vmx).
|
|
+ */
|
|
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
|
|
+ kvm_set_dr(vcpu, 7, DR7_FIXED_1);
|
|
+ else
|
|
+ WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Note that calling vmx_set_{efer,cr0,cr4} is important as they
|
|
+ * handle a variety of side effects to KVM's software model.
|
|
+ */
|
|
+ vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
|
|
+
|
|
+ vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
|
|
+ vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
|
|
+
|
|
+ vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
|
|
+ vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
|
|
+
|
|
+ nested_ept_uninit_mmu_context(vcpu);
|
|
+ vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
|
|
+ __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
|
|
+
|
|
+ /*
|
|
+ * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
|
|
+ * from vmcs01 (if necessary). The PDPTRs are not loaded on
|
|
+ * VMFail, like everything else we just need to ensure our
|
|
+ * software model is up-to-date.
|
|
+ */
|
|
+ ept_save_pdptrs(vcpu);
|
|
+
|
|
+ kvm_mmu_reset_context(vcpu);
|
|
+
|
|
+ if (cpu_has_vmx_msr_bitmap())
|
|
+ vmx_update_msr_bitmap(vcpu);
|
|
+
|
|
+ /*
|
|
+ * This nasty bit of open coding is a compromise between blindly
|
|
+ * loading L1's MSRs using the exit load lists (incorrect emulation
|
|
+ * of VMFail), leaving the nested VM's MSRs in the software model
|
|
+ * (incorrect behavior) and snapshotting the modified MSRs (too
|
|
+ * expensive since the lists are unbound by hardware). For each
|
|
+ * MSR that was (prematurely) loaded from the nested VMEntry load
|
|
+ * list, reload it from the exit load list if it exists and differs
|
|
+ * from the guest value. The intent is to stuff host state as
|
|
+ * silently as possible, not to fully process the exit load list.
|
|
+ */
|
|
+ msr.host_initiated = false;
|
|
+ for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
|
|
+ gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
|
|
+ if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
|
|
+ pr_debug_ratelimited(
|
|
+ "%s read MSR index failed (%u, 0x%08llx)\n",
|
|
+ __func__, i, gpa);
|
|
+ goto vmabort;
|
|
+ }
|
|
+
|
|
+ for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
|
|
+ gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
|
|
+ if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
|
|
+ pr_debug_ratelimited(
|
|
+ "%s read MSR failed (%u, 0x%08llx)\n",
|
|
+ __func__, j, gpa);
|
|
+ goto vmabort;
|
|
+ }
|
|
+ if (h.index != g.index)
|
|
+ continue;
|
|
+ if (h.value == g.value)
|
|
+ break;
|
|
+
|
|
+ if (nested_vmx_load_msr_check(vcpu, &h)) {
|
|
+ pr_debug_ratelimited(
|
|
+ "%s check failed (%u, 0x%x, 0x%x)\n",
|
|
+ __func__, j, h.index, h.reserved);
|
|
+ goto vmabort;
|
|
+ }
|
|
+
|
|
+ msr.index = h.index;
|
|
+ msr.data = h.value;
|
|
+ if (kvm_set_msr(vcpu, &msr)) {
|
|
+ pr_debug_ratelimited(
|
|
+ "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
|
|
+ __func__, j, h.index, h.value);
|
|
+ goto vmabort;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return;
|
|
+
|
|
+vmabort:
|
|
+ nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
|
|
+}
|
|
+
|
|
/*
|
|
* Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
|
|
* and modify vmcs12 to make it see what it would expect to see there if
|
|
@@ -12126,7 +12253,13 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
|
|
*/
|
|
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
|
|
|
|
- load_vmcs12_mmu_host_state(vcpu, vmcs12);
|
|
+ /*
|
|
+ * Restore L1's host state to KVM's software model. We're here
|
|
+ * because a consistency check was caught by hardware, which
|
|
+ * means some amount of guest state has been propagated to KVM's
|
|
+ * model and needs to be unwound to the host's state.
|
|
+ */
|
|
+ nested_vmx_restore_host_state(vcpu);
|
|
|
|
/*
|
|
* The emulated instruction was already skipped in
|
|
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
|
|
index 3d624c72c6c2..ebfc06f29f7b 100644
|
|
--- a/drivers/acpi/ec.c
|
|
+++ b/drivers/acpi/ec.c
|
|
@@ -194,6 +194,7 @@ static struct workqueue_struct *ec_query_wq;
|
|
static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
|
|
static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
|
|
static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
|
|
+static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
|
|
|
|
/* --------------------------------------------------------------------------
|
|
* Logging/Debugging
|
|
@@ -499,6 +500,26 @@ static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
|
|
ec_log_drv("event blocked");
|
|
}
|
|
|
|
+/*
|
|
+ * Process _Q events that might have accumulated in the EC.
|
|
+ * Run with locked ec mutex.
|
|
+ */
|
|
+static void acpi_ec_clear(struct acpi_ec *ec)
|
|
+{
|
|
+ int i, status;
|
|
+ u8 value = 0;
|
|
+
|
|
+ for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
|
|
+ status = acpi_ec_query(ec, &value);
|
|
+ if (status || !value)
|
|
+ break;
|
|
+ }
|
|
+ if (unlikely(i == ACPI_EC_CLEAR_MAX))
|
|
+ pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
|
|
+ else
|
|
+ pr_info("%d stale EC events cleared\n", i);
|
|
+}
|
|
+
|
|
static void acpi_ec_enable_event(struct acpi_ec *ec)
|
|
{
|
|
unsigned long flags;
|
|
@@ -507,6 +528,10 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
|
|
if (acpi_ec_started(ec))
|
|
__acpi_ec_enable_event(ec);
|
|
spin_unlock_irqrestore(&ec->lock, flags);
|
|
+
|
|
+ /* Drain additional events if hardware requires that */
|
|
+ if (EC_FLAGS_CLEAR_ON_RESUME)
|
|
+ acpi_ec_clear(ec);
|
|
}
|
|
|
|
#ifdef CONFIG_PM_SLEEP
|
|
@@ -1802,6 +1827,31 @@ static int ec_flag_query_handshake(const struct dmi_system_id *id)
|
|
}
|
|
#endif
|
|
|
|
+/*
|
|
+ * On some hardware it is necessary to clear events accumulated by the EC during
|
|
+ * sleep. These ECs stop reporting GPEs until they are manually polled, if too
|
|
+ * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
|
|
+ *
|
|
+ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
|
|
+ *
|
|
+ * Ideally, the EC should also be instructed NOT to accumulate events during
|
|
+ * sleep (which Windows seems to do somehow), but the interface to control this
|
|
+ * behaviour is not known at this time.
|
|
+ *
|
|
+ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
|
|
+ * however it is very likely that other Samsung models are affected.
|
|
+ *
|
|
+ * On systems which don't accumulate _Q events during sleep, this extra check
|
|
+ * should be harmless.
|
|
+ */
|
|
+static int ec_clear_on_resume(const struct dmi_system_id *id)
|
|
+{
|
|
+ pr_debug("Detected system needing EC poll on resume.\n");
|
|
+ EC_FLAGS_CLEAR_ON_RESUME = 1;
|
|
+ ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/*
|
|
* Some ECDTs contain wrong register addresses.
|
|
* MSI MS-171F
|
|
@@ -1851,6 +1901,9 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
|
|
ec_honor_ecdt_gpe, "ASUS X580VD", {
|
|
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
|
|
+ {
|
|
+ ec_clear_on_resume, "Samsung hardware", {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
|
|
{},
|
|
};
|
|
|
|
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
|
|
index a2428e9462dd..3c092f07d7e3 100644
|
|
--- a/drivers/acpi/sbs.c
|
|
+++ b/drivers/acpi/sbs.c
|
|
@@ -441,9 +441,13 @@ static int acpi_ac_get_present(struct acpi_sbs *sbs)
|
|
|
|
/*
|
|
* The spec requires that bit 4 always be 1. If it's not set, assume
|
|
- * that the implementation doesn't support an SBS charger
|
|
+ * that the implementation doesn't support an SBS charger.
|
|
+ *
|
|
+ * And on some MacBooks a status of 0xffff is always returned, no
|
|
+ * matter whether the charger is plugged in or not, which is also
|
|
+ * wrong, so ignore the SBS charger for those too.
|
|
*/
|
|
- if (!((status >> 4) & 0x1))
|
|
+ if (!((status >> 4) & 0x1) || status == 0xffff)
|
|
return -ENODEV;
|
|
|
|
sbs->charger_present = (status >> 15) & 0x1;
|
|
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
|
|
index 036eec404289..2d927feb3db4 100644
|
|
--- a/drivers/auxdisplay/hd44780.c
|
|
+++ b/drivers/auxdisplay/hd44780.c
|
|
@@ -302,6 +302,8 @@ static int hd44780_remove(struct platform_device *pdev)
|
|
struct charlcd *lcd = platform_get_drvdata(pdev);
|
|
|
|
charlcd_unregister(lcd);
|
|
+
|
|
+ kfree(lcd);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/base/node.c b/drivers/base/node.c
|
|
index ee090ab9171c..5c39f14d15a5 100644
|
|
--- a/drivers/base/node.c
|
|
+++ b/drivers/base/node.c
|
|
@@ -197,11 +197,16 @@ static ssize_t node_read_vmstat(struct device *dev,
|
|
sum_zone_numa_state(nid, i));
|
|
#endif
|
|
|
|
- for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
|
|
+ for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
|
|
+ /* Skip hidden vmstat items. */
|
|
+ if (*vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
|
|
+ NR_VM_NUMA_STAT_ITEMS] == '\0')
|
|
+ continue;
|
|
n += sprintf(buf+n, "%s %lu\n",
|
|
vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
|
|
NR_VM_NUMA_STAT_ITEMS],
|
|
node_page_state(pgdat, i));
|
|
+ }
|
|
|
|
return n;
|
|
}
|
|
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
|
|
index 6eb5cb92b986..9f82e14983f6 100644
|
|
--- a/drivers/crypto/axis/artpec6_crypto.c
|
|
+++ b/drivers/crypto/axis/artpec6_crypto.c
|
|
@@ -284,6 +284,7 @@ enum artpec6_crypto_hash_flags {
|
|
|
|
struct artpec6_crypto_req_common {
|
|
struct list_head list;
|
|
+ struct list_head complete_in_progress;
|
|
struct artpec6_crypto_dma_descriptors *dma;
|
|
struct crypto_async_request *req;
|
|
void (*complete)(struct crypto_async_request *req);
|
|
@@ -2046,7 +2047,8 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
|
|
return artpec6_crypto_dma_map_descs(common);
|
|
}
|
|
|
|
-static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
|
|
+static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
|
|
+ struct list_head *completions)
|
|
{
|
|
struct artpec6_crypto_req_common *req;
|
|
|
|
@@ -2057,7 +2059,7 @@ static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
|
|
list_move_tail(&req->list, &ac->pending);
|
|
artpec6_crypto_start_dma(req);
|
|
|
|
- req->req->complete(req->req, -EINPROGRESS);
|
|
+ list_add_tail(&req->complete_in_progress, completions);
|
|
}
|
|
|
|
/*
|
|
@@ -2087,6 +2089,11 @@ static void artpec6_crypto_task(unsigned long data)
|
|
struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
|
|
struct artpec6_crypto_req_common *req;
|
|
struct artpec6_crypto_req_common *n;
|
|
+ struct list_head complete_done;
|
|
+ struct list_head complete_in_progress;
|
|
+
|
|
+ INIT_LIST_HEAD(&complete_done);
|
|
+ INIT_LIST_HEAD(&complete_in_progress);
|
|
|
|
if (list_empty(&ac->pending)) {
|
|
pr_debug("Spurious IRQ\n");
|
|
@@ -2120,19 +2127,30 @@ static void artpec6_crypto_task(unsigned long data)
|
|
|
|
pr_debug("Completing request %p\n", req);
|
|
|
|
- list_del(&req->list);
|
|
+ list_move_tail(&req->list, &complete_done);
|
|
|
|
artpec6_crypto_dma_unmap_all(req);
|
|
artpec6_crypto_copy_bounce_buffers(req);
|
|
|
|
ac->pending_count--;
|
|
artpec6_crypto_common_destroy(req);
|
|
- req->complete(req->req);
|
|
}
|
|
|
|
- artpec6_crypto_process_queue(ac);
|
|
+ artpec6_crypto_process_queue(ac, &complete_in_progress);
|
|
|
|
spin_unlock_bh(&ac->queue_lock);
|
|
+
|
|
+ /* Perform the completion callbacks without holding the queue lock
|
|
+ * to allow new request submissions from the callbacks.
|
|
+ */
|
|
+ list_for_each_entry_safe(req, n, &complete_done, list) {
|
|
+ req->complete(req->req);
|
|
+ }
|
|
+
|
|
+ list_for_each_entry_safe(req, n, &complete_in_progress,
|
|
+ complete_in_progress) {
|
|
+ req->req->complete(req->req, -EINPROGRESS);
|
|
+ }
|
|
}
|
|
|
|
static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
|
|
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
|
|
index 2943dfc4c470..822ad220f0af 100644
|
|
--- a/drivers/gpio/gpio-pxa.c
|
|
+++ b/drivers/gpio/gpio-pxa.c
|
|
@@ -776,6 +776,9 @@ static int pxa_gpio_suspend(void)
|
|
struct pxa_gpio_bank *c;
|
|
int gpio;
|
|
|
|
+ if (!pchip)
|
|
+ return 0;
|
|
+
|
|
for_each_gpio_bank(gpio, c, pchip) {
|
|
c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET);
|
|
c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
|
|
@@ -794,6 +797,9 @@ static void pxa_gpio_resume(void)
|
|
struct pxa_gpio_bank *c;
|
|
int gpio;
|
|
|
|
+ if (!pchip)
|
|
+ return;
|
|
+
|
|
for_each_gpio_bank(gpio, c, pchip) {
|
|
/* restore level with set/clear */
|
|
writel_relaxed(c->saved_gplr, c->regbase + GPSR_OFFSET);
|
|
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
|
|
index 164fa4b1f9a9..732b8fbbca68 100644
|
|
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
|
|
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
|
|
@@ -285,57 +285,7 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
|
|
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
|
|
struct queue_properties *q)
|
|
{
|
|
- uint64_t addr;
|
|
- struct cik_mqd *m;
|
|
- int retval;
|
|
-
|
|
- retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
|
|
- mqd_mem_obj);
|
|
-
|
|
- if (retval != 0)
|
|
- return -ENOMEM;
|
|
-
|
|
- m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
|
|
- addr = (*mqd_mem_obj)->gpu_addr;
|
|
-
|
|
- memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
|
|
-
|
|
- m->header = 0xC0310800;
|
|
- m->compute_pipelinestat_enable = 1;
|
|
- m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
|
|
- m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
|
|
- m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
|
|
- m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
|
|
-
|
|
- m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE |
|
|
- PRELOAD_REQ;
|
|
- m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
|
|
- QUANTUM_DURATION(10);
|
|
-
|
|
- m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
|
|
- m->cp_mqd_base_addr_lo = lower_32_bits(addr);
|
|
- m->cp_mqd_base_addr_hi = upper_32_bits(addr);
|
|
-
|
|
- m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
|
|
-
|
|
- /*
|
|
- * Pipe Priority
|
|
- * Identifies the pipe relative priority when this queue is connected
|
|
- * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
|
|
- * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
|
|
- * 0 = CS_LOW (typically below GFX)
|
|
- * 1 = CS_MEDIUM (typically between HP3D and GFX
|
|
- * 2 = CS_HIGH (typically above HP3D)
|
|
- */
|
|
- m->cp_hqd_pipe_priority = 1;
|
|
- m->cp_hqd_queue_priority = 15;
|
|
-
|
|
- *mqd = m;
|
|
- if (gart_addr)
|
|
- *gart_addr = addr;
|
|
- retval = mm->update_mqd(mm, m, q);
|
|
-
|
|
- return retval;
|
|
+ return init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
|
|
}
|
|
|
|
static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
|
|
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
|
|
index 8a0f85f5fc1a..6a765682fbfa 100644
|
|
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
|
|
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
|
|
@@ -38,6 +38,7 @@ int nvkm_volt_set_id(struct nvkm_volt *, u8 id, u8 min_id, u8 temp,
|
|
|
|
int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
|
|
int gf100_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
|
|
+int gf117_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
|
|
int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
|
|
int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
|
|
int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
|
|
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
|
|
index 9109b69cd052..9635704a1d86 100644
|
|
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
|
|
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
|
|
@@ -161,7 +161,7 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
|
|
}
|
|
|
|
ret = pm_runtime_get_sync(drm->dev);
|
|
- if (IS_ERR_VALUE(ret) && ret != -EACCES)
|
|
+ if (ret < 0 && ret != -EACCES)
|
|
return ret;
|
|
ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
|
|
pm_runtime_put_autosuspend(drm->dev);
|
|
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
|
|
index e096a5d9c292..f8dd78e21456 100644
|
|
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
|
|
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
|
|
@@ -1612,7 +1612,7 @@ nvd7_chipset = {
|
|
.pci = gf106_pci_new,
|
|
.therm = gf119_therm_new,
|
|
.timer = nv41_timer_new,
|
|
- .volt = gf100_volt_new,
|
|
+ .volt = gf117_volt_new,
|
|
.ce[0] = gf100_ce_new,
|
|
.disp = gf119_disp_new,
|
|
.dma = gf119_dma_new,
|
|
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
|
|
index bcd179ba11d0..146adcdd316a 100644
|
|
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
|
|
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
|
|
@@ -2,6 +2,7 @@ nvkm-y += nvkm/subdev/volt/base.o
|
|
nvkm-y += nvkm/subdev/volt/gpio.o
|
|
nvkm-y += nvkm/subdev/volt/nv40.o
|
|
nvkm-y += nvkm/subdev/volt/gf100.o
|
|
+nvkm-y += nvkm/subdev/volt/gf117.o
|
|
nvkm-y += nvkm/subdev/volt/gk104.o
|
|
nvkm-y += nvkm/subdev/volt/gk20a.o
|
|
nvkm-y += nvkm/subdev/volt/gm20b.o
|
|
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
|
|
new file mode 100644
|
|
index 000000000000..547a58f0aeac
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
|
|
@@ -0,0 +1,60 @@
|
|
+/*
|
|
+ * Copyright 2019 Ilia Mirkin
|
|
+ *
|
|
+ * Permission is hereby granted, free of charge, to any person obtaining a
|
|
+ * copy of this software and associated documentation files (the "Software"),
|
|
+ * to deal in the Software without restriction, including without limitation
|
|
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
+ * and/or sell copies of the Software, and to permit persons to whom the
|
|
+ * Software is furnished to do so, subject to the following conditions:
|
|
+ *
|
|
+ * The above copyright notice and this permission notice shall be included in
|
|
+ * all copies or substantial portions of the Software.
|
|
+ *
|
|
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
|
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
|
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
|
+ * OTHER DEALINGS IN THE SOFTWARE.
|
|
+ *
|
|
+ * Authors: Ilia Mirkin
|
|
+ */
|
|
+#include "priv.h"
|
|
+
|
|
+#include <subdev/fuse.h>
|
|
+
|
|
+static int
|
|
+gf117_volt_speedo_read(struct nvkm_volt *volt)
|
|
+{
|
|
+ struct nvkm_device *device = volt->subdev.device;
|
|
+ struct nvkm_fuse *fuse = device->fuse;
|
|
+
|
|
+ if (!fuse)
|
|
+ return -EINVAL;
|
|
+
|
|
+ return nvkm_fuse_read(fuse, 0x3a8);
|
|
+}
|
|
+
|
|
+static const struct nvkm_volt_func
|
|
+gf117_volt = {
|
|
+ .oneinit = gf100_volt_oneinit,
|
|
+ .vid_get = nvkm_voltgpio_get,
|
|
+ .vid_set = nvkm_voltgpio_set,
|
|
+ .speedo_read = gf117_volt_speedo_read,
|
|
+};
|
|
+
|
|
+int
|
|
+gf117_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
|
|
+{
|
|
+ struct nvkm_volt *volt;
|
|
+ int ret;
|
|
+
|
|
+ ret = nvkm_volt_new_(&gf117_volt, device, index, &volt);
|
|
+ *pvolt = volt;
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ return nvkm_voltgpio_init(volt);
|
|
+}
|
|
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
|
|
index 6ba93449fcfb..58b67e0cc385 100644
|
|
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
|
|
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
|
|
@@ -40,7 +40,6 @@ static inline struct innolux_panel *to_innolux_panel(struct drm_panel *panel)
|
|
static int innolux_panel_disable(struct drm_panel *panel)
|
|
{
|
|
struct innolux_panel *innolux = to_innolux_panel(panel);
|
|
- int err;
|
|
|
|
if (!innolux->enabled)
|
|
return 0;
|
|
@@ -48,11 +47,6 @@ static int innolux_panel_disable(struct drm_panel *panel)
|
|
innolux->backlight->props.power = FB_BLANK_POWERDOWN;
|
|
backlight_update_status(innolux->backlight);
|
|
|
|
- err = mipi_dsi_dcs_set_display_off(innolux->link);
|
|
- if (err < 0)
|
|
- DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
|
|
- err);
|
|
-
|
|
innolux->enabled = false;
|
|
|
|
return 0;
|
|
@@ -66,6 +60,11 @@ static int innolux_panel_unprepare(struct drm_panel *panel)
|
|
if (!innolux->prepared)
|
|
return 0;
|
|
|
|
+ err = mipi_dsi_dcs_set_display_off(innolux->link);
|
|
+ if (err < 0)
|
|
+ DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
|
|
+ err);
|
|
+
|
|
err = mipi_dsi_dcs_enter_sleep_mode(innolux->link);
|
|
if (err < 0) {
|
|
DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
|
|
diff --git a/drivers/hid/i2c-hid/Makefile b/drivers/hid/i2c-hid/Makefile
|
|
index 832d8f9aaba2..099e1ce2f234 100644
|
|
--- a/drivers/hid/i2c-hid/Makefile
|
|
+++ b/drivers/hid/i2c-hid/Makefile
|
|
@@ -3,3 +3,6 @@
|
|
#
|
|
|
|
obj-$(CONFIG_I2C_HID) += i2c-hid.o
|
|
+
|
|
+i2c-hid-objs = i2c-hid-core.o
|
|
+i2c-hid-$(CONFIG_DMI) += i2c-hid-dmi-quirks.o
|
|
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
|
|
new file mode 100644
|
|
index 000000000000..7842d76aa813
|
|
--- /dev/null
|
|
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
|
|
@@ -0,0 +1,1299 @@
|
|
+/*
|
|
+ * HID over I2C protocol implementation
|
|
+ *
|
|
+ * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
|
|
+ * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
|
|
+ * Copyright (c) 2012 Red Hat, Inc
|
|
+ *
|
|
+ * This code is partly based on "USB HID support for Linux":
|
|
+ *
|
|
+ * Copyright (c) 1999 Andreas Gal
|
|
+ * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
|
|
+ * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
|
|
+ * Copyright (c) 2007-2008 Oliver Neukum
|
|
+ * Copyright (c) 2006-2010 Jiri Kosina
|
|
+ *
|
|
+ * This file is subject to the terms and conditions of the GNU General Public
|
|
+ * License. See the file COPYING in the main directory of this archive for
|
|
+ * more details.
|
|
+ */
|
|
+
|
|
+#include <linux/module.h>
|
|
+#include <linux/i2c.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/input.h>
|
|
+#include <linux/irq.h>
|
|
+#include <linux/delay.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/pm.h>
|
|
+#include <linux/pm_runtime.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/wait.h>
|
|
+#include <linux/err.h>
|
|
+#include <linux/string.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/jiffies.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/hid.h>
|
|
+#include <linux/mutex.h>
|
|
+#include <linux/acpi.h>
|
|
+#include <linux/of.h>
|
|
+#include <linux/regulator/consumer.h>
|
|
+
|
|
+#include <linux/platform_data/i2c-hid.h>
|
|
+
|
|
+#include "../hid-ids.h"
|
|
+#include "i2c-hid.h"
|
|
+
|
|
+/* quirks to control the device */
|
|
+#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
|
|
+
|
|
+/* flags */
|
|
+#define I2C_HID_STARTED 0
|
|
+#define I2C_HID_RESET_PENDING 1
|
|
+#define I2C_HID_READ_PENDING 2
|
|
+
|
|
+#define I2C_HID_PWR_ON 0x00
|
|
+#define I2C_HID_PWR_SLEEP 0x01
|
|
+
|
|
+/* debug option */
|
|
+static bool debug;
|
|
+module_param(debug, bool, 0444);
|
|
+MODULE_PARM_DESC(debug, "print a lot of debug information");
|
|
+
|
|
+#define i2c_hid_dbg(ihid, fmt, arg...) \
|
|
+do { \
|
|
+ if (debug) \
|
|
+ dev_printk(KERN_DEBUG, &(ihid)->client->dev, fmt, ##arg); \
|
|
+} while (0)
|
|
+
|
|
+struct i2c_hid_desc {
|
|
+ __le16 wHIDDescLength;
|
|
+ __le16 bcdVersion;
|
|
+ __le16 wReportDescLength;
|
|
+ __le16 wReportDescRegister;
|
|
+ __le16 wInputRegister;
|
|
+ __le16 wMaxInputLength;
|
|
+ __le16 wOutputRegister;
|
|
+ __le16 wMaxOutputLength;
|
|
+ __le16 wCommandRegister;
|
|
+ __le16 wDataRegister;
|
|
+ __le16 wVendorID;
|
|
+ __le16 wProductID;
|
|
+ __le16 wVersionID;
|
|
+ __le32 reserved;
|
|
+} __packed;
|
|
+
|
|
+struct i2c_hid_cmd {
|
|
+ unsigned int registerIndex;
|
|
+ __u8 opcode;
|
|
+ unsigned int length;
|
|
+ bool wait;
|
|
+};
|
|
+
|
|
+union command {
|
|
+ u8 data[0];
|
|
+ struct cmd {
|
|
+ __le16 reg;
|
|
+ __u8 reportTypeID;
|
|
+ __u8 opcode;
|
|
+ } __packed c;
|
|
+};
|
|
+
|
|
+#define I2C_HID_CMD(opcode_) \
|
|
+ .opcode = opcode_, .length = 4, \
|
|
+ .registerIndex = offsetof(struct i2c_hid_desc, wCommandRegister)
|
|
+
|
|
+/* fetch HID descriptor */
|
|
+static const struct i2c_hid_cmd hid_descr_cmd = { .length = 2 };
|
|
+/* fetch report descriptors */
|
|
+static const struct i2c_hid_cmd hid_report_descr_cmd = {
|
|
+ .registerIndex = offsetof(struct i2c_hid_desc,
|
|
+ wReportDescRegister),
|
|
+ .opcode = 0x00,
|
|
+ .length = 2 };
|
|
+/* commands */
|
|
+static const struct i2c_hid_cmd hid_reset_cmd = { I2C_HID_CMD(0x01),
|
|
+ .wait = true };
|
|
+static const struct i2c_hid_cmd hid_get_report_cmd = { I2C_HID_CMD(0x02) };
|
|
+static const struct i2c_hid_cmd hid_set_report_cmd = { I2C_HID_CMD(0x03) };
|
|
+static const struct i2c_hid_cmd hid_set_power_cmd = { I2C_HID_CMD(0x08) };
|
|
+static const struct i2c_hid_cmd hid_no_cmd = { .length = 0 };
|
|
+
|
|
+/*
|
|
+ * These definitions are not used here, but are defined by the spec.
|
|
+ * Keeping them here for documentation purposes.
|
|
+ *
|
|
+ * static const struct i2c_hid_cmd hid_get_idle_cmd = { I2C_HID_CMD(0x04) };
|
|
+ * static const struct i2c_hid_cmd hid_set_idle_cmd = { I2C_HID_CMD(0x05) };
|
|
+ * static const struct i2c_hid_cmd hid_get_protocol_cmd = { I2C_HID_CMD(0x06) };
|
|
+ * static const struct i2c_hid_cmd hid_set_protocol_cmd = { I2C_HID_CMD(0x07) };
|
|
+ */
|
|
+
|
|
+static DEFINE_MUTEX(i2c_hid_open_mut);
|
|
+
|
|
+/* The main device structure */
|
|
+struct i2c_hid {
|
|
+ struct i2c_client *client; /* i2c client */
|
|
+ struct hid_device *hid; /* pointer to corresponding HID dev */
|
|
+ union {
|
|
+ __u8 hdesc_buffer[sizeof(struct i2c_hid_desc)];
|
|
+ struct i2c_hid_desc hdesc; /* the HID Descriptor */
|
|
+ };
|
|
+ __le16 wHIDDescRegister; /* location of the i2c
|
|
+ * register of the HID
|
|
+ * descriptor. */
|
|
+ unsigned int bufsize; /* i2c buffer size */
|
|
+ u8 *inbuf; /* Input buffer */
|
|
+ u8 *rawbuf; /* Raw Input buffer */
|
|
+ u8 *cmdbuf; /* Command buffer */
|
|
+ u8 *argsbuf; /* Command arguments buffer */
|
|
+
|
|
+ unsigned long flags; /* device flags */
|
|
+ unsigned long quirks; /* Various quirks */
|
|
+
|
|
+ wait_queue_head_t wait; /* For waiting the interrupt */
|
|
+
|
|
+ struct i2c_hid_platform_data pdata;
|
|
+
|
|
+ bool irq_wake_enabled;
|
|
+ struct mutex reset_lock;
|
|
+};
|
|
+
|
|
+static const struct i2c_hid_quirks {
|
|
+ __u16 idVendor;
|
|
+ __u16 idProduct;
|
|
+ __u32 quirks;
|
|
+} i2c_hid_quirks[] = {
|
|
+ { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8752,
|
|
+ I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
|
|
+ { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
|
|
+ I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
|
|
+ { 0, 0 }
|
|
+};
|
|
+
|
|
+/*
|
|
+ * i2c_hid_lookup_quirk: return any quirks associated with a I2C HID device
|
|
+ * @idVendor: the 16-bit vendor ID
|
|
+ * @idProduct: the 16-bit product ID
|
|
+ *
|
|
+ * Returns: a u32 quirks value.
|
|
+ */
|
|
+static u32 i2c_hid_lookup_quirk(const u16 idVendor, const u16 idProduct)
|
|
+{
|
|
+ u32 quirks = 0;
|
|
+ int n;
|
|
+
|
|
+ for (n = 0; i2c_hid_quirks[n].idVendor; n++)
|
|
+ if (i2c_hid_quirks[n].idVendor == idVendor &&
|
|
+ (i2c_hid_quirks[n].idProduct == (__u16)HID_ANY_ID ||
|
|
+ i2c_hid_quirks[n].idProduct == idProduct))
|
|
+ quirks = i2c_hid_quirks[n].quirks;
|
|
+
|
|
+ return quirks;
|
|
+}
|
|
+
|
|
+static int __i2c_hid_command(struct i2c_client *client,
|
|
+ const struct i2c_hid_cmd *command, u8 reportID,
|
|
+ u8 reportType, u8 *args, int args_len,
|
|
+ unsigned char *buf_recv, int data_len)
|
|
+{
|
|
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
+ union command *cmd = (union command *)ihid->cmdbuf;
|
|
+ int ret;
|
|
+ struct i2c_msg msg[2];
|
|
+ int msg_num = 1;
|
|
+
|
|
+ int length = command->length;
|
|
+ bool wait = command->wait;
|
|
+ unsigned int registerIndex = command->registerIndex;
|
|
+
|
|
+ /* special case for hid_descr_cmd */
|
|
+ if (command == &hid_descr_cmd) {
|
|
+ cmd->c.reg = ihid->wHIDDescRegister;
|
|
+ } else {
|
|
+ cmd->data[0] = ihid->hdesc_buffer[registerIndex];
|
|
+ cmd->data[1] = ihid->hdesc_buffer[registerIndex + 1];
|
|
+ }
|
|
+
|
|
+ if (length > 2) {
|
|
+ cmd->c.opcode = command->opcode;
|
|
+ cmd->c.reportTypeID = reportID | reportType << 4;
|
|
+ }
|
|
+
|
|
+ memcpy(cmd->data + length, args, args_len);
|
|
+ length += args_len;
|
|
+
|
|
+ i2c_hid_dbg(ihid, "%s: cmd=%*ph\n", __func__, length, cmd->data);
|
|
+
|
|
+ msg[0].addr = client->addr;
|
|
+ msg[0].flags = client->flags & I2C_M_TEN;
|
|
+ msg[0].len = length;
|
|
+ msg[0].buf = cmd->data;
|
|
+ if (data_len > 0) {
|
|
+ msg[1].addr = client->addr;
|
|
+ msg[1].flags = client->flags & I2C_M_TEN;
|
|
+ msg[1].flags |= I2C_M_RD;
|
|
+ msg[1].len = data_len;
|
|
+ msg[1].buf = buf_recv;
|
|
+ msg_num = 2;
|
|
+ set_bit(I2C_HID_READ_PENDING, &ihid->flags);
|
|
+ }
|
|
+
|
|
+ if (wait)
|
|
+ set_bit(I2C_HID_RESET_PENDING, &ihid->flags);
|
|
+
|
|
+ ret = i2c_transfer(client->adapter, msg, msg_num);
|
|
+
|
|
+ if (data_len > 0)
|
|
+ clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
|
|
+
|
|
+ if (ret != msg_num)
|
|
+ return ret < 0 ? ret : -EIO;
|
|
+
|
|
+ ret = 0;
|
|
+
|
|
+ if (wait) {
|
|
+ i2c_hid_dbg(ihid, "%s: waiting...\n", __func__);
|
|
+ if (!wait_event_timeout(ihid->wait,
|
|
+ !test_bit(I2C_HID_RESET_PENDING, &ihid->flags),
|
|
+ msecs_to_jiffies(5000)))
|
|
+ ret = -ENODATA;
|
|
+ i2c_hid_dbg(ihid, "%s: finished.\n", __func__);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int i2c_hid_command(struct i2c_client *client,
|
|
+ const struct i2c_hid_cmd *command,
|
|
+ unsigned char *buf_recv, int data_len)
|
|
+{
|
|
+ return __i2c_hid_command(client, command, 0, 0, NULL, 0,
|
|
+ buf_recv, data_len);
|
|
+}
|
|
+
|
|
+static int i2c_hid_get_report(struct i2c_client *client, u8 reportType,
|
|
+ u8 reportID, unsigned char *buf_recv, int data_len)
|
|
+{
|
|
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
+ u8 args[3];
|
|
+ int ret;
|
|
+ int args_len = 0;
|
|
+ u16 readRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
|
|
+
|
|
+ i2c_hid_dbg(ihid, "%s\n", __func__);
|
|
+
|
|
+ if (reportID >= 0x0F) {
|
|
+ args[args_len++] = reportID;
|
|
+ reportID = 0x0F;
|
|
+ }
|
|
+
|
|
+ args[args_len++] = readRegister & 0xFF;
|
|
+ args[args_len++] = readRegister >> 8;
|
|
+
|
|
+ ret = __i2c_hid_command(client, &hid_get_report_cmd, reportID,
|
|
+ reportType, args, args_len, buf_recv, data_len);
|
|
+ if (ret) {
|
|
+ dev_err(&client->dev,
|
|
+ "failed to retrieve report from device.\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * i2c_hid_set_or_send_report: forward an incoming report to the device
|
|
+ * @client: the i2c_client of the device
|
|
+ * @reportType: 0x03 for HID_FEATURE_REPORT ; 0x02 for HID_OUTPUT_REPORT
|
|
+ * @reportID: the report ID
|
|
+ * @buf: the actual data to transfer, without the report ID
|
|
+ * @len: size of buf
|
|
+ * @use_data: true: use SET_REPORT HID command, false: send plain OUTPUT report
|
|
+ */
|
|
+static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
|
|
+ u8 reportID, unsigned char *buf, size_t data_len, bool use_data)
|
|
+{
|
|
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
+ u8 *args = ihid->argsbuf;
|
|
+ const struct i2c_hid_cmd *hidcmd;
|
|
+ int ret;
|
|
+ u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
|
|
+ u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
|
|
+ u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
|
|
+ u16 size;
|
|
+ int args_len;
|
|
+ int index = 0;
|
|
+
|
|
+ i2c_hid_dbg(ihid, "%s\n", __func__);
|
|
+
|
|
+ if (data_len > ihid->bufsize)
|
|
+ return -EINVAL;
|
|
+
|
|
+ size = 2 /* size */ +
|
|
+ (reportID ? 1 : 0) /* reportID */ +
|
|
+ data_len /* buf */;
|
|
+ args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
|
|
+ 2 /* dataRegister */ +
|
|
+ size /* args */;
|
|
+
|
|
+ if (!use_data && maxOutputLength == 0)
|
|
+ return -ENOSYS;
|
|
+
|
|
+ if (reportID >= 0x0F) {
|
|
+ args[index++] = reportID;
|
|
+ reportID = 0x0F;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * use the data register for feature reports or if the device does not
|
|
+ * support the output register
|
|
+ */
|
|
+ if (use_data) {
|
|
+ args[index++] = dataRegister & 0xFF;
|
|
+ args[index++] = dataRegister >> 8;
|
|
+ hidcmd = &hid_set_report_cmd;
|
|
+ } else {
|
|
+ args[index++] = outputRegister & 0xFF;
|
|
+ args[index++] = outputRegister >> 8;
|
|
+ hidcmd = &hid_no_cmd;
|
|
+ }
|
|
+
|
|
+ args[index++] = size & 0xFF;
|
|
+ args[index++] = size >> 8;
|
|
+
|
|
+ if (reportID)
|
|
+ args[index++] = reportID;
|
|
+
|
|
+ memcpy(&args[index], buf, data_len);
|
|
+
|
|
+ ret = __i2c_hid_command(client, hidcmd, reportID,
|
|
+ reportType, args, args_len, NULL, 0);
|
|
+ if (ret) {
|
|
+ dev_err(&client->dev, "failed to set a report to device.\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return data_len;
|
|
+}
|
|
+
|
|
+static int i2c_hid_set_power(struct i2c_client *client, int power_state)
|
|
+{
|
|
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
+ int ret;
|
|
+
|
|
+ i2c_hid_dbg(ihid, "%s\n", __func__);
|
|
+
|
|
+ /*
|
|
+ * Some devices require to send a command to wakeup before power on.
|
|
+ * The call will get a return value (EREMOTEIO) but device will be
|
|
+ * triggered and activated. After that, it goes like a normal device.
|
|
+ */
|
|
+ if (power_state == I2C_HID_PWR_ON &&
|
|
+ ihid->quirks & I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV) {
|
|
+ ret = i2c_hid_command(client, &hid_set_power_cmd, NULL, 0);
|
|
+
|
|
+ /* Device was already activated */
|
|
+ if (!ret)
|
|
+ goto set_pwr_exit;
|
|
+ }
|
|
+
|
|
+ ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
|
|
+ 0, NULL, 0, NULL, 0);
|
|
+
|
|
+ if (ret)
|
|
+ dev_err(&client->dev, "failed to change power setting.\n");
|
|
+
|
|
+set_pwr_exit:
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int i2c_hid_hwreset(struct i2c_client *client)
|
|
+{
|
|
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
+ int ret;
|
|
+
|
|
+ i2c_hid_dbg(ihid, "%s\n", __func__);
|
|
+
|
|
+ /*
|
|
+ * This prevents sending feature reports while the device is
|
|
+ * being reset. Otherwise we may lose the reset complete
|
|
+ * interrupt.
|
|
+ */
|
|
+ mutex_lock(&ihid->reset_lock);
|
|
+
|
|
+ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
|
|
+ if (ret)
|
|
+ goto out_unlock;
|
|
+
|
|
+ /*
|
|
+ * The HID over I2C specification states that if a DEVICE needs time
|
|
+ * after the PWR_ON request, it should utilise CLOCK stretching.
|
|
+ * However, it has been observered that the Windows driver provides a
|
|
+ * 1ms sleep between the PWR_ON and RESET requests and that some devices
|
|
+ * rely on this.
|
|
+ */
|
|
+ usleep_range(1000, 5000);
|
|
+
|
|
+ i2c_hid_dbg(ihid, "resetting...\n");
|
|
+
|
|
+ ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
|
|
+ if (ret) {
|
|
+ dev_err(&client->dev, "failed to reset device.\n");
|
|
+ i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
|
|
+ }
|
|
+
|
|
+out_unlock:
|
|
+ mutex_unlock(&ihid->reset_lock);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void i2c_hid_get_input(struct i2c_hid *ihid)
|
|
+{
|
|
+ int ret;
|
|
+ u32 ret_size;
|
|
+ int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
|
|
+
|
|
+ if (size > ihid->bufsize)
|
|
+ size = ihid->bufsize;
|
|
+
|
|
+ ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
|
|
+ if (ret != size) {
|
|
+ if (ret < 0)
|
|
+ return;
|
|
+
|
|
+ dev_err(&ihid->client->dev, "%s: got %d data instead of %d\n",
|
|
+ __func__, ret, size);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ ret_size = ihid->inbuf[0] | ihid->inbuf[1] << 8;
|
|
+
|
|
+ if (!ret_size) {
|
|
+ /* host or device initiated RESET completed */
|
|
+ if (test_and_clear_bit(I2C_HID_RESET_PENDING, &ihid->flags))
|
|
+ wake_up(&ihid->wait);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if ((ret_size > size) || (ret_size < 2)) {
|
|
+ dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
|
|
+ __func__, size, ret_size);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf);
|
|
+
|
|
+ if (test_bit(I2C_HID_STARTED, &ihid->flags))
|
|
+ hid_input_report(ihid->hid, HID_INPUT_REPORT, ihid->inbuf + 2,
|
|
+ ret_size - 2, 1);
|
|
+
|
|
+ return;
|
|
+}
|
|
+
|
|
+static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
|
|
+{
|
|
+ struct i2c_hid *ihid = dev_id;
|
|
+
|
|
+ if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
|
|
+ return IRQ_HANDLED;
|
|
+
|
|
+ i2c_hid_get_input(ihid);
|
|
+
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+static int i2c_hid_get_report_length(struct hid_report *report)
|
|
+{
|
|
+ return ((report->size - 1) >> 3) + 1 +
|
|
+ report->device->report_enum[report->type].numbered + 2;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Traverse the supplied list of reports and find the longest
|
|
+ */
|
|
+static void i2c_hid_find_max_report(struct hid_device *hid, unsigned int type,
|
|
+ unsigned int *max)
|
|
+{
|
|
+ struct hid_report *report;
|
|
+ unsigned int size;
|
|
+
|
|
+ /* We should not rely on wMaxInputLength, as some devices may set it to
|
|
+ * a wrong length. */
|
|
+ list_for_each_entry(report, &hid->report_enum[type].report_list, list) {
|
|
+ size = i2c_hid_get_report_length(report);
|
|
+ if (*max < size)
|
|
+ *max = size;
|
|
+ }
|
|
+}
|
|
+
|
|
+static void i2c_hid_free_buffers(struct i2c_hid *ihid)
|
|
+{
|
|
+ kfree(ihid->inbuf);
|
|
+ kfree(ihid->rawbuf);
|
|
+ kfree(ihid->argsbuf);
|
|
+ kfree(ihid->cmdbuf);
|
|
+ ihid->inbuf = NULL;
|
|
+ ihid->rawbuf = NULL;
|
|
+ ihid->cmdbuf = NULL;
|
|
+ ihid->argsbuf = NULL;
|
|
+ ihid->bufsize = 0;
|
|
+}
|
|
+
|
|
+static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
|
|
+{
|
|
+ /* the worst case is computed from the set_report command with a
|
|
+ * reportID > 15 and the maximum report length */
|
|
+ int args_len = sizeof(__u8) + /* ReportID */
|
|
+ sizeof(__u8) + /* optional ReportID byte */
|
|
+ sizeof(__u16) + /* data register */
|
|
+ sizeof(__u16) + /* size of the report */
|
|
+ report_size; /* report */
|
|
+
|
|
+ ihid->inbuf = kzalloc(report_size, GFP_KERNEL);
|
|
+ ihid->rawbuf = kzalloc(report_size, GFP_KERNEL);
|
|
+ ihid->argsbuf = kzalloc(args_len, GFP_KERNEL);
|
|
+ ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL);
|
|
+
|
|
+ if (!ihid->inbuf || !ihid->rawbuf || !ihid->argsbuf || !ihid->cmdbuf) {
|
|
+ i2c_hid_free_buffers(ihid);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ ihid->bufsize = report_size;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int i2c_hid_get_raw_report(struct hid_device *hid,
|
|
+ unsigned char report_number, __u8 *buf, size_t count,
|
|
+ unsigned char report_type)
|
|
+{
|
|
+ struct i2c_client *client = hid->driver_data;
|
|
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
+ size_t ret_count, ask_count;
|
|
+ int ret;
|
|
+
|
|
+ if (report_type == HID_OUTPUT_REPORT)
|
|
+ return -EINVAL;
|
|
+
|
|
+ /* +2 bytes to include the size of the reply in the query buffer */
|
|
+ ask_count = min(count + 2, (size_t)ihid->bufsize);
|
|
+
|
|
+ ret = i2c_hid_get_report(client,
|
|
+ report_type == HID_FEATURE_REPORT ? 0x03 : 0x01,
|
|
+ report_number, ihid->rawbuf, ask_count);
|
|
+
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
+ ret_count = ihid->rawbuf[0] | (ihid->rawbuf[1] << 8);
|
|
+
|
|
+ if (ret_count <= 2)
|
|
+ return 0;
|
|
+
|
|
+ ret_count = min(ret_count, ask_count);
|
|
+
|
|
+ /* The query buffer contains the size, dropping it in the reply */
|
|
+ count = min(count, ret_count - 2);
|
|
+ memcpy(buf, ihid->rawbuf + 2, count);
|
|
+
|
|
+ return count;
|
|
+}
|
|
+
+static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
+		size_t count, unsigned char report_type, bool use_data)
+{
+	struct i2c_client *client = hid->driver_data;
+	struct i2c_hid *ihid = i2c_get_clientdata(client);
+	int report_id = buf[0];
+	int ret;
+
+	if (report_type == HID_INPUT_REPORT)
+		return -EINVAL;
+
+	mutex_lock(&ihid->reset_lock);
+
+	if (report_id) {
+		buf++;
+		count--;
+	}
+
+	ret = i2c_hid_set_or_send_report(client,
+			report_type == HID_FEATURE_REPORT ? 0x03 : 0x02,
+			report_id, buf, count, use_data);
+
+	if (report_id && ret >= 0)
+		ret++; /* add report_id to the number of transfered bytes */
+
+	mutex_unlock(&ihid->reset_lock);
+
+	return ret;
+}
+
+static int i2c_hid_output_report(struct hid_device *hid, __u8 *buf,
+		size_t count)
+{
+	return i2c_hid_output_raw_report(hid, buf, count, HID_OUTPUT_REPORT,
+			false);
+}
+
+static int i2c_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
+			       __u8 *buf, size_t len, unsigned char rtype,
+			       int reqtype)
+{
+	switch (reqtype) {
+	case HID_REQ_GET_REPORT:
+		return i2c_hid_get_raw_report(hid, reportnum, buf, len, rtype);
+	case HID_REQ_SET_REPORT:
+		if (buf[0] != reportnum)
+			return -EINVAL;
+		return i2c_hid_output_raw_report(hid, buf, len, rtype, true);
+	default:
+		return -EIO;
+	}
+}
+
+static int i2c_hid_parse(struct hid_device *hid)
+{
+	struct i2c_client *client = hid->driver_data;
+	struct i2c_hid *ihid = i2c_get_clientdata(client);
+	struct i2c_hid_desc *hdesc = &ihid->hdesc;
+	unsigned int rsize;
+	char *rdesc;
+	int ret;
+	int tries = 3;
+	char *use_override;
+
+	i2c_hid_dbg(ihid, "entering %s\n", __func__);
+
+	rsize = le16_to_cpu(hdesc->wReportDescLength);
+	if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
+		dbg_hid("weird size of report descriptor (%u)\n", rsize);
+		return -EINVAL;
+	}
+
+	do {
+		ret = i2c_hid_hwreset(client);
+		if (ret)
+			msleep(1000);
+	} while (tries-- > 0 && ret);
+
+	if (ret)
+		return ret;
+
+	use_override = i2c_hid_get_dmi_hid_report_desc_override(client->name,
+								&rsize);
+
+	if (use_override) {
+		rdesc = use_override;
+		i2c_hid_dbg(ihid, "Using a HID report descriptor override\n");
+	} else {
+		rdesc = kzalloc(rsize, GFP_KERNEL);
+
+		if (!rdesc) {
+			dbg_hid("couldn't allocate rdesc memory\n");
+			return -ENOMEM;
+		}
+
+		i2c_hid_dbg(ihid, "asking HID report descriptor\n");
+
+		ret = i2c_hid_command(client, &hid_report_descr_cmd,
+				      rdesc, rsize);
+		if (ret) {
+			hid_err(hid, "reading report descriptor failed\n");
+			kfree(rdesc);
+			return -EIO;
+		}
+	}
+
+	i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
+
+	ret = hid_parse_report(hid, rdesc, rsize);
+	if (!use_override)
+		kfree(rdesc);
+
+	if (ret) {
+		dbg_hid("parsing report descriptor failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int i2c_hid_start(struct hid_device *hid)
+{
+	struct i2c_client *client = hid->driver_data;
+	struct i2c_hid *ihid = i2c_get_clientdata(client);
+	int ret;
+	unsigned int bufsize = HID_MIN_BUFFER_SIZE;
+
+	i2c_hid_find_max_report(hid, HID_INPUT_REPORT, &bufsize);
+	i2c_hid_find_max_report(hid, HID_OUTPUT_REPORT, &bufsize);
+	i2c_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize);
+
+	if (bufsize > ihid->bufsize) {
+		disable_irq(client->irq);
+		i2c_hid_free_buffers(ihid);
+
+		ret = i2c_hid_alloc_buffers(ihid, bufsize);
+		enable_irq(client->irq);
+
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
|
|
+static void i2c_hid_stop(struct hid_device *hid)
|
|
+{
|
|
+ hid->claimed = 0;
|
|
+}
|
|
+
|
|
+static int i2c_hid_open(struct hid_device *hid)
|
|
+{
|
|
+ struct i2c_client *client = hid->driver_data;
|
|
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
+ int ret = 0;
|
|
+
|
|
+ ret = pm_runtime_get_sync(&client->dev);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
+ set_bit(I2C_HID_STARTED, &ihid->flags);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void i2c_hid_close(struct hid_device *hid)
|
|
+{
|
|
+ struct i2c_client *client = hid->driver_data;
|
|
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
+
|
|
+ clear_bit(I2C_HID_STARTED, &ihid->flags);
|
|
+
|
|
+ /* Save some power */
|
|
+ pm_runtime_put(&client->dev);
|
|
+}
|
|
+
|
|
+static int i2c_hid_power(struct hid_device *hid, int lvl)
|
|
+{
|
|
+ struct i2c_client *client = hid->driver_data;
|
|
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
+
|
|
+ i2c_hid_dbg(ihid, "%s lvl:%d\n", __func__, lvl);
|
|
+
|
|
+ switch (lvl) {
|
|
+ case PM_HINT_FULLON:
|
|
+ pm_runtime_get_sync(&client->dev);
|
|
+ break;
|
|
+ case PM_HINT_NORMAL:
|
|
+ pm_runtime_put(&client->dev);
|
|
+ break;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+struct hid_ll_driver i2c_hid_ll_driver = {
|
|
+ .parse = i2c_hid_parse,
|
|
+ .start = i2c_hid_start,
|
|
+ .stop = i2c_hid_stop,
|
|
+ .open = i2c_hid_open,
|
|
+ .close = i2c_hid_close,
|
|
+ .power = i2c_hid_power,
|
|
+ .output_report = i2c_hid_output_report,
|
|
+ .raw_request = i2c_hid_raw_request,
|
|
+};
|
|
+EXPORT_SYMBOL_GPL(i2c_hid_ll_driver);
|
|
+
|
|
+static int i2c_hid_init_irq(struct i2c_client *client)
|
|
+{
|
|
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
+ unsigned long irqflags = 0;
|
|
+ int ret;
|
|
+
|
|
+ dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
|
|
+
|
|
+ if (!irq_get_trigger_type(client->irq))
|
|
+ irqflags = IRQF_TRIGGER_LOW;
|
|
+
|
|
+ ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
|
|
+ irqflags | IRQF_ONESHOT, client->name, ihid);
|
|
+ if (ret < 0) {
|
|
+ dev_warn(&client->dev,
|
|
+ "Could not register for %s interrupt, irq = %d,"
|
|
+ " ret = %d\n",
|
|
+ client->name, client->irq, ret);
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
|
|
+{
|
|
+ struct i2c_client *client = ihid->client;
|
|
+ struct i2c_hid_desc *hdesc = &ihid->hdesc;
|
|
+ unsigned int dsize;
|
|
+ int ret;
|
|
+
|
|
+ /* i2c hid fetch using a fixed descriptor size (30 bytes) */
|
|
+ if (i2c_hid_get_dmi_i2c_hid_desc_override(client->name)) {
|
|
+ i2c_hid_dbg(ihid, "Using a HID descriptor override\n");
|
|
+ ihid->hdesc =
|
|
+ *i2c_hid_get_dmi_i2c_hid_desc_override(client->name);
|
|
+ } else {
|
|
+ i2c_hid_dbg(ihid, "Fetching the HID descriptor\n");
|
|
+ ret = i2c_hid_command(client, &hid_descr_cmd,
|
|
+ ihid->hdesc_buffer,
|
|
+ sizeof(struct i2c_hid_desc));
|
|
+ if (ret) {
|
|
+ dev_err(&client->dev, "hid_descr_cmd failed\n");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Validate the length of HID descriptor, the 4 first bytes:
|
|
+ * bytes 0-1 -> length
|
|
+ * bytes 2-3 -> bcdVersion (has to be 1.00) */
|
|
+ /* check bcdVersion == 1.0 */
|
|
+ if (le16_to_cpu(hdesc->bcdVersion) != 0x0100) {
|
|
+ dev_err(&client->dev,
|
|
+ "unexpected HID descriptor bcdVersion (0x%04hx)\n",
|
|
+ le16_to_cpu(hdesc->bcdVersion));
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ /* Descriptor length should be 30 bytes as per the specification */
|
|
+ dsize = le16_to_cpu(hdesc->wHIDDescLength);
|
|
+ if (dsize != sizeof(struct i2c_hid_desc)) {
|
|
+ dev_err(&client->dev, "weird size of HID descriptor (%u)\n",
|
|
+ dsize);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ i2c_hid_dbg(ihid, "HID Descriptor: %*ph\n", dsize, ihid->hdesc_buffer);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_ACPI
|
|
+static int i2c_hid_acpi_pdata(struct i2c_client *client,
|
|
+ struct i2c_hid_platform_data *pdata)
|
|
+{
|
|
+ static guid_t i2c_hid_guid =
|
|
+ GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
|
|
+ 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
|
|
+ union acpi_object *obj;
|
|
+ struct acpi_device *adev;
|
|
+ acpi_handle handle;
|
|
+
|
|
+ handle = ACPI_HANDLE(&client->dev);
|
|
+ if (!handle || acpi_bus_get_device(handle, &adev))
|
|
+ return -ENODEV;
|
|
+
|
|
+ obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL,
|
|
+ ACPI_TYPE_INTEGER);
|
|
+ if (!obj) {
|
|
+ dev_err(&client->dev, "device _DSM execution failed\n");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ pdata->hid_descriptor_address = obj->integer.value;
|
|
+ ACPI_FREE(obj);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void i2c_hid_acpi_fix_up_power(struct device *dev)
|
|
+{
|
|
+ acpi_handle handle = ACPI_HANDLE(dev);
|
|
+ struct acpi_device *adev;
|
|
+
|
|
+ if (handle && acpi_bus_get_device(handle, &adev) == 0)
|
|
+ acpi_device_fix_up_power(adev);
|
|
+}
|
|
+
|
|
+static const struct acpi_device_id i2c_hid_acpi_match[] = {
|
|
+ {"ACPI0C50", 0 },
|
|
+ {"PNP0C50", 0 },
|
|
+ { },
|
|
+};
|
|
+MODULE_DEVICE_TABLE(acpi, i2c_hid_acpi_match);
|
|
+#else
|
|
+static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
|
|
+ struct i2c_hid_platform_data *pdata)
|
|
+{
|
|
+ return -ENODEV;
|
|
+}
|
|
+
|
|
+static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {}
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_OF
|
|
+static int i2c_hid_of_probe(struct i2c_client *client,
|
|
+ struct i2c_hid_platform_data *pdata)
|
|
+{
|
|
+ struct device *dev = &client->dev;
|
|
+ u32 val;
|
|
+ int ret;
|
|
+
|
|
+ ret = of_property_read_u32(dev->of_node, "hid-descr-addr", &val);
|
|
+ if (ret) {
|
|
+ dev_err(&client->dev, "HID register address not provided\n");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ if (val >> 16) {
|
|
+ dev_err(&client->dev, "Bad HID register address: 0x%08x\n",
|
|
+ val);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ pdata->hid_descriptor_address = val;
|
|
+
|
|
+ ret = of_property_read_u32(dev->of_node, "post-power-on-delay-ms",
|
|
+ &val);
|
|
+ if (!ret)
|
|
+ pdata->post_power_delay_ms = val;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct of_device_id i2c_hid_of_match[] = {
|
|
+ { .compatible = "hid-over-i2c" },
|
|
+ {},
|
|
+};
|
|
+MODULE_DEVICE_TABLE(of, i2c_hid_of_match);
|
|
+#else
|
|
+static inline int i2c_hid_of_probe(struct i2c_client *client,
|
|
+ struct i2c_hid_platform_data *pdata)
|
|
+{
|
|
+ return -ENODEV;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static int i2c_hid_probe(struct i2c_client *client,
|
|
+ const struct i2c_device_id *dev_id)
|
|
+{
|
|
+ int ret;
|
|
+ struct i2c_hid *ihid;
|
|
+ struct hid_device *hid;
|
|
+ __u16 hidRegister;
|
|
+ struct i2c_hid_platform_data *platform_data = client->dev.platform_data;
|
|
+
|
|
+ dbg_hid("HID probe called for i2c 0x%02x\n", client->addr);
|
|
+
|
|
+ if (!client->irq) {
|
|
+ dev_err(&client->dev,
|
|
+ "HID over i2c has not been provided an Int IRQ\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (client->irq < 0) {
|
|
+ if (client->irq != -EPROBE_DEFER)
|
|
+ dev_err(&client->dev,
|
|
+ "HID over i2c doesn't have a valid IRQ\n");
|
|
+ return client->irq;
|
|
+ }
|
|
+
|
|
+ ihid = kzalloc(sizeof(struct i2c_hid), GFP_KERNEL);
|
|
+ if (!ihid)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ if (client->dev.of_node) {
|
|
+ ret = i2c_hid_of_probe(client, &ihid->pdata);
|
|
+ if (ret)
|
|
+ goto err;
|
|
+ } else if (!platform_data) {
|
|
+ ret = i2c_hid_acpi_pdata(client, &ihid->pdata);
|
|
+ if (ret) {
|
|
+ dev_err(&client->dev,
|
|
+ "HID register address not provided\n");
|
|
+ goto err;
|
|
+ }
|
|
+ } else {
|
|
+ ihid->pdata = *platform_data;
|
|
+ }
|
|
+
|
|
+ ihid->pdata.supply = devm_regulator_get(&client->dev, "vdd");
|
|
+ if (IS_ERR(ihid->pdata.supply)) {
|
|
+ ret = PTR_ERR(ihid->pdata.supply);
|
|
+ if (ret != -EPROBE_DEFER)
|
|
+ dev_err(&client->dev, "Failed to get regulator: %d\n",
|
|
+ ret);
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+ ret = regulator_enable(ihid->pdata.supply);
|
|
+ if (ret < 0) {
|
|
+ dev_err(&client->dev, "Failed to enable regulator: %d\n",
|
|
+ ret);
|
|
+ goto err;
|
|
+ }
|
|
+ if (ihid->pdata.post_power_delay_ms)
|
|
+ msleep(ihid->pdata.post_power_delay_ms);
|
|
+
|
|
+ i2c_set_clientdata(client, ihid);
|
|
+
|
|
+ ihid->client = client;
|
|
+
|
|
+ hidRegister = ihid->pdata.hid_descriptor_address;
|
|
+ ihid->wHIDDescRegister = cpu_to_le16(hidRegister);
|
|
+
|
|
+ init_waitqueue_head(&ihid->wait);
|
|
+ mutex_init(&ihid->reset_lock);
|
|
+
|
|
+ /* we need to allocate the command buffer without knowing the maximum
|
|
+ * size of the reports. Let's use HID_MIN_BUFFER_SIZE, then we do the
|
|
+ * real computation later. */
|
|
+ ret = i2c_hid_alloc_buffers(ihid, HID_MIN_BUFFER_SIZE);
|
|
+ if (ret < 0)
|
|
+ goto err_regulator;
|
|
+
|
|
+ i2c_hid_acpi_fix_up_power(&client->dev);
|
|
+
|
|
+ pm_runtime_get_noresume(&client->dev);
|
|
+ pm_runtime_set_active(&client->dev);
|
|
+ pm_runtime_enable(&client->dev);
|
|
+ device_enable_async_suspend(&client->dev);
|
|
+
|
|
+ /* Make sure there is something at this address */
|
|
+ ret = i2c_smbus_read_byte(client);
|
|
+ if (ret < 0) {
|
|
+ dev_dbg(&client->dev, "nothing at this address: %d\n", ret);
|
|
+ ret = -ENXIO;
|
|
+ goto err_pm;
|
|
+ }
|
|
+
|
|
+ ret = i2c_hid_fetch_hid_descriptor(ihid);
|
|
+ if (ret < 0)
|
|
+ goto err_pm;
|
|
+
|
|
+ ret = i2c_hid_init_irq(client);
|
|
+ if (ret < 0)
|
|
+ goto err_pm;
|
|
+
|
|
+ hid = hid_allocate_device();
|
|
+ if (IS_ERR(hid)) {
|
|
+ ret = PTR_ERR(hid);
|
|
+ goto err_irq;
|
|
+ }
|
|
+
|
|
+ ihid->hid = hid;
|
|
+
|
|
+ hid->driver_data = client;
|
|
+ hid->ll_driver = &i2c_hid_ll_driver;
|
|
+ hid->dev.parent = &client->dev;
|
|
+ hid->bus = BUS_I2C;
|
|
+ hid->version = le16_to_cpu(ihid->hdesc.bcdVersion);
|
|
+ hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
|
|
+ hid->product = le16_to_cpu(ihid->hdesc.wProductID);
|
|
+
|
|
+ snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
|
|
+ client->name, hid->vendor, hid->product);
|
|
+ strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
|
|
+
|
|
+ ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
|
|
+
|
|
+ ret = hid_add_device(hid);
|
|
+ if (ret) {
|
|
+ if (ret != -ENODEV)
|
|
+ hid_err(client, "can't add hid device: %d\n", ret);
|
|
+ goto err_mem_free;
|
|
+ }
|
|
+
|
|
+ pm_runtime_put(&client->dev);
|
|
+ return 0;
|
|
+
|
|
+err_mem_free:
|
|
+ hid_destroy_device(hid);
|
|
+
|
|
+err_irq:
|
|
+ free_irq(client->irq, ihid);
|
|
+
|
|
+err_pm:
|
|
+ pm_runtime_put_noidle(&client->dev);
|
|
+ pm_runtime_disable(&client->dev);
|
|
+
|
|
+err_regulator:
|
|
+ regulator_disable(ihid->pdata.supply);
|
|
+
|
|
+err:
|
|
+ i2c_hid_free_buffers(ihid);
|
|
+ kfree(ihid);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int i2c_hid_remove(struct i2c_client *client)
|
|
+{
|
|
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
+ struct hid_device *hid;
|
|
+
|
|
+ pm_runtime_get_sync(&client->dev);
|
|
+ pm_runtime_disable(&client->dev);
|
|
+ pm_runtime_set_suspended(&client->dev);
|
|
+ pm_runtime_put_noidle(&client->dev);
|
|
+
|
|
+ hid = ihid->hid;
|
|
+ hid_destroy_device(hid);
|
|
+
|
|
+ free_irq(client->irq, ihid);
|
|
+
|
|
+ if (ihid->bufsize)
|
|
+ i2c_hid_free_buffers(ihid);
|
|
+
|
|
+ regulator_disable(ihid->pdata.supply);
|
|
+
|
|
+ kfree(ihid);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void i2c_hid_shutdown(struct i2c_client *client)
|
|
+{
|
|
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
+
|
|
+ i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
|
|
+ free_irq(client->irq, ihid);
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_PM_SLEEP
|
|
+static int i2c_hid_suspend(struct device *dev)
|
|
+{
|
|
+ struct i2c_client *client = to_i2c_client(dev);
|
|
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
+ struct hid_device *hid = ihid->hid;
|
|
+ int ret;
|
|
+ int wake_status;
|
|
+
|
|
+ if (hid->driver && hid->driver->suspend) {
|
|
+ /*
|
|
+ * Wake up the device so that IO issues in
|
|
+ * HID driver's suspend code can succeed.
|
|
+ */
|
|
+ ret = pm_runtime_resume(dev);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
+ ret = hid->driver->suspend(hid, PMSG_SUSPEND);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ if (!pm_runtime_suspended(dev)) {
|
|
+ /* Save some power */
|
|
+ i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
|
|
+
|
|
+ disable_irq(client->irq);
|
|
+ }
|
|
+
|
|
+ if (device_may_wakeup(&client->dev)) {
|
|
+ wake_status = enable_irq_wake(client->irq);
|
|
+ if (!wake_status)
|
|
+ ihid->irq_wake_enabled = true;
|
|
+ else
|
|
+ hid_warn(hid, "Failed to enable irq wake: %d\n",
|
|
+ wake_status);
|
|
+ } else {
|
|
+ ret = regulator_disable(ihid->pdata.supply);
|
|
+ if (ret < 0)
|
|
+ hid_warn(hid, "Failed to disable supply: %d\n", ret);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int i2c_hid_resume(struct device *dev)
|
|
+{
|
|
+ int ret;
|
|
+ struct i2c_client *client = to_i2c_client(dev);
|
|
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
+ struct hid_device *hid = ihid->hid;
|
|
+ int wake_status;
|
|
+
|
|
+ if (!device_may_wakeup(&client->dev)) {
|
|
+ ret = regulator_enable(ihid->pdata.supply);
|
|
+ if (ret < 0)
|
|
+ hid_warn(hid, "Failed to enable supply: %d\n", ret);
|
|
+ if (ihid->pdata.post_power_delay_ms)
|
|
+ msleep(ihid->pdata.post_power_delay_ms);
|
|
+ } else if (ihid->irq_wake_enabled) {
|
|
+ wake_status = disable_irq_wake(client->irq);
|
|
+ if (!wake_status)
|
|
+ ihid->irq_wake_enabled = false;
|
|
+ else
|
|
+ hid_warn(hid, "Failed to disable irq wake: %d\n",
|
|
+ wake_status);
|
|
+ }
|
|
+
|
|
+ /* We'll resume to full power */
|
|
+ pm_runtime_disable(dev);
|
|
+ pm_runtime_set_active(dev);
|
|
+ pm_runtime_enable(dev);
|
|
+
|
|
+ enable_irq(client->irq);
|
|
+ ret = i2c_hid_hwreset(client);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ if (hid->driver && hid->driver->reset_resume) {
|
|
+ ret = hid->driver->reset_resume(hid);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PM
|
|
+static int i2c_hid_runtime_suspend(struct device *dev)
|
|
+{
|
|
+ struct i2c_client *client = to_i2c_client(dev);
|
|
+
|
|
+ i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
|
|
+ disable_irq(client->irq);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int i2c_hid_runtime_resume(struct device *dev)
|
|
+{
|
|
+ struct i2c_client *client = to_i2c_client(dev);
|
|
+
|
|
+ enable_irq(client->irq);
|
|
+ i2c_hid_set_power(client, I2C_HID_PWR_ON);
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static const struct dev_pm_ops i2c_hid_pm = {
|
|
+ SET_SYSTEM_SLEEP_PM_OPS(i2c_hid_suspend, i2c_hid_resume)
|
|
+ SET_RUNTIME_PM_OPS(i2c_hid_runtime_suspend, i2c_hid_runtime_resume,
|
|
+ NULL)
|
|
+};
|
|
+
|
|
+static const struct i2c_device_id i2c_hid_id_table[] = {
|
|
+ { "hid", 0 },
|
|
+ { "hid-over-i2c", 0 },
|
|
+ { },
|
|
+};
|
|
+MODULE_DEVICE_TABLE(i2c, i2c_hid_id_table);
|
|
+
|
|
+
|
|
+static struct i2c_driver i2c_hid_driver = {
|
|
+ .driver = {
|
|
+ .name = "i2c_hid",
|
|
+ .pm = &i2c_hid_pm,
|
|
+ .acpi_match_table = ACPI_PTR(i2c_hid_acpi_match),
|
|
+ .of_match_table = of_match_ptr(i2c_hid_of_match),
|
|
+ },
|
|
+
|
|
+ .probe = i2c_hid_probe,
|
|
+ .remove = i2c_hid_remove,
|
|
+ .shutdown = i2c_hid_shutdown,
|
|
+ .id_table = i2c_hid_id_table,
|
|
+};
|
|
+
|
|
+module_i2c_driver(i2c_hid_driver);
|
|
+
|
|
+MODULE_DESCRIPTION("HID over I2C core driver");
|
|
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
|
|
+MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
new file mode 100644
index 000000000000..1d645c9ab417
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Quirks for I2C-HID devices that do not supply proper descriptors
+ *
+ * Copyright (c) 2018 Julian Sax <jsbc@gmx.de>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/dmi.h>
+#include <linux/mod_devicetable.h>
+
+#include "i2c-hid.h"
+
+
+struct i2c_hid_desc_override {
|
|
+ union {
|
|
+ struct i2c_hid_desc *i2c_hid_desc;
|
|
+ uint8_t *i2c_hid_desc_buffer;
|
|
+ };
|
|
+ uint8_t *hid_report_desc;
|
|
+ unsigned int hid_report_desc_size;
|
|
+ uint8_t *i2c_name;
|
|
+};
|
|
+
|
|
+
|
|
+/*
|
|
+ * descriptors for the SIPODEV SP1064 touchpad
|
|
+ *
|
|
+ * This device does not supply any descriptors and on windows a filter
|
|
+ * driver operates between the i2c-hid layer and the device and injects
|
|
+ * these descriptors when the device is prompted. The descriptors were
|
|
+ * extracted by listening to the i2c-hid traffic that occurs between the
|
|
+ * windows filter driver and the windows i2c-hid driver.
|
|
+ */
|
|
+
|
|
+static const struct i2c_hid_desc_override sipodev_desc = {
|
|
+ .i2c_hid_desc_buffer = (uint8_t [])
|
|
+ {0x1e, 0x00, /* Length of descriptor */
|
|
+ 0x00, 0x01, /* Version of descriptor */
|
|
+ 0xdb, 0x01, /* Length of report descriptor */
|
|
+ 0x21, 0x00, /* Location of report descriptor */
|
|
+ 0x24, 0x00, /* Location of input report */
|
|
+ 0x1b, 0x00, /* Max input report length */
|
|
+ 0x25, 0x00, /* Location of output report */
|
|
+ 0x11, 0x00, /* Max output report length */
|
|
+ 0x22, 0x00, /* Location of command register */
|
|
+ 0x23, 0x00, /* Location of data register */
|
|
+ 0x11, 0x09, /* Vendor ID */
|
|
+ 0x88, 0x52, /* Product ID */
|
|
+ 0x06, 0x00, /* Version ID */
|
|
+ 0x00, 0x00, 0x00, 0x00 /* Reserved */
|
|
+ },
|
|
+
|
|
+ .hid_report_desc = (uint8_t [])
|
|
+ {0x05, 0x01, /* Usage Page (Desktop), */
|
|
+ 0x09, 0x02, /* Usage (Mouse), */
|
|
+ 0xA1, 0x01, /* Collection (Application), */
|
|
+ 0x85, 0x01, /* Report ID (1), */
|
|
+ 0x09, 0x01, /* Usage (Pointer), */
|
|
+ 0xA1, 0x00, /* Collection (Physical), */
|
|
+ 0x05, 0x09, /* Usage Page (Button), */
|
|
+ 0x19, 0x01, /* Usage Minimum (01h), */
|
|
+ 0x29, 0x02, /* Usage Maximum (02h), */
|
|
+ 0x25, 0x01, /* Logical Maximum (1), */
|
|
+ 0x75, 0x01, /* Report Size (1), */
|
|
+ 0x95, 0x02, /* Report Count (2), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0x95, 0x06, /* Report Count (6), */
|
|
+ 0x81, 0x01, /* Input (Constant), */
|
|
+ 0x05, 0x01, /* Usage Page (Desktop), */
|
|
+ 0x09, 0x30, /* Usage (X), */
|
|
+ 0x09, 0x31, /* Usage (Y), */
|
|
+ 0x15, 0x81, /* Logical Minimum (-127), */
|
|
+ 0x25, 0x7F, /* Logical Maximum (127), */
|
|
+ 0x75, 0x08, /* Report Size (8), */
|
|
+ 0x95, 0x02, /* Report Count (2), */
|
|
+ 0x81, 0x06, /* Input (Variable, Relative), */
|
|
+ 0xC0, /* End Collection, */
|
|
+ 0xC0, /* End Collection, */
|
|
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
|
|
+ 0x09, 0x05, /* Usage (Touchpad), */
|
|
+ 0xA1, 0x01, /* Collection (Application), */
|
|
+ 0x85, 0x04, /* Report ID (4), */
|
|
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
|
|
+ 0x09, 0x22, /* Usage (Finger), */
|
|
+ 0xA1, 0x02, /* Collection (Logical), */
|
|
+ 0x15, 0x00, /* Logical Minimum (0), */
|
|
+ 0x25, 0x01, /* Logical Maximum (1), */
|
|
+ 0x09, 0x47, /* Usage (Touch Valid), */
|
|
+ 0x09, 0x42, /* Usage (Tip Switch), */
|
|
+ 0x95, 0x02, /* Report Count (2), */
|
|
+ 0x75, 0x01, /* Report Size (1), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0x95, 0x01, /* Report Count (1), */
|
|
+ 0x75, 0x03, /* Report Size (3), */
|
|
+ 0x25, 0x05, /* Logical Maximum (5), */
|
|
+ 0x09, 0x51, /* Usage (Contact Identifier), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0x75, 0x01, /* Report Size (1), */
|
|
+ 0x95, 0x03, /* Report Count (3), */
|
|
+ 0x81, 0x03, /* Input (Constant, Variable), */
|
|
+ 0x05, 0x01, /* Usage Page (Desktop), */
|
|
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
|
|
+ 0x75, 0x10, /* Report Size (16), */
|
|
+ 0x55, 0x0E, /* Unit Exponent (14), */
|
|
+ 0x65, 0x11, /* Unit (Centimeter), */
|
|
+ 0x09, 0x30, /* Usage (X), */
|
|
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
|
|
+ 0x95, 0x01, /* Report Count (1), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
|
|
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
|
|
+ 0x09, 0x31, /* Usage (Y), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0xC0, /* End Collection, */
|
|
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
|
|
+ 0x09, 0x22, /* Usage (Finger), */
|
|
+ 0xA1, 0x02, /* Collection (Logical), */
|
|
+ 0x25, 0x01, /* Logical Maximum (1), */
|
|
+ 0x09, 0x47, /* Usage (Touch Valid), */
|
|
+ 0x09, 0x42, /* Usage (Tip Switch), */
|
|
+ 0x95, 0x02, /* Report Count (2), */
|
|
+ 0x75, 0x01, /* Report Size (1), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0x95, 0x01, /* Report Count (1), */
|
|
+ 0x75, 0x03, /* Report Size (3), */
|
|
+ 0x25, 0x05, /* Logical Maximum (5), */
|
|
+ 0x09, 0x51, /* Usage (Contact Identifier), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0x75, 0x01, /* Report Size (1), */
|
|
+ 0x95, 0x03, /* Report Count (3), */
|
|
+ 0x81, 0x03, /* Input (Constant, Variable), */
|
|
+ 0x05, 0x01, /* Usage Page (Desktop), */
|
|
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
|
|
+ 0x75, 0x10, /* Report Size (16), */
|
|
+ 0x09, 0x30, /* Usage (X), */
|
|
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
|
|
+ 0x95, 0x01, /* Report Count (1), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
|
|
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
|
|
+ 0x09, 0x31, /* Usage (Y), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0xC0, /* End Collection, */
|
|
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
|
|
+ 0x09, 0x22, /* Usage (Finger), */
|
|
+ 0xA1, 0x02, /* Collection (Logical), */
|
|
+ 0x25, 0x01, /* Logical Maximum (1), */
|
|
+ 0x09, 0x47, /* Usage (Touch Valid), */
|
|
+ 0x09, 0x42, /* Usage (Tip Switch), */
|
|
+ 0x95, 0x02, /* Report Count (2), */
|
|
+ 0x75, 0x01, /* Report Size (1), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0x95, 0x01, /* Report Count (1), */
|
|
+ 0x75, 0x03, /* Report Size (3), */
|
|
+ 0x25, 0x05, /* Logical Maximum (5), */
|
|
+ 0x09, 0x51, /* Usage (Contact Identifier), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0x75, 0x01, /* Report Size (1), */
|
|
+ 0x95, 0x03, /* Report Count (3), */
|
|
+ 0x81, 0x03, /* Input (Constant, Variable), */
|
|
+ 0x05, 0x01, /* Usage Page (Desktop), */
|
|
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
|
|
+ 0x75, 0x10, /* Report Size (16), */
|
|
+ 0x09, 0x30, /* Usage (X), */
|
|
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
|
|
+ 0x95, 0x01, /* Report Count (1), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
|
|
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
|
|
+ 0x09, 0x31, /* Usage (Y), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0xC0, /* End Collection, */
|
|
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
|
|
+ 0x09, 0x22, /* Usage (Finger), */
|
|
+ 0xA1, 0x02, /* Collection (Logical), */
|
|
+ 0x25, 0x01, /* Logical Maximum (1), */
|
|
+ 0x09, 0x47, /* Usage (Touch Valid), */
|
|
+ 0x09, 0x42, /* Usage (Tip Switch), */
|
|
+ 0x95, 0x02, /* Report Count (2), */
|
|
+ 0x75, 0x01, /* Report Size (1), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0x95, 0x01, /* Report Count (1), */
|
|
+ 0x75, 0x03, /* Report Size (3), */
|
|
+ 0x25, 0x05, /* Logical Maximum (5), */
|
|
+ 0x09, 0x51, /* Usage (Contact Identifier), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0x75, 0x01, /* Report Size (1), */
|
|
+ 0x95, 0x03, /* Report Count (3), */
|
|
+ 0x81, 0x03, /* Input (Constant, Variable), */
|
|
+ 0x05, 0x01, /* Usage Page (Desktop), */
|
|
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
|
|
+ 0x75, 0x10, /* Report Size (16), */
|
|
+ 0x09, 0x30, /* Usage (X), */
|
|
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
|
|
+ 0x95, 0x01, /* Report Count (1), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
|
|
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
|
|
+ 0x09, 0x31, /* Usage (Y), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0xC0, /* End Collection, */
|
|
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
|
|
+ 0x55, 0x0C, /* Unit Exponent (12), */
|
|
+ 0x66, 0x01, 0x10, /* Unit (Seconds), */
|
|
+ 0x47, 0xFF, 0xFF, 0x00, 0x00,/* Physical Maximum (65535), */
|
|
+ 0x27, 0xFF, 0xFF, 0x00, 0x00,/* Logical Maximum (65535), */
|
|
+ 0x75, 0x10, /* Report Size (16), */
|
|
+ 0x95, 0x01, /* Report Count (1), */
|
|
+ 0x09, 0x56, /* Usage (Scan Time), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0x09, 0x54, /* Usage (Contact Count), */
|
|
+ 0x25, 0x7F, /* Logical Maximum (127), */
|
|
+ 0x75, 0x08, /* Report Size (8), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0x05, 0x09, /* Usage Page (Button), */
|
|
+ 0x09, 0x01, /* Usage (01h), */
|
|
+ 0x25, 0x01, /* Logical Maximum (1), */
|
|
+ 0x75, 0x01, /* Report Size (1), */
|
|
+ 0x95, 0x01, /* Report Count (1), */
|
|
+ 0x81, 0x02, /* Input (Variable), */
|
|
+ 0x95, 0x07, /* Report Count (7), */
|
|
+ 0x81, 0x03, /* Input (Constant, Variable), */
|
|
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
|
|
+ 0x85, 0x02, /* Report ID (2), */
|
|
+ 0x09, 0x55, /* Usage (Contact Count Maximum), */
|
|
+ 0x09, 0x59, /* Usage (59h), */
|
|
+ 0x75, 0x04, /* Report Size (4), */
|
|
+ 0x95, 0x02, /* Report Count (2), */
|
|
+ 0x25, 0x0F, /* Logical Maximum (15), */
|
|
+ 0xB1, 0x02, /* Feature (Variable), */
|
|
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
|
|
+ 0x85, 0x07, /* Report ID (7), */
|
|
+ 0x09, 0x60, /* Usage (60h), */
|
|
+ 0x75, 0x01, /* Report Size (1), */
|
|
+ 0x95, 0x01, /* Report Count (1), */
|
|
+ 0x25, 0x01, /* Logical Maximum (1), */
|
|
+ 0xB1, 0x02, /* Feature (Variable), */
|
|
+ 0x95, 0x07, /* Report Count (7), */
|
|
+ 0xB1, 0x03, /* Feature (Constant, Variable), */
|
|
+ 0x85, 0x06, /* Report ID (6), */
|
|
+ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
|
|
+ 0x09, 0xC5, /* Usage (C5h), */
|
|
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
|
|
+ 0x75, 0x08, /* Report Size (8), */
|
|
+ 0x96, 0x00, 0x01, /* Report Count (256), */
|
|
+ 0xB1, 0x02, /* Feature (Variable), */
|
|
+ 0xC0, /* End Collection, */
|
|
+ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
|
|
+ 0x09, 0x01, /* Usage (01h), */
|
|
+ 0xA1, 0x01, /* Collection (Application), */
|
|
+ 0x85, 0x0D, /* Report ID (13), */
|
|
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
|
|
+ 0x19, 0x01, /* Usage Minimum (01h), */
|
|
+ 0x29, 0x02, /* Usage Maximum (02h), */
|
|
+ 0x75, 0x08, /* Report Size (8), */
|
|
+ 0x95, 0x02, /* Report Count (2), */
|
|
+ 0xB1, 0x02, /* Feature (Variable), */
|
|
+ 0xC0, /* End Collection, */
|
|
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
|
|
+ 0x09, 0x0E, /* Usage (Configuration), */
|
|
+ 0xA1, 0x01, /* Collection (Application), */
|
|
+ 0x85, 0x03, /* Report ID (3), */
|
|
+ 0x09, 0x22, /* Usage (Finger), */
|
|
+ 0xA1, 0x02, /* Collection (Logical), */
|
|
+ 0x09, 0x52, /* Usage (Device Mode), */
|
|
+ 0x25, 0x0A, /* Logical Maximum (10), */
|
|
+ 0x95, 0x01, /* Report Count (1), */
|
|
+ 0xB1, 0x02, /* Feature (Variable), */
|
|
+ 0xC0, /* End Collection, */
|
|
+ 0x09, 0x22, /* Usage (Finger), */
|
|
+ 0xA1, 0x00, /* Collection (Physical), */
|
|
+ 0x85, 0x05, /* Report ID (5), */
|
|
+ 0x09, 0x57, /* Usage (57h), */
|
|
+ 0x09, 0x58, /* Usage (58h), */
|
|
+ 0x75, 0x01, /* Report Size (1), */
|
|
+ 0x95, 0x02, /* Report Count (2), */
|
|
+ 0x25, 0x01, /* Logical Maximum (1), */
|
|
+ 0xB1, 0x02, /* Feature (Variable), */
|
|
+ 0x95, 0x06, /* Report Count (6), */
|
|
+ 0xB1, 0x03, /* Feature (Constant, Variable),*/
|
|
+ 0xC0, /* End Collection, */
|
|
+ 0xC0 /* End Collection */
|
|
+ },
|
|
+ .hid_report_desc_size = 475,
|
|
+ .i2c_name = "SYNA3602:00"
|
|
+};
|
|
+
|
|
+
|
|
+static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
|
|
+ {
|
|
+ .ident = "Teclast F6 Pro",
|
|
+ .matches = {
|
|
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TECLAST"),
|
|
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "F6 Pro"),
|
|
+ },
|
|
+ .driver_data = (void *)&sipodev_desc
|
|
+ },
|
|
+ {
|
|
+ .ident = "Teclast F7",
|
|
+ .matches = {
|
|
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TECLAST"),
|
|
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "F7"),
|
|
+ },
|
|
+ .driver_data = (void *)&sipodev_desc
|
|
+ },
|
|
+ {
|
|
+ .ident = "Trekstor Primebook C13",
|
|
+ .matches = {
|
|
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
|
|
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C13"),
|
|
+ },
|
|
+ .driver_data = (void *)&sipodev_desc
|
|
+ },
|
|
+ {
|
|
+ .ident = "Trekstor Primebook C11",
|
|
+ .matches = {
|
|
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
|
|
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11"),
|
|
+ },
|
|
+ .driver_data = (void *)&sipodev_desc
|
|
+ },
|
|
+ {
|
|
+ .ident = "Direkt-Tek DTLAPY116-2",
|
|
+ .matches = {
|
|
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
|
|
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY116-2"),
|
|
+ },
|
|
+ .driver_data = (void *)&sipodev_desc
|
|
+ },
|
|
+ {
|
|
+ .ident = "Mediacom Flexbook Edge 11",
|
|
+ .matches = {
|
|
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
|
|
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook edge11 - M-FBE11"),
|
|
+ },
|
|
+ .driver_data = (void *)&sipodev_desc
|
|
+ }
|
|
+};
|
|
+
|
|
+
|
|
+struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
|
|
+{
|
|
+ struct i2c_hid_desc_override *override;
|
|
+ const struct dmi_system_id *system_id;
|
|
+
|
|
+ system_id = dmi_first_match(i2c_hid_dmi_desc_override_table);
|
|
+ if (!system_id)
|
|
+ return NULL;
|
|
+
|
|
+ override = system_id->driver_data;
|
|
+ if (strcmp(override->i2c_name, i2c_name))
|
|
+ return NULL;
|
|
+
|
|
+ return override->i2c_hid_desc;
|
|
+}
|
|
+
|
|
+char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
|
|
+ unsigned int *size)
|
|
+{
|
|
+ struct i2c_hid_desc_override *override;
|
|
+ const struct dmi_system_id *system_id;
|
|
+
|
|
+ system_id = dmi_first_match(i2c_hid_dmi_desc_override_table);
|
|
+ if (!system_id)
|
|
+ return NULL;
|
|
+
|
|
+ override = system_id->driver_data;
|
|
+ if (strcmp(override->i2c_name, i2c_name))
|
|
+ return NULL;
|
|
+
|
|
+ *size = override->hid_report_desc_size;
|
|
+ return override->hid_report_desc;
|
|
+}
|
|
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
deleted file mode 100644
index 136a34dc31b8..000000000000
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ /dev/null
@@ -1,1279 +0,0 @@
-/*
- * HID over I2C protocol implementation
|
|
- *
|
|
- * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
|
|
- * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
|
|
- * Copyright (c) 2012 Red Hat, Inc
|
|
- *
|
|
- * This code is partly based on "USB HID support for Linux":
|
|
- *
|
|
- * Copyright (c) 1999 Andreas Gal
|
|
- * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
|
|
- * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
|
|
- * Copyright (c) 2007-2008 Oliver Neukum
|
|
- * Copyright (c) 2006-2010 Jiri Kosina
|
|
- *
|
|
- * This file is subject to the terms and conditions of the GNU General Public
|
|
- * License. See the file COPYING in the main directory of this archive for
|
|
- * more details.
|
|
- */
|
|
-
|
|
-#include <linux/module.h>
|
|
-#include <linux/i2c.h>
|
|
-#include <linux/interrupt.h>
|
|
-#include <linux/input.h>
|
|
-#include <linux/irq.h>
|
|
-#include <linux/delay.h>
|
|
-#include <linux/slab.h>
|
|
-#include <linux/pm.h>
|
|
-#include <linux/pm_runtime.h>
|
|
-#include <linux/device.h>
|
|
-#include <linux/wait.h>
|
|
-#include <linux/err.h>
|
|
-#include <linux/string.h>
|
|
-#include <linux/list.h>
|
|
-#include <linux/jiffies.h>
|
|
-#include <linux/kernel.h>
|
|
-#include <linux/hid.h>
|
|
-#include <linux/mutex.h>
|
|
-#include <linux/acpi.h>
|
|
-#include <linux/of.h>
|
|
-#include <linux/regulator/consumer.h>
|
|
-
|
|
-#include <linux/platform_data/i2c-hid.h>
|
|
-
|
|
-#include "../hid-ids.h"
|
|
-
|
|
-/* quirks to control the device */
|
|
-#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
|
|
-
|
|
-/* flags */
|
|
-#define I2C_HID_STARTED 0
|
|
-#define I2C_HID_RESET_PENDING 1
|
|
-#define I2C_HID_READ_PENDING 2
|
|
-
|
|
-#define I2C_HID_PWR_ON 0x00
|
|
-#define I2C_HID_PWR_SLEEP 0x01
|
|
-
|
|
-/* debug option */
|
|
-static bool debug;
|
|
-module_param(debug, bool, 0444);
|
|
-MODULE_PARM_DESC(debug, "print a lot of debug information");
|
|
-
|
|
-#define i2c_hid_dbg(ihid, fmt, arg...) \
|
|
-do { \
|
|
- if (debug) \
|
|
- dev_printk(KERN_DEBUG, &(ihid)->client->dev, fmt, ##arg); \
|
|
-} while (0)
|
|
-
|
|
-struct i2c_hid_desc {
|
|
- __le16 wHIDDescLength;
|
|
- __le16 bcdVersion;
|
|
- __le16 wReportDescLength;
|
|
- __le16 wReportDescRegister;
|
|
- __le16 wInputRegister;
|
|
- __le16 wMaxInputLength;
|
|
- __le16 wOutputRegister;
|
|
- __le16 wMaxOutputLength;
|
|
- __le16 wCommandRegister;
|
|
- __le16 wDataRegister;
|
|
- __le16 wVendorID;
|
|
- __le16 wProductID;
|
|
- __le16 wVersionID;
|
|
- __le32 reserved;
|
|
-} __packed;
|
|
-
|
|
-struct i2c_hid_cmd {
|
|
- unsigned int registerIndex;
|
|
- __u8 opcode;
|
|
- unsigned int length;
|
|
- bool wait;
|
|
-};
|
|
-
|
|
-union command {
|
|
- u8 data[0];
|
|
- struct cmd {
|
|
- __le16 reg;
|
|
- __u8 reportTypeID;
|
|
- __u8 opcode;
|
|
- } __packed c;
|
|
-};
|
|
-
|
|
-#define I2C_HID_CMD(opcode_) \
|
|
- .opcode = opcode_, .length = 4, \
|
|
- .registerIndex = offsetof(struct i2c_hid_desc, wCommandRegister)
|
|
-
|
|
-/* fetch HID descriptor */
|
|
-static const struct i2c_hid_cmd hid_descr_cmd = { .length = 2 };
|
|
-/* fetch report descriptors */
|
|
-static const struct i2c_hid_cmd hid_report_descr_cmd = {
|
|
- .registerIndex = offsetof(struct i2c_hid_desc,
|
|
- wReportDescRegister),
|
|
- .opcode = 0x00,
|
|
- .length = 2 };
|
|
-/* commands */
|
|
-static const struct i2c_hid_cmd hid_reset_cmd = { I2C_HID_CMD(0x01),
|
|
- .wait = true };
|
|
-static const struct i2c_hid_cmd hid_get_report_cmd = { I2C_HID_CMD(0x02) };
|
|
-static const struct i2c_hid_cmd hid_set_report_cmd = { I2C_HID_CMD(0x03) };
|
|
-static const struct i2c_hid_cmd hid_set_power_cmd = { I2C_HID_CMD(0x08) };
|
|
-static const struct i2c_hid_cmd hid_no_cmd = { .length = 0 };
|
|
-
|
|
-/*
|
|
- * These definitions are not used here, but are defined by the spec.
|
|
- * Keeping them here for documentation purposes.
|
|
- *
|
|
- * static const struct i2c_hid_cmd hid_get_idle_cmd = { I2C_HID_CMD(0x04) };
|
|
- * static const struct i2c_hid_cmd hid_set_idle_cmd = { I2C_HID_CMD(0x05) };
|
|
- * static const struct i2c_hid_cmd hid_get_protocol_cmd = { I2C_HID_CMD(0x06) };
|
|
- * static const struct i2c_hid_cmd hid_set_protocol_cmd = { I2C_HID_CMD(0x07) };
|
|
- */
|
|
-
|
|
-static DEFINE_MUTEX(i2c_hid_open_mut);
|
|
-
|
|
-/* The main device structure */
|
|
-struct i2c_hid {
|
|
- struct i2c_client *client; /* i2c client */
|
|
- struct hid_device *hid; /* pointer to corresponding HID dev */
|
|
- union {
|
|
- __u8 hdesc_buffer[sizeof(struct i2c_hid_desc)];
|
|
- struct i2c_hid_desc hdesc; /* the HID Descriptor */
|
|
- };
|
|
- __le16 wHIDDescRegister; /* location of the i2c
|
|
- * register of the HID
|
|
- * descriptor. */
|
|
- unsigned int bufsize; /* i2c buffer size */
|
|
- u8 *inbuf; /* Input buffer */
|
|
- u8 *rawbuf; /* Raw Input buffer */
|
|
- u8 *cmdbuf; /* Command buffer */
|
|
- u8 *argsbuf; /* Command arguments buffer */
|
|
-
|
|
- unsigned long flags; /* device flags */
|
|
- unsigned long quirks; /* Various quirks */
|
|
-
|
|
- wait_queue_head_t wait; /* For waiting the interrupt */
|
|
-
|
|
- struct i2c_hid_platform_data pdata;
|
|
-
|
|
- bool irq_wake_enabled;
|
|
- struct mutex reset_lock;
|
|
-};
|
|
-
|
|
-static const struct i2c_hid_quirks {
|
|
- __u16 idVendor;
|
|
- __u16 idProduct;
|
|
- __u32 quirks;
|
|
-} i2c_hid_quirks[] = {
|
|
- { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8752,
|
|
- I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
|
|
- { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
|
|
- I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
|
|
- { 0, 0 }
|
|
-};
|
|
-
|
|
-/*
|
|
- * i2c_hid_lookup_quirk: return any quirks associated with a I2C HID device
|
|
- * @idVendor: the 16-bit vendor ID
|
|
- * @idProduct: the 16-bit product ID
|
|
- *
|
|
- * Returns: a u32 quirks value.
|
|
- */
|
|
-static u32 i2c_hid_lookup_quirk(const u16 idVendor, const u16 idProduct)
|
|
-{
|
|
- u32 quirks = 0;
|
|
- int n;
|
|
-
|
|
- for (n = 0; i2c_hid_quirks[n].idVendor; n++)
|
|
- if (i2c_hid_quirks[n].idVendor == idVendor &&
|
|
- (i2c_hid_quirks[n].idProduct == (__u16)HID_ANY_ID ||
|
|
- i2c_hid_quirks[n].idProduct == idProduct))
|
|
- quirks = i2c_hid_quirks[n].quirks;
|
|
-
|
|
- return quirks;
|
|
-}
|
|
-
|
|
-static int __i2c_hid_command(struct i2c_client *client,
|
|
- const struct i2c_hid_cmd *command, u8 reportID,
|
|
- u8 reportType, u8 *args, int args_len,
|
|
- unsigned char *buf_recv, int data_len)
|
|
-{
|
|
- struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
- union command *cmd = (union command *)ihid->cmdbuf;
|
|
- int ret;
|
|
- struct i2c_msg msg[2];
|
|
- int msg_num = 1;
|
|
-
|
|
- int length = command->length;
|
|
- bool wait = command->wait;
|
|
- unsigned int registerIndex = command->registerIndex;
|
|
-
|
|
- /* special case for hid_descr_cmd */
|
|
- if (command == &hid_descr_cmd) {
|
|
- cmd->c.reg = ihid->wHIDDescRegister;
|
|
- } else {
|
|
- cmd->data[0] = ihid->hdesc_buffer[registerIndex];
|
|
- cmd->data[1] = ihid->hdesc_buffer[registerIndex + 1];
|
|
- }
|
|
-
|
|
- if (length > 2) {
|
|
- cmd->c.opcode = command->opcode;
|
|
- cmd->c.reportTypeID = reportID | reportType << 4;
|
|
- }
|
|
-
|
|
- memcpy(cmd->data + length, args, args_len);
|
|
- length += args_len;
|
|
-
|
|
- i2c_hid_dbg(ihid, "%s: cmd=%*ph\n", __func__, length, cmd->data);
|
|
-
|
|
- msg[0].addr = client->addr;
|
|
- msg[0].flags = client->flags & I2C_M_TEN;
|
|
- msg[0].len = length;
|
|
- msg[0].buf = cmd->data;
|
|
- if (data_len > 0) {
|
|
- msg[1].addr = client->addr;
|
|
- msg[1].flags = client->flags & I2C_M_TEN;
|
|
- msg[1].flags |= I2C_M_RD;
|
|
- msg[1].len = data_len;
|
|
- msg[1].buf = buf_recv;
|
|
- msg_num = 2;
|
|
- set_bit(I2C_HID_READ_PENDING, &ihid->flags);
|
|
- }
|
|
-
|
|
- if (wait)
|
|
- set_bit(I2C_HID_RESET_PENDING, &ihid->flags);
|
|
-
|
|
- ret = i2c_transfer(client->adapter, msg, msg_num);
|
|
-
|
|
- if (data_len > 0)
|
|
- clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
|
|
-
|
|
- if (ret != msg_num)
|
|
- return ret < 0 ? ret : -EIO;
|
|
-
|
|
- ret = 0;
|
|
-
|
|
- if (wait) {
|
|
- i2c_hid_dbg(ihid, "%s: waiting...\n", __func__);
|
|
- if (!wait_event_timeout(ihid->wait,
|
|
- !test_bit(I2C_HID_RESET_PENDING, &ihid->flags),
|
|
- msecs_to_jiffies(5000)))
|
|
- ret = -ENODATA;
|
|
- i2c_hid_dbg(ihid, "%s: finished.\n", __func__);
|
|
- }
|
|
-
|
|
- return ret;
|
|
-}
|
|
-
|
|
-static int i2c_hid_command(struct i2c_client *client,
|
|
- const struct i2c_hid_cmd *command,
|
|
- unsigned char *buf_recv, int data_len)
|
|
-{
|
|
- return __i2c_hid_command(client, command, 0, 0, NULL, 0,
|
|
- buf_recv, data_len);
|
|
-}
|
|
-
|
|
-static int i2c_hid_get_report(struct i2c_client *client, u8 reportType,
|
|
- u8 reportID, unsigned char *buf_recv, int data_len)
|
|
-{
|
|
- struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
- u8 args[3];
|
|
- int ret;
|
|
- int args_len = 0;
|
|
- u16 readRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
|
|
-
|
|
- i2c_hid_dbg(ihid, "%s\n", __func__);
|
|
-
|
|
- if (reportID >= 0x0F) {
|
|
- args[args_len++] = reportID;
|
|
- reportID = 0x0F;
|
|
- }
|
|
-
|
|
- args[args_len++] = readRegister & 0xFF;
|
|
- args[args_len++] = readRegister >> 8;
|
|
-
|
|
- ret = __i2c_hid_command(client, &hid_get_report_cmd, reportID,
|
|
- reportType, args, args_len, buf_recv, data_len);
|
|
- if (ret) {
|
|
- dev_err(&client->dev,
|
|
- "failed to retrieve report from device.\n");
|
|
- return ret;
|
|
- }
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-/**
|
|
- * i2c_hid_set_or_send_report: forward an incoming report to the device
|
|
- * @client: the i2c_client of the device
|
|
- * @reportType: 0x03 for HID_FEATURE_REPORT ; 0x02 for HID_OUTPUT_REPORT
|
|
- * @reportID: the report ID
|
|
- * @buf: the actual data to transfer, without the report ID
|
|
- * @len: size of buf
|
|
- * @use_data: true: use SET_REPORT HID command, false: send plain OUTPUT report
|
|
- */
|
|
-static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
|
|
- u8 reportID, unsigned char *buf, size_t data_len, bool use_data)
|
|
-{
|
|
- struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
- u8 *args = ihid->argsbuf;
|
|
- const struct i2c_hid_cmd *hidcmd;
|
|
- int ret;
|
|
- u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
|
|
- u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
|
|
- u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
|
|
- u16 size;
|
|
- int args_len;
|
|
- int index = 0;
|
|
-
|
|
- i2c_hid_dbg(ihid, "%s\n", __func__);
|
|
-
|
|
- if (data_len > ihid->bufsize)
|
|
- return -EINVAL;
|
|
-
|
|
- size = 2 /* size */ +
|
|
- (reportID ? 1 : 0) /* reportID */ +
|
|
- data_len /* buf */;
|
|
- args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
|
|
- 2 /* dataRegister */ +
|
|
- size /* args */;
|
|
-
|
|
- if (!use_data && maxOutputLength == 0)
|
|
- return -ENOSYS;
|
|
-
|
|
- if (reportID >= 0x0F) {
|
|
- args[index++] = reportID;
|
|
- reportID = 0x0F;
|
|
- }
|
|
-
|
|
- /*
|
|
- * use the data register for feature reports or if the device does not
|
|
- * support the output register
|
|
- */
|
|
- if (use_data) {
|
|
- args[index++] = dataRegister & 0xFF;
|
|
- args[index++] = dataRegister >> 8;
|
|
- hidcmd = &hid_set_report_cmd;
|
|
- } else {
|
|
- args[index++] = outputRegister & 0xFF;
|
|
- args[index++] = outputRegister >> 8;
|
|
- hidcmd = &hid_no_cmd;
|
|
- }
|
|
-
|
|
- args[index++] = size & 0xFF;
|
|
- args[index++] = size >> 8;
|
|
-
|
|
- if (reportID)
|
|
- args[index++] = reportID;
|
|
-
|
|
- memcpy(&args[index], buf, data_len);
|
|
-
|
|
- ret = __i2c_hid_command(client, hidcmd, reportID,
|
|
- reportType, args, args_len, NULL, 0);
|
|
- if (ret) {
|
|
- dev_err(&client->dev, "failed to set a report to device.\n");
|
|
- return ret;
|
|
- }
|
|
-
|
|
- return data_len;
|
|
-}
|
|
-
|
|
-static int i2c_hid_set_power(struct i2c_client *client, int power_state)
|
|
-{
|
|
- struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
- int ret;
|
|
-
|
|
- i2c_hid_dbg(ihid, "%s\n", __func__);
|
|
-
|
|
- /*
|
|
- * Some devices require to send a command to wakeup before power on.
|
|
- * The call will get a return value (EREMOTEIO) but device will be
|
|
- * triggered and activated. After that, it goes like a normal device.
|
|
- */
|
|
- if (power_state == I2C_HID_PWR_ON &&
|
|
- ihid->quirks & I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV) {
|
|
- ret = i2c_hid_command(client, &hid_set_power_cmd, NULL, 0);
|
|
-
|
|
- /* Device was already activated */
|
|
- if (!ret)
|
|
- goto set_pwr_exit;
|
|
- }
|
|
-
|
|
- ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
|
|
- 0, NULL, 0, NULL, 0);
|
|
-
|
|
- if (ret)
|
|
- dev_err(&client->dev, "failed to change power setting.\n");
|
|
-
|
|
-set_pwr_exit:
|
|
- return ret;
|
|
-}
|
|
-
|
|
-static int i2c_hid_hwreset(struct i2c_client *client)
|
|
-{
|
|
- struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
- int ret;
|
|
-
|
|
- i2c_hid_dbg(ihid, "%s\n", __func__);
|
|
-
|
|
- /*
|
|
- * This prevents sending feature reports while the device is
|
|
- * being reset. Otherwise we may lose the reset complete
|
|
- * interrupt.
|
|
- */
|
|
- mutex_lock(&ihid->reset_lock);
|
|
-
|
|
- ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
|
|
- if (ret)
|
|
- goto out_unlock;
|
|
-
|
|
- /*
|
|
- * The HID over I2C specification states that if a DEVICE needs time
|
|
- * after the PWR_ON request, it should utilise CLOCK stretching.
|
|
- * However, it has been observered that the Windows driver provides a
|
|
- * 1ms sleep between the PWR_ON and RESET requests and that some devices
|
|
- * rely on this.
|
|
- */
|
|
- usleep_range(1000, 5000);
|
|
-
|
|
- i2c_hid_dbg(ihid, "resetting...\n");
|
|
-
|
|
- ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
|
|
- if (ret) {
|
|
- dev_err(&client->dev, "failed to reset device.\n");
|
|
- i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
|
|
- }
|
|
-
|
|
-out_unlock:
|
|
- mutex_unlock(&ihid->reset_lock);
|
|
- return ret;
|
|
-}
|
|
-
|
|
-static void i2c_hid_get_input(struct i2c_hid *ihid)
|
|
-{
|
|
- int ret;
|
|
- u32 ret_size;
|
|
- int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
|
|
-
|
|
- if (size > ihid->bufsize)
|
|
- size = ihid->bufsize;
|
|
-
|
|
- ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
|
|
- if (ret != size) {
|
|
- if (ret < 0)
|
|
- return;
|
|
-
|
|
- dev_err(&ihid->client->dev, "%s: got %d data instead of %d\n",
|
|
- __func__, ret, size);
|
|
- return;
|
|
- }
|
|
-
|
|
- ret_size = ihid->inbuf[0] | ihid->inbuf[1] << 8;
|
|
-
|
|
- if (!ret_size) {
|
|
- /* host or device initiated RESET completed */
|
|
- if (test_and_clear_bit(I2C_HID_RESET_PENDING, &ihid->flags))
|
|
- wake_up(&ihid->wait);
|
|
- return;
|
|
- }
|
|
-
|
|
- if ((ret_size > size) || (ret_size < 2)) {
|
|
- dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
|
|
- __func__, size, ret_size);
|
|
- return;
|
|
- }
|
|
-
|
|
- i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf);
|
|
-
|
|
- if (test_bit(I2C_HID_STARTED, &ihid->flags))
|
|
- hid_input_report(ihid->hid, HID_INPUT_REPORT, ihid->inbuf + 2,
|
|
- ret_size - 2, 1);
|
|
-
|
|
- return;
|
|
-}
|
|
-
|
|
-static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
|
|
-{
|
|
- struct i2c_hid *ihid = dev_id;
|
|
-
|
|
- if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
|
|
- return IRQ_HANDLED;
|
|
-
|
|
- i2c_hid_get_input(ihid);
|
|
-
|
|
- return IRQ_HANDLED;
|
|
-}
|
|
-
|
|
-static int i2c_hid_get_report_length(struct hid_report *report)
|
|
-{
|
|
- return ((report->size - 1) >> 3) + 1 +
|
|
- report->device->report_enum[report->type].numbered + 2;
|
|
-}
|
|
-
|
|
-/*
|
|
- * Traverse the supplied list of reports and find the longest
|
|
- */
|
|
-static void i2c_hid_find_max_report(struct hid_device *hid, unsigned int type,
|
|
- unsigned int *max)
|
|
-{
|
|
- struct hid_report *report;
|
|
- unsigned int size;
|
|
-
|
|
- /* We should not rely on wMaxInputLength, as some devices may set it to
|
|
- * a wrong length. */
|
|
- list_for_each_entry(report, &hid->report_enum[type].report_list, list) {
|
|
- size = i2c_hid_get_report_length(report);
|
|
- if (*max < size)
|
|
- *max = size;
|
|
- }
|
|
-}
|
|
-
|
|
-static void i2c_hid_free_buffers(struct i2c_hid *ihid)
|
|
-{
|
|
- kfree(ihid->inbuf);
|
|
- kfree(ihid->rawbuf);
|
|
- kfree(ihid->argsbuf);
|
|
- kfree(ihid->cmdbuf);
|
|
- ihid->inbuf = NULL;
|
|
- ihid->rawbuf = NULL;
|
|
- ihid->cmdbuf = NULL;
|
|
- ihid->argsbuf = NULL;
|
|
- ihid->bufsize = 0;
|
|
-}
|
|
-
|
|
-static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
|
|
-{
|
|
- /* the worst case is computed from the set_report command with a
|
|
- * reportID > 15 and the maximum report length */
|
|
- int args_len = sizeof(__u8) + /* ReportID */
|
|
- sizeof(__u8) + /* optional ReportID byte */
|
|
- sizeof(__u16) + /* data register */
|
|
- sizeof(__u16) + /* size of the report */
|
|
- report_size; /* report */
|
|
-
|
|
- ihid->inbuf = kzalloc(report_size, GFP_KERNEL);
|
|
- ihid->rawbuf = kzalloc(report_size, GFP_KERNEL);
|
|
- ihid->argsbuf = kzalloc(args_len, GFP_KERNEL);
|
|
- ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL);
|
|
-
|
|
- if (!ihid->inbuf || !ihid->rawbuf || !ihid->argsbuf || !ihid->cmdbuf) {
|
|
- i2c_hid_free_buffers(ihid);
|
|
- return -ENOMEM;
|
|
- }
|
|
-
|
|
- ihid->bufsize = report_size;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static int i2c_hid_get_raw_report(struct hid_device *hid,
|
|
- unsigned char report_number, __u8 *buf, size_t count,
|
|
- unsigned char report_type)
|
|
-{
|
|
- struct i2c_client *client = hid->driver_data;
|
|
- struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
- size_t ret_count, ask_count;
|
|
- int ret;
|
|
-
|
|
- if (report_type == HID_OUTPUT_REPORT)
|
|
- return -EINVAL;
|
|
-
|
|
- /* +2 bytes to include the size of the reply in the query buffer */
|
|
- ask_count = min(count + 2, (size_t)ihid->bufsize);
|
|
-
|
|
- ret = i2c_hid_get_report(client,
|
|
- report_type == HID_FEATURE_REPORT ? 0x03 : 0x01,
|
|
- report_number, ihid->rawbuf, ask_count);
|
|
-
|
|
- if (ret < 0)
|
|
- return ret;
|
|
-
|
|
- ret_count = ihid->rawbuf[0] | (ihid->rawbuf[1] << 8);
|
|
-
|
|
- if (ret_count <= 2)
|
|
- return 0;
|
|
-
|
|
- ret_count = min(ret_count, ask_count);
|
|
-
|
|
- /* The query buffer contains the size, dropping it in the reply */
|
|
- count = min(count, ret_count - 2);
|
|
- memcpy(buf, ihid->rawbuf + 2, count);
|
|
-
|
|
- return count;
|
|
-}
|
|
-
|
|
-static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
|
|
- size_t count, unsigned char report_type, bool use_data)
|
|
-{
|
|
- struct i2c_client *client = hid->driver_data;
|
|
- struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
- int report_id = buf[0];
|
|
- int ret;
|
|
-
|
|
- if (report_type == HID_INPUT_REPORT)
|
|
- return -EINVAL;
|
|
-
|
|
- mutex_lock(&ihid->reset_lock);
|
|
-
|
|
- if (report_id) {
|
|
- buf++;
|
|
- count--;
|
|
- }
|
|
-
|
|
- ret = i2c_hid_set_or_send_report(client,
|
|
- report_type == HID_FEATURE_REPORT ? 0x03 : 0x02,
|
|
- report_id, buf, count, use_data);
|
|
-
|
|
- if (report_id && ret >= 0)
|
|
- ret++; /* add report_id to the number of transfered bytes */
|
|
-
|
|
- mutex_unlock(&ihid->reset_lock);
|
|
-
|
|
- return ret;
|
|
-}
|
|
-
|
|
-static int i2c_hid_output_report(struct hid_device *hid, __u8 *buf,
|
|
- size_t count)
|
|
-{
|
|
- return i2c_hid_output_raw_report(hid, buf, count, HID_OUTPUT_REPORT,
|
|
- false);
|
|
-}
|
|
-
|
|
-static int i2c_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
|
|
- __u8 *buf, size_t len, unsigned char rtype,
|
|
- int reqtype)
|
|
-{
|
|
- switch (reqtype) {
|
|
- case HID_REQ_GET_REPORT:
|
|
- return i2c_hid_get_raw_report(hid, reportnum, buf, len, rtype);
|
|
- case HID_REQ_SET_REPORT:
|
|
- if (buf[0] != reportnum)
|
|
- return -EINVAL;
|
|
- return i2c_hid_output_raw_report(hid, buf, len, rtype, true);
|
|
- default:
|
|
- return -EIO;
|
|
- }
|
|
-}
|
|
-
|
|
-static int i2c_hid_parse(struct hid_device *hid)
|
|
-{
|
|
- struct i2c_client *client = hid->driver_data;
|
|
- struct i2c_hid *ihid = i2c_get_clientdata(client);
|
|
- struct i2c_hid_desc *hdesc = &ihid->hdesc;
|
|
- unsigned int rsize;
|
|
- char *rdesc;
|
|
- int ret;
|
|
- int tries = 3;
|
|
-
|
|
- i2c_hid_dbg(ihid, "entering %s\n", __func__);
|
|
-
|
|
- rsize = le16_to_cpu(hdesc->wReportDescLength);
|
|
- if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
|
|
- dbg_hid("weird size of report descriptor (%u)\n", rsize);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- do {
|
|
- ret = i2c_hid_hwreset(client);
|
|
- if (ret)
|
|
- msleep(1000);
|
|
- } while (tries-- > 0 && ret);
|
|
-
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- rdesc = kzalloc(rsize, GFP_KERNEL);
|
|
-
|
|
- if (!rdesc) {
|
|
- dbg_hid("couldn't allocate rdesc memory\n");
|
|
- return -ENOMEM;
|
|
- }
|
|
-
|
|
- i2c_hid_dbg(ihid, "asking HID report descriptor\n");
|
|
-
|
|
- ret = i2c_hid_command(client, &hid_report_descr_cmd, rdesc, rsize);
|
|
- if (ret) {
|
|
- hid_err(hid, "reading report descriptor failed\n");
|
|
- kfree(rdesc);
|
|
- return -EIO;
|
|
- }
|
|
-
|
|
- i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
|
|
-
|
|
- ret = hid_parse_report(hid, rdesc, rsize);
|
|
- kfree(rdesc);
|
|
- if (ret) {
|
|
- dbg_hid("parsing report descriptor failed\n");
|
|
- return ret;
|
|
- }
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static int i2c_hid_start(struct hid_device *hid)
-{
- struct i2c_client *client = hid->driver_data;
- struct i2c_hid *ihid = i2c_get_clientdata(client);
- int ret;
- unsigned int bufsize = HID_MIN_BUFFER_SIZE;
-
- i2c_hid_find_max_report(hid, HID_INPUT_REPORT, &bufsize);
- i2c_hid_find_max_report(hid, HID_OUTPUT_REPORT, &bufsize);
- i2c_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize);
-
- if (bufsize > ihid->bufsize) {
- disable_irq(client->irq);
- i2c_hid_free_buffers(ihid);
-
- ret = i2c_hid_alloc_buffers(ihid, bufsize);
- enable_irq(client->irq);
-
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static void i2c_hid_stop(struct hid_device *hid)
-{
- hid->claimed = 0;
-}
-
-static int i2c_hid_open(struct hid_device *hid)
-{
- struct i2c_client *client = hid->driver_data;
- struct i2c_hid *ihid = i2c_get_clientdata(client);
- int ret = 0;
-
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0)
- return ret;
-
- set_bit(I2C_HID_STARTED, &ihid->flags);
- return 0;
-}
-
-static void i2c_hid_close(struct hid_device *hid)
-{
- struct i2c_client *client = hid->driver_data;
- struct i2c_hid *ihid = i2c_get_clientdata(client);
-
- clear_bit(I2C_HID_STARTED, &ihid->flags);
-
- /* Save some power */
- pm_runtime_put(&client->dev);
-}
-
-static int i2c_hid_power(struct hid_device *hid, int lvl)
-{
- struct i2c_client *client = hid->driver_data;
- struct i2c_hid *ihid = i2c_get_clientdata(client);
-
- i2c_hid_dbg(ihid, "%s lvl:%d\n", __func__, lvl);
-
- switch (lvl) {
- case PM_HINT_FULLON:
- pm_runtime_get_sync(&client->dev);
- break;
- case PM_HINT_NORMAL:
- pm_runtime_put(&client->dev);
- break;
- }
- return 0;
-}
-
-struct hid_ll_driver i2c_hid_ll_driver = {
- .parse = i2c_hid_parse,
- .start = i2c_hid_start,
- .stop = i2c_hid_stop,
- .open = i2c_hid_open,
- .close = i2c_hid_close,
- .power = i2c_hid_power,
- .output_report = i2c_hid_output_report,
- .raw_request = i2c_hid_raw_request,
-};
-EXPORT_SYMBOL_GPL(i2c_hid_ll_driver);
-
-static int i2c_hid_init_irq(struct i2c_client *client)
-{
- struct i2c_hid *ihid = i2c_get_clientdata(client);
- unsigned long irqflags = 0;
- int ret;
-
- dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
-
- if (!irq_get_trigger_type(client->irq))
- irqflags = IRQF_TRIGGER_LOW;
-
- ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
- irqflags | IRQF_ONESHOT, client->name, ihid);
- if (ret < 0) {
- dev_warn(&client->dev,
- "Could not register for %s interrupt, irq = %d,"
- " ret = %d\n",
- client->name, client->irq, ret);
-
- return ret;
- }
-
- return 0;
-}
-
-static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
-{
- struct i2c_client *client = ihid->client;
- struct i2c_hid_desc *hdesc = &ihid->hdesc;
- unsigned int dsize;
- int ret;
-
- /* i2c hid fetch using a fixed descriptor size (30 bytes) */
- i2c_hid_dbg(ihid, "Fetching the HID descriptor\n");
- ret = i2c_hid_command(client, &hid_descr_cmd, ihid->hdesc_buffer,
- sizeof(struct i2c_hid_desc));
- if (ret) {
- dev_err(&client->dev, "hid_descr_cmd failed\n");
- return -ENODEV;
- }
-
- /* Validate the length of HID descriptor, the 4 first bytes:
- * bytes 0-1 -> length
- * bytes 2-3 -> bcdVersion (has to be 1.00) */
- /* check bcdVersion == 1.0 */
- if (le16_to_cpu(hdesc->bcdVersion) != 0x0100) {
- dev_err(&client->dev,
- "unexpected HID descriptor bcdVersion (0x%04hx)\n",
- le16_to_cpu(hdesc->bcdVersion));
- return -ENODEV;
- }
-
- /* Descriptor length should be 30 bytes as per the specification */
- dsize = le16_to_cpu(hdesc->wHIDDescLength);
- if (dsize != sizeof(struct i2c_hid_desc)) {
- dev_err(&client->dev, "weird size of HID descriptor (%u)\n",
- dsize);
- return -ENODEV;
- }
- i2c_hid_dbg(ihid, "HID Descriptor: %*ph\n", dsize, ihid->hdesc_buffer);
- return 0;
-}
-
-#ifdef CONFIG_ACPI
-static int i2c_hid_acpi_pdata(struct i2c_client *client,
- struct i2c_hid_platform_data *pdata)
-{
- static guid_t i2c_hid_guid =
- GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
- 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
- union acpi_object *obj;
- struct acpi_device *adev;
- acpi_handle handle;
-
- handle = ACPI_HANDLE(&client->dev);
- if (!handle || acpi_bus_get_device(handle, &adev))
- return -ENODEV;
-
- obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL,
- ACPI_TYPE_INTEGER);
- if (!obj) {
- dev_err(&client->dev, "device _DSM execution failed\n");
- return -ENODEV;
- }
-
- pdata->hid_descriptor_address = obj->integer.value;
- ACPI_FREE(obj);
-
- return 0;
-}
-
-static void i2c_hid_acpi_fix_up_power(struct device *dev)
-{
- acpi_handle handle = ACPI_HANDLE(dev);
- struct acpi_device *adev;
-
- if (handle && acpi_bus_get_device(handle, &adev) == 0)
- acpi_device_fix_up_power(adev);
-}
-
-static const struct acpi_device_id i2c_hid_acpi_match[] = {
- {"ACPI0C50", 0 },
- {"PNP0C50", 0 },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, i2c_hid_acpi_match);
-#else
-static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
- struct i2c_hid_platform_data *pdata)
-{
- return -ENODEV;
-}
-
-static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {}
-#endif
-
-#ifdef CONFIG_OF
-static int i2c_hid_of_probe(struct i2c_client *client,
- struct i2c_hid_platform_data *pdata)
-{
- struct device *dev = &client->dev;
- u32 val;
- int ret;
-
- ret = of_property_read_u32(dev->of_node, "hid-descr-addr", &val);
- if (ret) {
- dev_err(&client->dev, "HID register address not provided\n");
- return -ENODEV;
- }
- if (val >> 16) {
- dev_err(&client->dev, "Bad HID register address: 0x%08x\n",
- val);
- return -EINVAL;
- }
- pdata->hid_descriptor_address = val;
-
- ret = of_property_read_u32(dev->of_node, "post-power-on-delay-ms",
- &val);
- if (!ret)
- pdata->post_power_delay_ms = val;
-
- return 0;
-}
-
-static const struct of_device_id i2c_hid_of_match[] = {
- { .compatible = "hid-over-i2c" },
- {},
-};
-MODULE_DEVICE_TABLE(of, i2c_hid_of_match);
-#else
-static inline int i2c_hid_of_probe(struct i2c_client *client,
- struct i2c_hid_platform_data *pdata)
-{
- return -ENODEV;
-}
-#endif
-
-static int i2c_hid_probe(struct i2c_client *client,
- const struct i2c_device_id *dev_id)
-{
- int ret;
- struct i2c_hid *ihid;
- struct hid_device *hid;
- __u16 hidRegister;
- struct i2c_hid_platform_data *platform_data = client->dev.platform_data;
-
- dbg_hid("HID probe called for i2c 0x%02x\n", client->addr);
-
- if (!client->irq) {
- dev_err(&client->dev,
- "HID over i2c has not been provided an Int IRQ\n");
- return -EINVAL;
- }
-
- if (client->irq < 0) {
- if (client->irq != -EPROBE_DEFER)
- dev_err(&client->dev,
- "HID over i2c doesn't have a valid IRQ\n");
- return client->irq;
- }
-
- ihid = kzalloc(sizeof(struct i2c_hid), GFP_KERNEL);
- if (!ihid)
- return -ENOMEM;
-
- if (client->dev.of_node) {
- ret = i2c_hid_of_probe(client, &ihid->pdata);
- if (ret)
- goto err;
- } else if (!platform_data) {
- ret = i2c_hid_acpi_pdata(client, &ihid->pdata);
- if (ret) {
- dev_err(&client->dev,
- "HID register address not provided\n");
- goto err;
- }
- } else {
- ihid->pdata = *platform_data;
- }
-
- ihid->pdata.supply = devm_regulator_get(&client->dev, "vdd");
- if (IS_ERR(ihid->pdata.supply)) {
- ret = PTR_ERR(ihid->pdata.supply);
- if (ret != -EPROBE_DEFER)
- dev_err(&client->dev, "Failed to get regulator: %d\n",
- ret);
- goto err;
- }
-
- ret = regulator_enable(ihid->pdata.supply);
- if (ret < 0) {
- dev_err(&client->dev, "Failed to enable regulator: %d\n",
- ret);
- goto err;
- }
- if (ihid->pdata.post_power_delay_ms)
- msleep(ihid->pdata.post_power_delay_ms);
-
- i2c_set_clientdata(client, ihid);
-
- ihid->client = client;
-
- hidRegister = ihid->pdata.hid_descriptor_address;
- ihid->wHIDDescRegister = cpu_to_le16(hidRegister);
-
- init_waitqueue_head(&ihid->wait);
- mutex_init(&ihid->reset_lock);
-
- /* we need to allocate the command buffer without knowing the maximum
- * size of the reports. Let's use HID_MIN_BUFFER_SIZE, then we do the
- * real computation later. */
- ret = i2c_hid_alloc_buffers(ihid, HID_MIN_BUFFER_SIZE);
- if (ret < 0)
- goto err_regulator;
-
- i2c_hid_acpi_fix_up_power(&client->dev);
-
- pm_runtime_get_noresume(&client->dev);
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- device_enable_async_suspend(&client->dev);
-
- /* Make sure there is something at this address */
- ret = i2c_smbus_read_byte(client);
- if (ret < 0) {
- dev_dbg(&client->dev, "nothing at this address: %d\n", ret);
- ret = -ENXIO;
- goto err_pm;
- }
-
- ret = i2c_hid_fetch_hid_descriptor(ihid);
- if (ret < 0)
- goto err_pm;
-
- ret = i2c_hid_init_irq(client);
- if (ret < 0)
- goto err_pm;
-
- hid = hid_allocate_device();
- if (IS_ERR(hid)) {
- ret = PTR_ERR(hid);
- goto err_irq;
- }
-
- ihid->hid = hid;
-
- hid->driver_data = client;
- hid->ll_driver = &i2c_hid_ll_driver;
- hid->dev.parent = &client->dev;
- hid->bus = BUS_I2C;
- hid->version = le16_to_cpu(ihid->hdesc.bcdVersion);
- hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
- hid->product = le16_to_cpu(ihid->hdesc.wProductID);
-
- snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
- client->name, hid->vendor, hid->product);
- strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
-
- ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
-
- ret = hid_add_device(hid);
- if (ret) {
- if (ret != -ENODEV)
- hid_err(client, "can't add hid device: %d\n", ret);
- goto err_mem_free;
- }
-
- pm_runtime_put(&client->dev);
- return 0;
-
-err_mem_free:
- hid_destroy_device(hid);
-
-err_irq:
- free_irq(client->irq, ihid);
-
-err_pm:
- pm_runtime_put_noidle(&client->dev);
- pm_runtime_disable(&client->dev);
-
-err_regulator:
- regulator_disable(ihid->pdata.supply);
-
-err:
- i2c_hid_free_buffers(ihid);
- kfree(ihid);
- return ret;
-}
-
-static int i2c_hid_remove(struct i2c_client *client)
-{
- struct i2c_hid *ihid = i2c_get_clientdata(client);
- struct hid_device *hid;
-
- pm_runtime_get_sync(&client->dev);
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
- pm_runtime_put_noidle(&client->dev);
-
- hid = ihid->hid;
- hid_destroy_device(hid);
-
- free_irq(client->irq, ihid);
-
- if (ihid->bufsize)
- i2c_hid_free_buffers(ihid);
-
- regulator_disable(ihid->pdata.supply);
-
- kfree(ihid);
-
- return 0;
-}
-
-static void i2c_hid_shutdown(struct i2c_client *client)
-{
- struct i2c_hid *ihid = i2c_get_clientdata(client);
-
- i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
- free_irq(client->irq, ihid);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int i2c_hid_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct i2c_hid *ihid = i2c_get_clientdata(client);
- struct hid_device *hid = ihid->hid;
- int ret;
- int wake_status;
-
- if (hid->driver && hid->driver->suspend) {
- /*
- * Wake up the device so that IO issues in
- * HID driver's suspend code can succeed.
- */
- ret = pm_runtime_resume(dev);
- if (ret < 0)
- return ret;
-
- ret = hid->driver->suspend(hid, PMSG_SUSPEND);
- if (ret < 0)
- return ret;
- }
-
- if (!pm_runtime_suspended(dev)) {
- /* Save some power */
- i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
-
- disable_irq(client->irq);
- }
-
- if (device_may_wakeup(&client->dev)) {
- wake_status = enable_irq_wake(client->irq);
- if (!wake_status)
- ihid->irq_wake_enabled = true;
- else
- hid_warn(hid, "Failed to enable irq wake: %d\n",
- wake_status);
- } else {
- ret = regulator_disable(ihid->pdata.supply);
- if (ret < 0)
- hid_warn(hid, "Failed to disable supply: %d\n", ret);
- }
-
- return 0;
-}
-
-static int i2c_hid_resume(struct device *dev)
-{
- int ret;
- struct i2c_client *client = to_i2c_client(dev);
- struct i2c_hid *ihid = i2c_get_clientdata(client);
- struct hid_device *hid = ihid->hid;
- int wake_status;
-
- if (!device_may_wakeup(&client->dev)) {
- ret = regulator_enable(ihid->pdata.supply);
- if (ret < 0)
- hid_warn(hid, "Failed to enable supply: %d\n", ret);
- if (ihid->pdata.post_power_delay_ms)
- msleep(ihid->pdata.post_power_delay_ms);
- } else if (ihid->irq_wake_enabled) {
- wake_status = disable_irq_wake(client->irq);
- if (!wake_status)
- ihid->irq_wake_enabled = false;
- else
- hid_warn(hid, "Failed to disable irq wake: %d\n",
- wake_status);
- }
-
- /* We'll resume to full power */
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
- enable_irq(client->irq);
- ret = i2c_hid_hwreset(client);
- if (ret)
- return ret;
-
- if (hid->driver && hid->driver->reset_resume) {
- ret = hid->driver->reset_resume(hid);
- return ret;
- }
-
- return 0;
-}
-#endif
-
-#ifdef CONFIG_PM
-static int i2c_hid_runtime_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
-
- i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
- disable_irq(client->irq);
- return 0;
-}
-
-static int i2c_hid_runtime_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
-
- enable_irq(client->irq);
- i2c_hid_set_power(client, I2C_HID_PWR_ON);
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops i2c_hid_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(i2c_hid_suspend, i2c_hid_resume)
- SET_RUNTIME_PM_OPS(i2c_hid_runtime_suspend, i2c_hid_runtime_resume,
- NULL)
-};
-
-static const struct i2c_device_id i2c_hid_id_table[] = {
- { "hid", 0 },
- { "hid-over-i2c", 0 },
- { },
-};
-MODULE_DEVICE_TABLE(i2c, i2c_hid_id_table);
-
-
-static struct i2c_driver i2c_hid_driver = {
- .driver = {
- .name = "i2c_hid",
- .pm = &i2c_hid_pm,
- .acpi_match_table = ACPI_PTR(i2c_hid_acpi_match),
- .of_match_table = of_match_ptr(i2c_hid_of_match),
- },
-
- .probe = i2c_hid_probe,
- .remove = i2c_hid_remove,
- .shutdown = i2c_hid_shutdown,
- .id_table = i2c_hid_id_table,
-};
-
-module_i2c_driver(i2c_hid_driver);
-
-MODULE_DESCRIPTION("HID over I2C core driver");
-MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h
new file mode 100644
index 000000000000..a8c19aef5824
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef I2C_HID_H
+#define I2C_HID_H
+
+
+#ifdef CONFIG_DMI
+struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name);
+char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ unsigned int *size);
+#else
+static inline struct i2c_hid_desc
+ *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
+{ return NULL; }
+static inline char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ unsigned int *size)
+{ return NULL; }
+#endif
+
+#endif
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index 9cdb3fbc8c1f..2f6f46ea68e9 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -680,6 +680,10 @@ static const struct amba_id debug_ids[] = {
 .id = 0x000bbd08,
 .mask = 0x000fffff,
 },
+ { /* Debug for Cortex-A73 */
+ .id = 0x000bbd09,
+ .mask = 0x000fffff,
+ },
 { 0, 0 },
 };

diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 155b4dfc0ae8..baab9afa9174 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -804,8 +804,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
 unsigned long flags;

 for (i = 0 ; i < dev->num_ports; i++) {
- cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
 det = &sriov->alias_guid.ports_guid[i];
+ cancel_delayed_work_sync(&det->alias_guid_work);
 spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
 while (!list_empty(&det->cb_list)) {
 cb_ctx = list_entry(det->cb_list.next,
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index c0d1c4db5794..38d0128b8135 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -144,7 +144,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
 for (tmp = dev; tmp; tmp = tmp->bus->self)
 level++;

- size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
+ size = sizeof(*info) + level * sizeof(info->path[0]);
 if (size <= sizeof(dmar_pci_notify_info_buf)) {
 info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
 } else {
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 802ba7b16e09..fe935293fa7b 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1646,6 +1646,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
 u32 pmen;
 unsigned long flags;

+ if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
+ return;
+
 raw_spin_lock_irqsave(&iommu->register_lock, flags);
 pmen = readl(iommu->reg + DMAR_PMEN_REG);
 pmen &= ~DMA_PMEN_EPM;
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 567b29c47608..98b6e1d4b1a6 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -161,6 +161,9 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
 void __iomem *base = d->chip_data;
 u32 val;

+ if (!msg->address_lo && !msg->address_hi)
+ return;
+
 base += get_mbigen_vec_reg(d->hwirq);
 val = readl_relaxed(base);

diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
|
|
index 0d2005e5b24c..94b8d81f6020 100644
|
|
--- a/drivers/md/dm-crypt.c
|
|
+++ b/drivers/md/dm-crypt.c
|
|
@@ -334,7 +334,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
|
|
|
|
sg_init_one(&sg, cc->key, cc->key_size);
|
|
ahash_request_set_tfm(req, essiv->hash_tfm);
|
|
- ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
|
|
+ ahash_request_set_callback(req, 0, NULL, NULL);
|
|
ahash_request_set_crypt(req, &sg, essiv->salt, cc->key_size);
|
|
|
|
err = crypto_ahash_digest(req);
|
|
@@ -609,7 +609,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
|
|
int i, r;
|
|
|
|
desc->tfm = lmk->hash_tfm;
|
|
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
|
|
+ desc->flags = 0;
|
|
|
|
r = crypto_shash_init(desc);
|
|
if (r)
|
|
@@ -771,7 +771,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
|
|
|
|
/* calculate crc32 for every 32bit part and xor it */
|
|
desc->tfm = tcw->crc32_tfm;
|
|
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
|
|
+ desc->flags = 0;
|
|
for (i = 0; i < 4; i++) {
|
|
r = crypto_shash_init(desc);
|
|
if (r)
|
|
@@ -1254,7 +1254,7 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
|
|
* requests if driver request queue is full.
|
|
*/
|
|
skcipher_request_set_callback(ctx->r.req,
|
|
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
|
|
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
|
|
kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
|
|
}
|
|
|
|
@@ -1271,7 +1271,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
|
|
* requests if driver request queue is full.
|
|
*/
|
|
aead_request_set_callback(ctx->r.req_aead,
|
|
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
|
|
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
|
|
kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
|
|
}
|
|
|
|
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
|
|
index da4baea9cf83..036379a23499 100644
|
|
--- a/drivers/md/dm-integrity.c
|
|
+++ b/drivers/md/dm-integrity.c
|
|
@@ -493,7 +493,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
|
|
unsigned j, size;
|
|
|
|
desc->tfm = ic->journal_mac;
|
|
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
|
|
+ desc->flags = 0;
|
|
|
|
r = crypto_shash_init(desc);
|
|
if (unlikely(r)) {
|
|
@@ -637,7 +637,7 @@ static void complete_journal_encrypt(struct crypto_async_request *req, int err)
|
|
static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
|
|
{
|
|
int r;
|
|
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
|
|
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
|
|
complete_journal_encrypt, comp);
|
|
if (likely(encrypt))
|
|
r = crypto_skcipher_encrypt(req);
|
|
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
|
|
index cd363a2100d4..257ae0d8cfe2 100644
|
|
--- a/drivers/media/usb/au0828/au0828-core.c
|
|
+++ b/drivers/media/usb/au0828/au0828-core.c
|
|
@@ -629,7 +629,6 @@ static int au0828_usb_probe(struct usb_interface *interface,
|
|
pr_err("%s() au0282_dev_register failed to register on V4L2\n",
|
|
__func__);
|
|
mutex_unlock(&dev->lock);
|
|
- kfree(dev);
|
|
goto done;
|
|
}
|
|
|
|
diff --git a/drivers/misc/lkdtm.h b/drivers/misc/lkdtm.h
|
|
index 687a0dbbe199..614612325332 100644
|
|
--- a/drivers/misc/lkdtm.h
|
|
+++ b/drivers/misc/lkdtm.h
|
|
@@ -45,7 +45,9 @@ void lkdtm_EXEC_KMALLOC(void);
|
|
void lkdtm_EXEC_VMALLOC(void);
|
|
void lkdtm_EXEC_RODATA(void);
|
|
void lkdtm_EXEC_USERSPACE(void);
|
|
+void lkdtm_EXEC_NULL(void);
|
|
void lkdtm_ACCESS_USERSPACE(void);
|
|
+void lkdtm_ACCESS_NULL(void);
|
|
|
|
/* lkdtm_refcount.c */
|
|
void lkdtm_REFCOUNT_INC_OVERFLOW(void);
|
|
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
|
|
index 981b3ef71e47..199271708aed 100644
|
|
--- a/drivers/misc/lkdtm_core.c
|
|
+++ b/drivers/misc/lkdtm_core.c
|
|
@@ -220,7 +220,9 @@ struct crashtype crashtypes[] = {
|
|
CRASHTYPE(EXEC_VMALLOC),
|
|
CRASHTYPE(EXEC_RODATA),
|
|
CRASHTYPE(EXEC_USERSPACE),
|
|
+ CRASHTYPE(EXEC_NULL),
|
|
CRASHTYPE(ACCESS_USERSPACE),
|
|
+ CRASHTYPE(ACCESS_NULL),
|
|
CRASHTYPE(WRITE_RO),
|
|
CRASHTYPE(WRITE_RO_AFTER_INIT),
|
|
CRASHTYPE(WRITE_KERN),
|
|
diff --git a/drivers/misc/lkdtm_perms.c b/drivers/misc/lkdtm_perms.c
|
|
index 53b85c9d16b8..62f76d506f04 100644
|
|
--- a/drivers/misc/lkdtm_perms.c
|
|
+++ b/drivers/misc/lkdtm_perms.c
|
|
@@ -47,7 +47,7 @@ static noinline void execute_location(void *dst, bool write)
|
|
{
|
|
void (*func)(void) = dst;
|
|
|
|
- pr_info("attempting ok execution at %p\n", do_nothing);
|
|
+ pr_info("attempting ok execution at %px\n", do_nothing);
|
|
do_nothing();
|
|
|
|
if (write == CODE_WRITE) {
|
|
@@ -55,7 +55,7 @@ static noinline void execute_location(void *dst, bool write)
|
|
flush_icache_range((unsigned long)dst,
|
|
(unsigned long)dst + EXEC_SIZE);
|
|
}
|
|
- pr_info("attempting bad execution at %p\n", func);
|
|
+ pr_info("attempting bad execution at %px\n", func);
|
|
func();
|
|
}
|
|
|
|
@@ -66,14 +66,14 @@ static void execute_user_location(void *dst)
|
|
/* Intentionally crossing kernel/user memory boundary. */
|
|
void (*func)(void) = dst;
|
|
|
|
- pr_info("attempting ok execution at %p\n", do_nothing);
|
|
+ pr_info("attempting ok execution at %px\n", do_nothing);
|
|
do_nothing();
|
|
|
|
copied = access_process_vm(current, (unsigned long)dst, do_nothing,
|
|
EXEC_SIZE, FOLL_WRITE);
|
|
if (copied < EXEC_SIZE)
|
|
return;
|
|
- pr_info("attempting bad execution at %p\n", func);
|
|
+ pr_info("attempting bad execution at %px\n", func);
|
|
func();
|
|
}
|
|
|
|
@@ -82,7 +82,7 @@ void lkdtm_WRITE_RO(void)
|
|
/* Explicitly cast away "const" for the test. */
|
|
unsigned long *ptr = (unsigned long *)&rodata;
|
|
|
|
- pr_info("attempting bad rodata write at %p\n", ptr);
|
|
+ pr_info("attempting bad rodata write at %px\n", ptr);
|
|
*ptr ^= 0xabcd1234;
|
|
}
|
|
|
|
@@ -100,7 +100,7 @@ void lkdtm_WRITE_RO_AFTER_INIT(void)
|
|
return;
|
|
}
|
|
|
|
- pr_info("attempting bad ro_after_init write at %p\n", ptr);
|
|
+ pr_info("attempting bad ro_after_init write at %px\n", ptr);
|
|
*ptr ^= 0xabcd1234;
|
|
}
|
|
|
|
@@ -112,7 +112,7 @@ void lkdtm_WRITE_KERN(void)
|
|
size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
|
|
ptr = (unsigned char *)do_overwritten;
|
|
|
|
- pr_info("attempting bad %zu byte write at %p\n", size, ptr);
|
|
+ pr_info("attempting bad %zu byte write at %px\n", size, ptr);
|
|
memcpy(ptr, (unsigned char *)do_nothing, size);
|
|
flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
|
|
|
|
@@ -164,6 +164,11 @@ void lkdtm_EXEC_USERSPACE(void)
|
|
vm_munmap(user_addr, PAGE_SIZE);
|
|
}
|
|
|
|
+void lkdtm_EXEC_NULL(void)
|
|
+{
|
|
+ execute_location(NULL, CODE_AS_IS);
|
|
+}
|
|
+
|
|
void lkdtm_ACCESS_USERSPACE(void)
|
|
{
|
|
unsigned long user_addr, tmp = 0;
|
|
@@ -185,16 +190,29 @@ void lkdtm_ACCESS_USERSPACE(void)
|
|
|
|
ptr = (unsigned long *)user_addr;
|
|
|
|
- pr_info("attempting bad read at %p\n", ptr);
|
|
+ pr_info("attempting bad read at %px\n", ptr);
|
|
tmp = *ptr;
|
|
tmp += 0xc0dec0de;
|
|
|
|
- pr_info("attempting bad write at %p\n", ptr);
|
|
+ pr_info("attempting bad write at %px\n", ptr);
|
|
*ptr = tmp;
|
|
|
|
vm_munmap(user_addr, PAGE_SIZE);
|
|
}
|
|
|
|
+void lkdtm_ACCESS_NULL(void)
|
|
+{
|
|
+ unsigned long tmp;
|
|
+ unsigned long *ptr = (unsigned long *)NULL;
|
|
+
|
|
+ pr_info("attempting bad read at %px\n", ptr);
|
|
+ tmp = *ptr;
|
|
+ tmp += 0xc0dec0de;
|
|
+
|
|
+ pr_info("attempting bad write at %px\n", ptr);
|
|
+ *ptr = tmp;
|
|
+}
|
|
+
|
|
void __init lkdtm_perms_init(void)
|
|
{
|
|
/* Make sure we can write to __ro_after_init values during __init */
|
|
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
|
|
index 351330dfb954..1bd1819cca7d 100644
|
|
--- a/drivers/mmc/host/davinci_mmc.c
|
|
+++ b/drivers/mmc/host/davinci_mmc.c
|
|
@@ -1118,7 +1118,7 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
|
|
{
|
|
}
|
|
#endif
|
|
-static void __init init_mmcsd_host(struct mmc_davinci_host *host)
|
|
+static void init_mmcsd_host(struct mmc_davinci_host *host)
|
|
{
|
|
|
|
mmc_davinci_reset_ctrl(host, 1);
|
|
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
|
|
index 5b783a91b115..8793fa57f844 100644
|
|
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
|
|
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
|
|
@@ -76,9 +76,9 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
|
|
|
|
static int
|
|
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
|
|
- const struct bpf_verifier_env *env)
|
|
+ struct bpf_verifier_env *env)
|
|
{
|
|
- const struct bpf_reg_state *reg0 = &env->cur_state.regs[0];
|
|
+ const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0;
|
|
u64 imm;
|
|
|
|
if (nfp_prog->act == NN_ACT_XDP)
|
|
@@ -113,9 +113,10 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
|
|
|
|
static int
|
|
nfp_bpf_check_ctx_ptr(struct nfp_prog *nfp_prog,
|
|
- const struct bpf_verifier_env *env, u8 reg)
|
|
+ struct bpf_verifier_env *env, u8 reg_no)
|
|
{
|
|
- if (env->cur_state.regs[reg].type != PTR_TO_CTX)
|
|
+ const struct bpf_reg_state *reg = cur_regs(env) + reg_no;
|
|
+ if (reg->type != PTR_TO_CTX)
|
|
return -EINVAL;
|
|
|
|
return 0;
|
|
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
|
|
index 4a9dbee6f054..f2429ec07b57 100644
|
|
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
|
|
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
|
|
@@ -2536,9 +2536,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
|
|
netdev_warn(priv->dev, "%s: failed debugFS registration\n",
|
|
__func__);
|
|
#endif
|
|
- /* Start the ball rolling... */
|
|
- stmmac_start_all_dma(priv);
|
|
-
|
|
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
|
|
|
|
if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
|
|
@@ -2558,6 +2555,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
|
|
priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
|
|
}
|
|
|
|
+ /* Start the ball rolling... */
|
|
+ stmmac_start_all_dma(priv);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h
|
|
index e579d694d13c..21986ba56a3c 100644
|
|
--- a/drivers/net/wireless/rsi/rsi_common.h
|
|
+++ b/drivers/net/wireless/rsi/rsi_common.h
|
|
@@ -74,7 +74,6 @@ static inline int rsi_kill_thread(struct rsi_thread *handle)
|
|
atomic_inc(&handle->thread_done);
|
|
rsi_set_event(&handle->event);
|
|
|
|
- wait_for_completion(&handle->completion);
|
|
return kthread_stop(handle->task);
|
|
}
|
|
|
|
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
|
|
index f6542c159ed6..b4d06bd9ed51 100644
|
|
--- a/drivers/scsi/scsi_transport_iscsi.c
|
|
+++ b/drivers/scsi/scsi_transport_iscsi.c
|
|
@@ -2185,6 +2185,8 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
|
|
scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
|
|
/* flush running scans then delete devices */
|
|
flush_work(&session->scan_work);
|
|
+ /* flush running unbind operations */
|
|
+ flush_work(&session->unbind_work);
|
|
__iscsi_unbind_session(&session->unbind_work);
|
|
|
|
/* hw iscsi may not have removed all connections from session */
|
|
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
|
|
index 7e9ef3431bea..2422ed56895a 100644
|
|
--- a/drivers/soc/tegra/pmc.c
|
|
+++ b/drivers/soc/tegra/pmc.c
|
|
@@ -521,16 +521,10 @@ EXPORT_SYMBOL(tegra_powergate_power_off);
|
|
*/
|
|
int tegra_powergate_is_powered(unsigned int id)
|
|
{
|
|
- int status;
|
|
-
|
|
if (!tegra_powergate_is_valid(id))
|
|
return -EINVAL;
|
|
|
|
- mutex_lock(&pmc->powergates_lock);
|
|
- status = tegra_powergate_state(id);
|
|
- mutex_unlock(&pmc->powergates_lock);
|
|
-
|
|
- return status;
|
|
+ return tegra_powergate_state(id);
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
|
|
index 24b006a95142..8646fb7425f2 100644
|
|
--- a/drivers/thermal/broadcom/bcm2835_thermal.c
|
|
+++ b/drivers/thermal/broadcom/bcm2835_thermal.c
|
|
@@ -128,8 +128,7 @@ static const struct debugfs_reg32 bcm2835_thermal_regs[] = {
|
|
|
|
static void bcm2835_thermal_debugfs(struct platform_device *pdev)
|
|
{
|
|
- struct thermal_zone_device *tz = platform_get_drvdata(pdev);
|
|
- struct bcm2835_thermal_data *data = tz->devdata;
|
|
+ struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
|
|
struct debugfs_regset32 *regset;
|
|
|
|
data->debugfsdir = debugfs_create_dir("bcm2835_thermal", NULL);
|
|
@@ -275,7 +274,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
|
|
|
|
data->tz = tz;
|
|
|
|
- platform_set_drvdata(pdev, tz);
|
|
+ platform_set_drvdata(pdev, data);
|
|
|
|
/*
|
|
* Thermal_zone doesn't enable hwmon as default,
|
|
@@ -299,8 +298,8 @@ err_clk:
|
|
|
|
static int bcm2835_thermal_remove(struct platform_device *pdev)
|
|
{
|
|
- struct thermal_zone_device *tz = platform_get_drvdata(pdev);
|
|
- struct bcm2835_thermal_data *data = tz->devdata;
|
|
+ struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
|
|
+ struct thermal_zone_device *tz = data->tz;
|
|
|
|
debugfs_remove_recursive(data->debugfsdir);
|
|
thermal_zone_of_sensor_unregister(&pdev->dev, tz);
|
|
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
|
|
index 43b90fd577e4..4a20f4d47b1d 100644
|
|
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
|
|
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
|
|
@@ -22,6 +22,13 @@ enum int3400_thermal_uuid {
|
|
INT3400_THERMAL_PASSIVE_1,
|
|
INT3400_THERMAL_ACTIVE,
|
|
INT3400_THERMAL_CRITICAL,
|
|
+ INT3400_THERMAL_ADAPTIVE_PERFORMANCE,
|
|
+ INT3400_THERMAL_EMERGENCY_CALL_MODE,
|
|
+ INT3400_THERMAL_PASSIVE_2,
|
|
+ INT3400_THERMAL_POWER_BOSS,
|
|
+ INT3400_THERMAL_VIRTUAL_SENSOR,
|
|
+ INT3400_THERMAL_COOLING_MODE,
|
|
+ INT3400_THERMAL_HARDWARE_DUTY_CYCLING,
|
|
INT3400_THERMAL_MAXIMUM_UUID,
|
|
};
|
|
|
|
@@ -29,6 +36,13 @@ static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
|
|
"42A441D6-AE6A-462b-A84B-4A8CE79027D3",
|
|
"3A95C389-E4B8-4629-A526-C52C88626BAE",
|
|
"97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
|
|
+ "63BE270F-1C11-48FD-A6F7-3AF253FF3E2D",
|
|
+ "5349962F-71E6-431D-9AE8-0A635B710AEE",
|
|
+ "9E04115A-AE87-4D1C-9500-0F3E340BFE75",
|
|
+ "F5A35014-C209-46A4-993A-EB56DE7530A1",
|
|
+ "6ED722A7-9240-48A5-B479-31EEF723D7CF",
|
|
+ "16CAF1B7-DD38-40ED-B1C1-1B8A1913D531",
|
|
+ "BE84BABF-C4D4-403D-B495-3128FD44dAC1",
|
|
};
|
|
|
|
struct int3400_thermal_priv {
|
|
@@ -302,10 +316,9 @@ static int int3400_thermal_probe(struct platform_device *pdev)
|
|
|
|
platform_set_drvdata(pdev, priv);
|
|
|
|
- if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
|
|
- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
|
|
- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
|
|
- }
|
|
+ int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
|
|
+ int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
|
|
+
|
|
priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
|
|
priv, &int3400_thermal_ops,
|
|
&int3400_thermal_params, 0, 0);
|
|
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
|
|
index d718cd179ddb..c3293fa2bb1b 100644
|
|
--- a/drivers/thermal/intel_powerclamp.c
|
|
+++ b/drivers/thermal/intel_powerclamp.c
|
|
@@ -101,7 +101,7 @@ struct powerclamp_worker_data {
|
|
bool clamping;
|
|
};
|
|
|
|
-static struct powerclamp_worker_data * __percpu worker_data;
|
|
+static struct powerclamp_worker_data __percpu *worker_data;
|
|
static struct thermal_cooling_device *cooling_dev;
|
|
static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu
|
|
* clamping kthread worker
|
|
@@ -494,7 +494,7 @@ static void start_power_clamp_worker(unsigned long cpu)
|
|
struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
|
|
struct kthread_worker *worker;
|
|
|
|
- worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inject/%ld", cpu);
|
|
+ worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu);
|
|
if (IS_ERR(worker))
|
|
return;
|
|
|
|
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
|
|
index f438a2158006..b0da63737aa1 100644
|
|
--- a/drivers/tty/serial/xilinx_uartps.c
|
|
+++ b/drivers/tty/serial/xilinx_uartps.c
|
|
@@ -1270,7 +1270,7 @@ static void cdns_uart_console_write(struct console *co, const char *s,
|
|
*
|
|
* Return: 0 on success, negative errno otherwise.
|
|
*/
|
|
-static int __init cdns_uart_console_setup(struct console *co, char *options)
|
|
+static int cdns_uart_console_setup(struct console *co, char *options)
|
|
{
|
|
struct uart_port *port = &cdns_uart_port[co->index];
|
|
int baud = 9600;
|
|
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
|
|
index 8fb89ddc6cc7..c52f10efdc9c 100644
|
|
--- a/fs/9p/v9fs.c
|
|
+++ b/fs/9p/v9fs.c
|
|
@@ -61,6 +61,8 @@ enum {
|
|
Opt_cache_loose, Opt_fscache, Opt_mmap,
|
|
/* Access options */
|
|
Opt_access, Opt_posixacl,
|
|
+ /* Lock timeout option */
|
|
+ Opt_locktimeout,
|
|
/* Error token */
|
|
Opt_err
|
|
};
|
|
@@ -80,6 +82,7 @@ static const match_table_t tokens = {
|
|
{Opt_cachetag, "cachetag=%s"},
|
|
{Opt_access, "access=%s"},
|
|
{Opt_posixacl, "posixacl"},
|
|
+ {Opt_locktimeout, "locktimeout=%u"},
|
|
{Opt_err, NULL}
|
|
};
|
|
|
|
@@ -187,6 +190,7 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
|
|
#ifdef CONFIG_9P_FSCACHE
|
|
v9ses->cachetag = NULL;
|
|
#endif
|
|
+ v9ses->session_lock_timeout = P9_LOCK_TIMEOUT;
|
|
|
|
if (!opts)
|
|
return 0;
|
|
@@ -360,6 +364,23 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
|
|
#endif
|
|
break;
|
|
|
|
+ case Opt_locktimeout:
|
|
+ r = match_int(&args[0], &option);
|
|
+ if (r < 0) {
|
|
+ p9_debug(P9_DEBUG_ERROR,
|
|
+ "integer field, but no integer?\n");
|
|
+ ret = r;
|
|
+ continue;
|
|
+ }
|
|
+ if (option < 1) {
|
|
+ p9_debug(P9_DEBUG_ERROR,
|
|
+ "locktimeout must be a greater than zero integer.\n");
|
|
+ ret = -EINVAL;
|
|
+ continue;
|
|
+ }
|
|
+ v9ses->session_lock_timeout = (long)option * HZ;
|
|
+ break;
|
|
+
|
|
default:
|
|
continue;
|
|
}
|
|
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
|
|
index 982e017acadb..129e5243a6bf 100644
|
|
--- a/fs/9p/v9fs.h
|
|
+++ b/fs/9p/v9fs.h
|
|
@@ -116,6 +116,7 @@ struct v9fs_session_info {
|
|
struct p9_client *clnt; /* 9p client */
|
|
struct list_head slist; /* list of sessions registered with v9fs */
|
|
struct rw_semaphore rename_sem;
|
|
+ long session_lock_timeout; /* retry interval for blocking locks */
|
|
};
|
|
|
|
/* cache_validity flags */
|
|
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
|
|
index 48db9a9f13f9..cb6c4031af55 100644
|
|
--- a/fs/9p/vfs_dir.c
|
|
+++ b/fs/9p/vfs_dir.c
|
|
@@ -105,7 +105,6 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
|
|
int err = 0;
|
|
struct p9_fid *fid;
|
|
int buflen;
|
|
- int reclen = 0;
|
|
struct p9_rdir *rdir;
|
|
struct kvec kvec;
|
|
|
|
@@ -138,11 +137,10 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
|
|
while (rdir->head < rdir->tail) {
|
|
err = p9stat_read(fid->clnt, rdir->buf + rdir->head,
|
|
rdir->tail - rdir->head, &st);
|
|
- if (err) {
|
|
+ if (err <= 0) {
|
|
p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
|
|
return -EIO;
|
|
}
|
|
- reclen = st.size+2;
|
|
|
|
over = !dir_emit(ctx, st.name, strlen(st.name),
|
|
v9fs_qid2ino(&st.qid), dt_type(&st));
|
|
@@ -150,8 +148,8 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
|
|
if (over)
|
|
return 0;
|
|
|
|
- rdir->head += reclen;
|
|
- ctx->pos += reclen;
|
|
+ rdir->head += err;
|
|
+ ctx->pos += err;
|
|
}
|
|
}
|
|
}
|
|
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
|
|
index af8cac975a74..89e69904976a 100644
|
|
--- a/fs/9p/vfs_file.c
|
|
+++ b/fs/9p/vfs_file.c
|
|
@@ -154,6 +154,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
|
|
uint8_t status = P9_LOCK_ERROR;
|
|
int res = 0;
|
|
unsigned char fl_type;
|
|
+ struct v9fs_session_info *v9ses;
|
|
|
|
fid = filp->private_data;
|
|
BUG_ON(fid == NULL);
|
|
@@ -189,6 +190,8 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
|
|
if (IS_SETLKW(cmd))
|
|
flock.flags = P9_LOCK_FLAGS_BLOCK;
|
|
|
|
+ v9ses = v9fs_inode2v9ses(file_inode(filp));
|
|
+
|
|
/*
|
|
* if its a blocked request and we get P9_LOCK_BLOCKED as the status
|
|
* for lock request, keep on trying
|
|
@@ -202,7 +205,8 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
|
|
break;
|
|
if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
|
|
break;
|
|
- if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
|
|
+ if (schedule_timeout_interruptible(v9ses->session_lock_timeout)
|
|
+ != 0)
|
|
break;
|
|
/*
|
|
* p9_client_lock_dotl overwrites flock.client_id with the
|
|
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
|
|
index a90a637ae79a..6fd4a6a75234 100644
|
|
--- a/fs/cifs/inode.c
|
|
+++ b/fs/cifs/inode.c
|
|
@@ -779,43 +779,50 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
|
|
} else if ((rc == -EACCES) && backup_cred(cifs_sb) &&
|
|
(strcmp(server->vals->version_string, SMB1_VERSION_STRING)
|
|
== 0)) {
|
|
- /*
|
|
- * For SMB2 and later the backup intent flag is already
|
|
- * sent if needed on open and there is no path based
|
|
- * FindFirst operation to use to retry with
|
|
- */
|
|
+ /*
|
|
+ * For SMB2 and later the backup intent flag is already
|
|
+ * sent if needed on open and there is no path based
|
|
+ * FindFirst operation to use to retry with
|
|
+ */
|
|
|
|
- srchinf = kzalloc(sizeof(struct cifs_search_info),
|
|
- GFP_KERNEL);
|
|
- if (srchinf == NULL) {
|
|
- rc = -ENOMEM;
|
|
- goto cgii_exit;
|
|
- }
|
|
+ srchinf = kzalloc(sizeof(struct cifs_search_info),
|
|
+ GFP_KERNEL);
|
|
+ if (srchinf == NULL) {
|
|
+ rc = -ENOMEM;
|
|
+ goto cgii_exit;
|
|
+ }
|
|
|
|
- srchinf->endOfSearch = false;
|
|
+ srchinf->endOfSearch = false;
|
|
+ if (tcon->unix_ext)
|
|
+ srchinf->info_level = SMB_FIND_FILE_UNIX;
|
|
+ else if ((tcon->ses->capabilities &
|
|
+ tcon->ses->server->vals->cap_nt_find) == 0)
|
|
+ srchinf->info_level = SMB_FIND_FILE_INFO_STANDARD;
|
|
+ else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
|
|
srchinf->info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
|
|
+ else /* no srvino useful for fallback to some netapp */
|
|
+ srchinf->info_level = SMB_FIND_FILE_DIRECTORY_INFO;
|
|
|
|
- srchflgs = CIFS_SEARCH_CLOSE_ALWAYS |
|
|
- CIFS_SEARCH_CLOSE_AT_END |
|
|
- CIFS_SEARCH_BACKUP_SEARCH;
|
|
+ srchflgs = CIFS_SEARCH_CLOSE_ALWAYS |
|
|
+ CIFS_SEARCH_CLOSE_AT_END |
|
|
+ CIFS_SEARCH_BACKUP_SEARCH;
|
|
|
|
- rc = CIFSFindFirst(xid, tcon, full_path,
|
|
- cifs_sb, NULL, srchflgs, srchinf, false);
|
|
- if (!rc) {
|
|
- data =
|
|
- (FILE_ALL_INFO *)srchinf->srch_entries_start;
|
|
+ rc = CIFSFindFirst(xid, tcon, full_path,
|
|
+ cifs_sb, NULL, srchflgs, srchinf, false);
|
|
+ if (!rc) {
|
|
+ data = (FILE_ALL_INFO *)srchinf->srch_entries_start;
|
|
|
|
- cifs_dir_info_to_fattr(&fattr,
|
|
- (FILE_DIRECTORY_INFO *)data, cifs_sb);
|
|
- fattr.cf_uniqueid = le64_to_cpu(
|
|
- ((SEARCH_ID_FULL_DIR_INFO *)data)->UniqueId);
|
|
- validinum = true;
|
|
+ cifs_dir_info_to_fattr(&fattr,
|
|
+ (FILE_DIRECTORY_INFO *)data, cifs_sb);
|
|
+ fattr.cf_uniqueid = le64_to_cpu(
|
|
+ ((SEARCH_ID_FULL_DIR_INFO *)data)->UniqueId);
|
|
+ validinum = true;
|
|
|
|
- cifs_buf_release(srchinf->ntwrk_buf_start);
|
|
- }
|
|
- kfree(srchinf);
|
|
- if (rc)
|
|
- goto cgii_exit;
|
|
+ cifs_buf_release(srchinf->ntwrk_buf_start);
|
|
+ }
|
|
+ kfree(srchinf);
|
|
+ if (rc)
|
|
+ goto cgii_exit;
|
|
} else
|
|
goto cgii_exit;
|
|
|
|
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
|
|
index d7e839cb773f..92c9cdf4704d 100644
|
|
--- a/fs/cifs/smb2maperror.c
|
|
+++ b/fs/cifs/smb2maperror.c
|
|
@@ -1035,7 +1035,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
|
|
{STATUS_UNFINISHED_CONTEXT_DELETED, -EIO,
|
|
"STATUS_UNFINISHED_CONTEXT_DELETED"},
|
|
{STATUS_NO_TGT_REPLY, -EIO, "STATUS_NO_TGT_REPLY"},
|
|
- {STATUS_OBJECTID_NOT_FOUND, -EIO, "STATUS_OBJECTID_NOT_FOUND"},
|
|
+ /* Note that ENOATTTR and ENODATA are the same errno */
|
|
+ {STATUS_OBJECTID_NOT_FOUND, -ENODATA, "STATUS_OBJECTID_NOT_FOUND"},
|
|
{STATUS_NO_IP_ADDRESSES, -EIO, "STATUS_NO_IP_ADDRESSES"},
|
|
{STATUS_WRONG_CREDENTIAL_HANDLE, -EIO,
|
|
"STATUS_WRONG_CREDENTIAL_HANDLE"},
|
|
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
|
|
index 7917cc89ab21..3dbf4e414706 100644
|
|
--- a/fs/ext4/ioctl.c
|
|
+++ b/fs/ext4/ioctl.c
|
|
@@ -940,6 +940,13 @@ resizefs_out:
|
|
if (!blk_queue_discard(q))
|
|
return -EOPNOTSUPP;
|
|
|
|
+ /*
|
|
+ * We haven't replayed the journal, so we cannot use our
|
|
+ * block-bitmap-guided storage zapping commands.
|
|
+ */
|
|
+ if (test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb))
|
|
+ return -EROFS;
|
|
+
|
|
if (copy_from_user(&range, (struct fstrim_range __user *)arg,
|
|
sizeof(range)))
|
|
return -EFAULT;
|
|
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
|
|
index 6f0acfe31418..333fba05e1a5 100644
|
|
--- a/fs/ext4/resize.c
|
|
+++ b/fs/ext4/resize.c
|
|
@@ -907,11 +907,18 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
|
|
memcpy(n_group_desc, o_group_desc,
|
|
EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
|
|
n_group_desc[gdb_num] = gdb_bh;
|
|
+
|
|
+ BUFFER_TRACE(gdb_bh, "get_write_access");
|
|
+ err = ext4_journal_get_write_access(handle, gdb_bh);
|
|
+ if (err) {
|
|
+ kvfree(n_group_desc);
|
|
+ brelse(gdb_bh);
|
|
+ return err;
|
|
+ }
|
|
+
|
|
EXT4_SB(sb)->s_group_desc = n_group_desc;
|
|
EXT4_SB(sb)->s_gdb_count++;
|
|
kvfree(o_group_desc);
|
|
- BUFFER_TRACE(gdb_bh, "get_write_access");
|
|
- err = ext4_journal_get_write_access(handle, gdb_bh);
|
|
return err;
|
|
}
|
|
|
|
@@ -2042,6 +2049,10 @@ out:
|
|
free_flex_gd(flex_gd);
|
|
if (resize_inode != NULL)
|
|
iput(resize_inode);
|
|
- ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", n_blocks_count);
|
|
+ if (err)
|
|
+ ext4_warning(sb, "error (%d) occurred during "
|
|
+ "file system resize", err);
|
|
+ ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
|
|
+ ext4_blocks_count(es));
|
|
return err;
|
|
}
|
|
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
|
|
index fc5c41257e68..4c169ba50c0f 100644
|
|
--- a/fs/f2fs/super.c
|
|
+++ b/fs/f2fs/super.c
|
|
@@ -1959,7 +1959,7 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
|
|
unsigned int segment_count_main;
|
|
unsigned int cp_pack_start_sum, cp_payload;
|
|
block_t user_block_count;
|
|
- int i;
|
|
+ int i, j;
|
|
|
|
total = le32_to_cpu(raw_super->segment_count);
|
|
fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
|
|
@@ -2000,11 +2000,43 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
|
|
if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
|
|
le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
|
|
return 1;
|
|
+ for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
|
|
+ if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
|
|
+ le32_to_cpu(ckpt->cur_node_segno[j])) {
|
|
+ f2fs_msg(sbi->sb, KERN_ERR,
|
|
+ "Node segment (%u, %u) has the same "
|
|
+ "segno: %u", i, j,
|
|
+ le32_to_cpu(ckpt->cur_node_segno[i]));
|
|
+ return 1;
|
|
+ }
|
|
+ }
|
|
}
|
|
for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
|
|
if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
|
|
le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
|
|
return 1;
|
|
+ for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
|
|
+ if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
|
|
+ le32_to_cpu(ckpt->cur_data_segno[j])) {
|
|
+ f2fs_msg(sbi->sb, KERN_ERR,
|
|
+ "Data segment (%u, %u) has the same "
|
|
+ "segno: %u", i, j,
|
|
+ le32_to_cpu(ckpt->cur_data_segno[i]));
|
|
+ return 1;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
|
|
+ for (j = i; j < NR_CURSEG_DATA_TYPE; j++) {
|
|
+ if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
|
|
+ le32_to_cpu(ckpt->cur_data_segno[j])) {
|
|
+ f2fs_msg(sbi->sb, KERN_ERR,
|
|
+ "Data segment (%u) and Data segment (%u)"
|
|
+ " has the same segno: %u", i, j,
|
|
+ le32_to_cpu(ckpt->cur_node_segno[i]));
|
|
+ return 1;
|
|
+ }
|
|
+ }
|
|
}
|
|
|
|
sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
|
|
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
|
|
index 4d356e168692..03885e63f92b 100644
|
|
--- a/include/linux/atalk.h
|
|
+++ b/include/linux/atalk.h
|
|
@@ -151,19 +151,29 @@ extern int sysctl_aarp_retransmit_limit;
|
|
extern int sysctl_aarp_resolve_time;
|
|
|
|
#ifdef CONFIG_SYSCTL
|
|
-extern void atalk_register_sysctl(void);
|
|
+extern int atalk_register_sysctl(void);
|
|
extern void atalk_unregister_sysctl(void);
|
|
#else
|
|
-#define atalk_register_sysctl() do { } while(0)
|
|
-#define atalk_unregister_sysctl() do { } while(0)
|
|
+static inline int atalk_register_sysctl(void)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+static inline void atalk_unregister_sysctl(void)
|
|
+{
|
|
+}
|
|
#endif
|
|
|
|
#ifdef CONFIG_PROC_FS
|
|
extern int atalk_proc_init(void);
|
|
extern void atalk_proc_exit(void);
|
|
#else
|
|
-#define atalk_proc_init() ({ 0; })
|
|
-#define atalk_proc_exit() do { } while(0)
|
|
+static inline int atalk_proc_init(void)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+static inline void atalk_proc_exit(void)
|
|
+{
|
|
+}
|
|
#endif /* CONFIG_PROC_FS */
|
|
|
|
#endif /* __LINUX_ATALK_H__ */
|
|
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
|
|
index 8458cc5fbce5..d8b3240cfe6e 100644
|
|
--- a/include/linux/bpf_verifier.h
|
|
+++ b/include/linux/bpf_verifier.h
|
|
@@ -91,14 +91,20 @@ enum bpf_stack_slot_type {
|
|
|
|
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
|
|
|
|
+struct bpf_stack_state {
|
|
+ struct bpf_reg_state spilled_ptr;
|
|
+ u8 slot_type[BPF_REG_SIZE];
|
|
+};
|
|
+
|
|
/* state of the program:
|
|
* type of all registers and stack info
|
|
*/
|
|
struct bpf_verifier_state {
|
|
struct bpf_reg_state regs[MAX_BPF_REG];
|
|
- u8 stack_slot_type[MAX_BPF_STACK];
|
|
- struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
|
|
struct bpf_verifier_state *parent;
|
|
+ int allocated_stack;
|
|
+ struct bpf_stack_state *stack;
|
|
+ bool speculative;
|
|
};
|
|
|
|
/* linked list of verifier states used to prune search */
|
|
@@ -107,14 +113,24 @@ struct bpf_verifier_state_list {
|
|
struct bpf_verifier_state_list *next;
|
|
};
|
|
|
|
+/* Possible states for alu_state member. */
|
|
+#define BPF_ALU_SANITIZE_SRC 1U
|
|
+#define BPF_ALU_SANITIZE_DST 2U
|
|
+#define BPF_ALU_NEG_VALUE (1U << 2)
|
|
+#define BPF_ALU_NON_POINTER (1U << 3)
|
|
+#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
|
|
+ BPF_ALU_SANITIZE_DST)
|
|
+
|
|
struct bpf_insn_aux_data {
|
|
union {
|
|
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
|
|
struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
|
|
+ u32 alu_limit; /* limit for add/sub register with pointer */
|
|
};
|
|
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
|
|
int sanitize_stack_off; /* stack slot to be cleared */
|
|
bool seen; /* this insn was processed by the verifier */
|
|
+ u8 alu_state; /* used in combination with alu_limit */
|
|
};
|
|
|
|
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
|
|
@@ -129,11 +145,13 @@ struct bpf_ext_analyzer_ops {
|
|
* one verifier_env per bpf_check() call
|
|
*/
|
|
struct bpf_verifier_env {
|
|
+ u32 insn_idx;
|
|
+ u32 prev_insn_idx;
|
|
struct bpf_prog *prog; /* eBPF program being verified */
|
|
struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
|
|
int stack_size; /* number of states to be processed */
|
|
bool strict_alignment; /* perform strict pointer alignment checks */
|
|
- struct bpf_verifier_state cur_state; /* current verifier state */
|
|
+ struct bpf_verifier_state *cur_state; /* current verifier state */
|
|
struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
|
|
const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
|
|
void *analyzer_priv; /* pointer to external analyzer's private data */
|
|
@@ -145,6 +163,11 @@ struct bpf_verifier_env {
|
|
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
|
|
};
|
|
|
|
+static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
|
|
+{
|
|
+ return env->cur_state->regs;
|
|
+}
|
|
+
|
|
int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
|
|
void *priv);
|
|
|
|
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
|
|
index a704d032713b..67c3934fb9ed 100644
|
|
--- a/include/linux/compiler.h
|
|
+++ b/include/linux/compiler.h
|
|
@@ -119,7 +119,10 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
|
|
# define ASM_UNREACHABLE
|
|
#endif
|
|
#ifndef unreachable
|
|
-# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
|
|
+# define unreachable() do { \
|
|
+ annotate_unreachable(); \
|
|
+ __builtin_unreachable(); \
|
|
+} while (0)
|
|
#endif
|
|
|
|
/*
|
|
diff --git a/include/linux/filter.h b/include/linux/filter.h
|
|
index 56d2cda9931b..ac2272778f2e 100644
|
|
--- a/include/linux/filter.h
|
|
+++ b/include/linux/filter.h
|
|
@@ -46,14 +46,10 @@ struct bpf_prog_aux;
|
|
#define BPF_REG_X BPF_REG_7
|
|
#define BPF_REG_TMP BPF_REG_8
|
|
|
|
-/* Kernel hidden auxiliary/helper register for hardening step.
|
|
- * Only used by eBPF JITs. It's nothing more than a temporary
|
|
- * register that JITs use internally, only that here it's part
|
|
- * of eBPF instructions that have been rewritten for blinding
|
|
- * constants. See JIT pre-step in bpf_jit_blind_constants().
|
|
- */
|
|
+/* Kernel hidden auxiliary/helper register. */
|
|
#define BPF_REG_AX MAX_BPF_REG
|
|
-#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
|
|
+#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1)
|
|
+#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG
|
|
|
|
/* unused opcode to mark special call to bpf_tail_call() helper */
|
|
#define BPF_TAIL_CALL 0xf0
|
|
diff --git a/include/linux/swap.h b/include/linux/swap.h
|
|
index 4fd1ab9565ba..e643866912b7 100644
|
|
--- a/include/linux/swap.h
|
|
+++ b/include/linux/swap.h
|
|
@@ -155,9 +155,9 @@ struct swap_extent {
|
|
/*
|
|
* Max bad pages in the new format..
|
|
*/
|
|
-#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
|
|
#define MAX_SWAP_BADPAGES \
|
|
- ((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
|
|
+ ((offsetof(union swap_header, magic.magic) - \
|
|
+ offsetof(union swap_header, info.badpages)) / sizeof(int))
|
|
|
|
enum {
|
|
SWP_USED = (1 << 0), /* is slot in swap_info[] used? */
|
|
diff --git a/include/uapi/linux/netfilter/xt_cgroup.h b/include/uapi/linux/netfilter/xt_cgroup.h
|
|
index e96dfa1b34f7..b74e370d6133 100644
|
|
--- a/include/uapi/linux/netfilter/xt_cgroup.h
|
|
+++ b/include/uapi/linux/netfilter/xt_cgroup.h
|
|
@@ -22,4 +22,20 @@ struct xt_cgroup_info_v1 {
|
|
void *priv __attribute__((aligned(8)));
|
|
};
|
|
|
|
+#define XT_CGROUP_PATH_MAX 512
|
|
+
|
|
+struct xt_cgroup_info_v2 {
|
|
+ __u8 has_path;
|
|
+ __u8 has_classid;
|
|
+ __u8 invert_path;
|
|
+ __u8 invert_classid;
|
|
+ union {
|
|
+ char path[XT_CGROUP_PATH_MAX];
|
|
+ __u32 classid;
|
|
+ };
|
|
+
|
|
+ /* kernel internal data */
|
|
+ void *priv __attribute__((aligned(8)));
|
|
+};
|
|
+
|
|
#endif /* _UAPI_XT_CGROUP_H */
|
|
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
|
|
index d203a5d6b726..e46106c6ac39 100644
|
|
--- a/kernel/bpf/core.c
|
|
+++ b/kernel/bpf/core.c
|
|
@@ -51,6 +51,7 @@
|
|
#define DST regs[insn->dst_reg]
|
|
#define SRC regs[insn->src_reg]
|
|
#define FP regs[BPF_REG_FP]
|
|
+#define AX regs[BPF_REG_AX]
|
|
#define ARG1 regs[BPF_REG_ARG1]
|
|
#define CTX regs[BPF_REG_CTX]
|
|
#define IMM insn->imm
|
|
@@ -552,6 +553,26 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
|
|
BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
|
|
BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
|
|
|
|
+ /* Constraints on AX register:
|
|
+ *
|
|
+ * AX register is inaccessible from user space. It is mapped in
|
|
+ * all JITs, and used here for constant blinding rewrites. It is
|
|
+ * typically "stateless" meaning its contents are only valid within
|
|
+ * the executed instruction, but not across several instructions.
|
|
+ * There are a few exceptions however which are further detailed
|
|
+ * below.
|
|
+ *
|
|
+ * Constant blinding is only used by JITs, not in the interpreter.
|
|
+ * The interpreter uses AX in some occasions as a local temporary
|
|
+ * register e.g. in DIV or MOD instructions.
|
|
+ *
|
|
+ * In restricted circumstances, the verifier can also use the AX
|
|
+ * register for rewrites as long as they do not interfere with
|
|
+ * the above cases!
|
|
+ */
|
|
+ if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
|
|
+ goto out;
|
|
+
|
|
if (from->imm == 0 &&
|
|
(from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
|
|
from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
|
|
@@ -939,22 +960,22 @@ select_insn:
|
|
ALU64_MOD_X:
|
|
if (unlikely(SRC == 0))
|
|
return 0;
|
|
- div64_u64_rem(DST, SRC, &tmp);
|
|
- DST = tmp;
|
|
+ div64_u64_rem(DST, SRC, &AX);
|
|
+ DST = AX;
|
|
CONT;
|
|
ALU_MOD_X:
|
|
if (unlikely((u32)SRC == 0))
|
|
return 0;
|
|
- tmp = (u32) DST;
|
|
- DST = do_div(tmp, (u32) SRC);
|
|
+ AX = (u32) DST;
|
|
+ DST = do_div(AX, (u32) SRC);
|
|
CONT;
|
|
ALU64_MOD_K:
|
|
- div64_u64_rem(DST, IMM, &tmp);
|
|
- DST = tmp;
|
|
+ div64_u64_rem(DST, IMM, &AX);
|
|
+ DST = AX;
|
|
CONT;
|
|
ALU_MOD_K:
|
|
- tmp = (u32) DST;
|
|
- DST = do_div(tmp, (u32) IMM);
|
|
+ AX = (u32) DST;
|
|
+ DST = do_div(AX, (u32) IMM);
|
|
CONT;
|
|
ALU64_DIV_X:
|
|
if (unlikely(SRC == 0))
|
|
@@ -964,17 +985,17 @@ select_insn:
|
|
ALU_DIV_X:
|
|
if (unlikely((u32)SRC == 0))
|
|
return 0;
|
|
- tmp = (u32) DST;
|
|
- do_div(tmp, (u32) SRC);
|
|
- DST = (u32) tmp;
|
|
+ AX = (u32) DST;
|
|
+ do_div(AX, (u32) SRC);
|
|
+ DST = (u32) AX;
|
|
CONT;
|
|
ALU64_DIV_K:
|
|
DST = div64_u64(DST, IMM);
|
|
CONT;
|
|
ALU_DIV_K:
|
|
- tmp = (u32) DST;
|
|
- do_div(tmp, (u32) IMM);
|
|
- DST = (u32) tmp;
|
|
+ AX = (u32) DST;
+ do_div(AX, (u32) IMM);
+ DST = (u32) AX;
CONT;
ALU_END_TO_BE:
switch (IMM) {
@@ -1278,7 +1299,7 @@ STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
u64 stack[stack_size / sizeof(u64)]; \
- u64 regs[MAX_BPF_REG]; \
+ u64 regs[MAX_BPF_EXT_REG]; \
\
FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
ARG1 = (u64) (unsigned long) ctx; \
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index be1dde967208..ccf9ffd5da78 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -365,19 +365,6 @@ out:
}
EXPORT_SYMBOL_GPL(bpf_obj_get_user);
 
-static void bpf_evict_inode(struct inode *inode)
-{
- enum bpf_type type;
-
- truncate_inode_pages_final(&inode->i_data);
- clear_inode(inode);
-
- if (S_ISLNK(inode->i_mode))
- kfree(inode->i_link);
- if (!bpf_inode_type(inode, &type))
- bpf_any_put(inode->i_private, type);
-}
-
/*
* Display the mount options in /proc/mounts.
*/
@@ -390,11 +377,28 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
return 0;
}
 
+static void bpf_destroy_inode_deferred(struct rcu_head *head)
+{
+ struct inode *inode = container_of(head, struct inode, i_rcu);
+ enum bpf_type type;
+
+ if (S_ISLNK(inode->i_mode))
+ kfree(inode->i_link);
+ if (!bpf_inode_type(inode, &type))
+ bpf_any_put(inode->i_private, type);
+ free_inode_nonrcu(inode);
+}
+
+static void bpf_destroy_inode(struct inode *inode)
+{
+ call_rcu(&inode->i_rcu, bpf_destroy_inode_deferred);
+}
+
static const struct super_operations bpf_super_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
.show_options = bpf_show_options,
- .evict_inode = bpf_evict_inode,
+ .destroy_inode = bpf_destroy_inode,
};
 
enum {
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index 1da574612bea..c0c494b7647b 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -12,6 +12,7 @@
struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
{
struct bpf_map *inner_map, *inner_map_meta;
+ u32 inner_map_meta_size;
struct fd f;
 
f = fdget(inner_map_ufd);
@@ -34,7 +35,12 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
return ERR_PTR(-EINVAL);
}
 
- inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER);
+ inner_map_meta_size = sizeof(*inner_map_meta);
+ /* In some cases verifier needs to access beyond just base map. */
+ if (inner_map->ops == &array_map_ops)
+ inner_map_meta_size = sizeof(struct bpf_array);
+
+ inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
if (!inner_map_meta) {
fdput(f);
return ERR_PTR(-ENOMEM);
@@ -44,9 +50,16 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
inner_map_meta->key_size = inner_map->key_size;
inner_map_meta->value_size = inner_map->value_size;
inner_map_meta->map_flags = inner_map->map_flags;
- inner_map_meta->ops = inner_map->ops;
inner_map_meta->max_entries = inner_map->max_entries;
 
+ /* Misc members not needed in bpf_map_meta_equal() check. */
+ inner_map_meta->ops = inner_map->ops;
+ if (inner_map->ops == &array_map_ops) {
+ inner_map_meta->unpriv_array = inner_map->unpriv_array;
+ container_of(inner_map_meta, struct bpf_array, map)->index_mask =
+ container_of(inner_map, struct bpf_array, map)->index_mask;
+ }
+
fdput(f);
return inner_map_meta;
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
|
|
index f6755fd5bae2..a4875ff0bab1 100644
|
|
--- a/kernel/bpf/verifier.c
|
|
+++ b/kernel/bpf/verifier.c
|
|
@@ -265,10 +265,11 @@ static void print_verifier_state(struct bpf_verifier_state *state)
|
|
verbose(")");
|
|
}
|
|
}
|
|
- for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
|
|
- if (state->stack_slot_type[i] == STACK_SPILL)
|
|
- verbose(" fp%d=%s", -MAX_BPF_STACK + i,
|
|
- reg_type_str[state->spilled_regs[i / BPF_REG_SIZE].type]);
|
|
+ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
|
|
+ if (state->stack[i].slot_type[0] == STACK_SPILL)
|
|
+ verbose(" fp%d=%s",
|
|
+ (-i - 1) * BPF_REG_SIZE,
|
|
+ reg_type_str[state->stack[i].spilled_ptr.type]);
|
|
}
|
|
verbose("\n");
|
|
}
|
|
@@ -434,40 +435,133 @@ static void print_bpf_insn(const struct bpf_verifier_env *env,
|
|
}
|
|
}
|
|
|
|
-static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx)
|
|
+static int copy_stack_state(struct bpf_verifier_state *dst,
|
|
+ const struct bpf_verifier_state *src)
|
|
{
|
|
- struct bpf_verifier_stack_elem *elem;
|
|
- int insn_idx;
|
|
+ if (!src->stack)
|
|
+ return 0;
|
|
+ if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) {
|
|
+ /* internal bug, make state invalid to reject the program */
|
|
+ memset(dst, 0, sizeof(*dst));
|
|
+ return -EFAULT;
|
|
+ }
|
|
+ memcpy(dst->stack, src->stack,
|
|
+ sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE));
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
|
|
+ * make it consume minimal amount of memory. check_stack_write() access from
|
|
+ * the program calls into realloc_verifier_state() to grow the stack size.
|
|
+ * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
|
|
+ * which this function copies over. It points to previous bpf_verifier_state
|
|
+ * which is never reallocated
|
|
+ */
|
|
+static int realloc_verifier_state(struct bpf_verifier_state *state, int size,
|
|
+ bool copy_old)
|
|
+{
|
|
+ u32 old_size = state->allocated_stack;
|
|
+ struct bpf_stack_state *new_stack;
|
|
+ int slot = size / BPF_REG_SIZE;
|
|
+
|
|
+ if (size <= old_size || !size) {
|
|
+ if (copy_old)
|
|
+ return 0;
|
|
+ state->allocated_stack = slot * BPF_REG_SIZE;
|
|
+ if (!size && old_size) {
|
|
+ kfree(state->stack);
|
|
+ state->stack = NULL;
|
|
+ }
|
|
+ return 0;
|
|
+ }
|
|
+ new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state),
|
|
+ GFP_KERNEL);
|
|
+ if (!new_stack)
|
|
+ return -ENOMEM;
|
|
+ if (copy_old) {
|
|
+ if (state->stack)
|
|
+ memcpy(new_stack, state->stack,
|
|
+ sizeof(*new_stack) * (old_size / BPF_REG_SIZE));
|
|
+ memset(new_stack + old_size / BPF_REG_SIZE, 0,
|
|
+ sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE);
|
|
+ }
|
|
+ state->allocated_stack = slot * BPF_REG_SIZE;
|
|
+ kfree(state->stack);
|
|
+ state->stack = new_stack;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void free_verifier_state(struct bpf_verifier_state *state,
|
|
+ bool free_self)
|
|
+{
|
|
+ kfree(state->stack);
|
|
+ if (free_self)
|
|
+ kfree(state);
|
|
+}
|
|
+
|
|
+/* copy verifier state from src to dst growing dst stack space
|
|
+ * when necessary to accommodate larger src stack
|
|
+ */
|
|
+static int copy_verifier_state(struct bpf_verifier_state *dst,
|
|
+ const struct bpf_verifier_state *src)
|
|
+{
|
|
+ int err;
|
|
+
|
|
+ err = realloc_verifier_state(dst, src->allocated_stack, false);
|
|
+ if (err)
|
|
+ return err;
|
|
+ memcpy(dst, src, offsetof(struct bpf_verifier_state, allocated_stack));
|
|
+ return copy_stack_state(dst, src);
|
|
+}
|
|
+
|
|
+static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
|
|
+ int *insn_idx)
|
|
+{
|
|
+ struct bpf_verifier_state *cur = env->cur_state;
|
|
+ struct bpf_verifier_stack_elem *elem, *head = env->head;
|
|
+ int err;
|
|
|
|
if (env->head == NULL)
|
|
- return -1;
|
|
+ return -ENOENT;
|
|
|
|
- memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state));
|
|
- insn_idx = env->head->insn_idx;
|
|
+ if (cur) {
|
|
+ err = copy_verifier_state(cur, &head->st);
|
|
+ if (err)
|
|
+ return err;
|
|
+ }
|
|
+ if (insn_idx)
|
|
+ *insn_idx = head->insn_idx;
|
|
if (prev_insn_idx)
|
|
- *prev_insn_idx = env->head->prev_insn_idx;
|
|
- elem = env->head->next;
|
|
- kfree(env->head);
|
|
+ *prev_insn_idx = head->prev_insn_idx;
|
|
+ elem = head->next;
|
|
+ free_verifier_state(&head->st, false);
|
|
+ kfree(head);
|
|
env->head = elem;
|
|
env->stack_size--;
|
|
- return insn_idx;
|
|
+ return 0;
|
|
}
|
|
|
|
static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
|
|
- int insn_idx, int prev_insn_idx)
|
|
+ int insn_idx, int prev_insn_idx,
|
|
+ bool speculative)
|
|
{
|
|
struct bpf_verifier_stack_elem *elem;
|
|
+ struct bpf_verifier_state *cur = env->cur_state;
|
|
+ int err;
|
|
|
|
- elem = kmalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
|
|
+ elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
|
|
if (!elem)
|
|
goto err;
|
|
|
|
- memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state));
|
|
elem->insn_idx = insn_idx;
|
|
elem->prev_insn_idx = prev_insn_idx;
|
|
elem->next = env->head;
|
|
+ elem->st.speculative |= speculative;
|
|
env->head = elem;
|
|
env->stack_size++;
|
|
+ err = copy_verifier_state(&elem->st, cur);
|
|
+ if (err)
|
|
+ goto err;
|
|
if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
|
|
verbose("BPF program is too complex\n");
|
|
goto err;
|
|
@@ -475,7 +569,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
|
|
return &elem->st;
|
|
err:
|
|
/* pop all elements and return */
|
|
- while (pop_stack(env, NULL) >= 0);
|
|
+ while (!pop_stack(env, NULL, NULL));
|
|
return NULL;
|
|
}
|
|
|
|
@@ -671,7 +765,7 @@ static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
|
|
static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
|
|
enum reg_arg_type t)
|
|
{
|
|
- struct bpf_reg_state *regs = env->cur_state.regs;
|
|
+ struct bpf_reg_state *regs = env->cur_state->regs;
|
|
|
|
if (regno >= MAX_BPF_REG) {
|
|
verbose("R%d is invalid\n", regno);
|
|
@@ -684,7 +778,7 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
|
|
verbose("R%d !read_ok\n", regno);
|
|
return -EACCES;
|
|
}
|
|
- mark_reg_read(&env->cur_state, regno);
|
|
+ mark_reg_read(env->cur_state, regno);
|
|
} else {
|
|
/* check whether register used as dest operand can be written to */
|
|
if (regno == BPF_REG_FP) {
|
|
@@ -721,10 +815,21 @@ static int check_stack_write(struct bpf_verifier_env *env,
|
|
struct bpf_verifier_state *state, int off,
|
|
int size, int value_regno, int insn_idx)
|
|
{
|
|
- int i, spi = (MAX_BPF_STACK + off) / BPF_REG_SIZE;
|
|
+ int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
|
|
+
|
|
+ err = realloc_verifier_state(state, round_up(slot + 1, BPF_REG_SIZE),
|
|
+ true);
|
|
+ if (err)
|
|
+ return err;
|
|
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
|
|
* so it's aligned access and [off, off + size) are within stack limits
|
|
*/
|
|
+ if (!env->allow_ptr_leaks &&
|
|
+ state->stack[spi].slot_type[0] == STACK_SPILL &&
|
|
+ size != BPF_REG_SIZE) {
|
|
+ verbose("attempt to corrupt spilled pointer on stack\n");
|
|
+ return -EACCES;
|
|
+ }
|
|
|
|
if (value_regno >= 0 &&
|
|
is_spillable_regtype(state->regs[value_regno].type)) {
|
|
@@ -736,11 +841,11 @@ static int check_stack_write(struct bpf_verifier_env *env,
|
|
}
|
|
|
|
/* save register state */
|
|
- state->spilled_regs[spi] = state->regs[value_regno];
|
|
- state->spilled_regs[spi].live |= REG_LIVE_WRITTEN;
|
|
+ state->stack[spi].spilled_ptr = state->regs[value_regno];
|
|
+ state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
|
|
|
|
for (i = 0; i < BPF_REG_SIZE; i++) {
|
|
- if (state->stack_slot_type[MAX_BPF_STACK + off + i] == STACK_MISC &&
|
|
+ if (state->stack[spi].slot_type[i] == STACK_MISC &&
|
|
!env->allow_ptr_leaks) {
|
|
int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
|
|
int soff = (-spi - 1) * BPF_REG_SIZE;
|
|
@@ -763,14 +868,15 @@ static int check_stack_write(struct bpf_verifier_env *env,
|
|
}
|
|
*poff = soff;
|
|
}
|
|
- state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
|
|
+ state->stack[spi].slot_type[i] = STACK_SPILL;
|
|
}
|
|
} else {
|
|
/* regular write of data into stack */
|
|
- state->spilled_regs[spi] = (struct bpf_reg_state) {};
|
|
+ state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};
|
|
|
|
for (i = 0; i < size; i++)
|
|
- state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
|
|
+ state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
|
|
+ STACK_MISC;
|
|
}
|
|
return 0;
|
|
}
|
|
@@ -781,10 +887,10 @@ static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slo
|
|
|
|
while (parent) {
|
|
/* if read wasn't screened by an earlier write ... */
|
|
- if (state->spilled_regs[slot].live & REG_LIVE_WRITTEN)
|
|
+ if (state->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
|
|
break;
|
|
/* ... then we depend on parent's value */
|
|
- parent->spilled_regs[slot].live |= REG_LIVE_READ;
|
|
+ parent->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
|
|
state = parent;
|
|
parent = state->parent;
|
|
}
|
|
@@ -793,34 +899,37 @@ static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slo
|
|
static int check_stack_read(struct bpf_verifier_state *state, int off, int size,
|
|
int value_regno)
|
|
{
|
|
- u8 *slot_type;
|
|
- int i, spi;
|
|
+ int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
|
|
+ u8 *stype;
|
|
|
|
- slot_type = &state->stack_slot_type[MAX_BPF_STACK + off];
|
|
+ if (state->allocated_stack <= slot) {
|
|
+ verbose("invalid read from stack off %d+0 size %d\n",
|
|
+ off, size);
|
|
+ return -EACCES;
|
|
+ }
|
|
+ stype = state->stack[spi].slot_type;
|
|
|
|
- if (slot_type[0] == STACK_SPILL) {
|
|
+ if (stype[0] == STACK_SPILL) {
|
|
if (size != BPF_REG_SIZE) {
|
|
verbose("invalid size of register spill\n");
|
|
return -EACCES;
|
|
}
|
|
for (i = 1; i < BPF_REG_SIZE; i++) {
|
|
- if (slot_type[i] != STACK_SPILL) {
|
|
+ if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
|
|
verbose("corrupted spill memory\n");
|
|
return -EACCES;
|
|
}
|
|
}
|
|
|
|
- spi = (MAX_BPF_STACK + off) / BPF_REG_SIZE;
|
|
-
|
|
if (value_regno >= 0) {
|
|
/* restore register state from stack */
|
|
- state->regs[value_regno] = state->spilled_regs[spi];
|
|
+ state->regs[value_regno] = state->stack[spi].spilled_ptr;
|
|
mark_stack_slot_read(state, spi);
|
|
}
|
|
return 0;
|
|
} else {
|
|
for (i = 0; i < size; i++) {
|
|
- if (slot_type[i] != STACK_MISC) {
|
|
+ if (stype[(slot - i) % BPF_REG_SIZE] != STACK_MISC) {
|
|
verbose("invalid read from stack off %d+%d size %d\n",
|
|
off, i, size);
|
|
return -EACCES;
|
|
@@ -833,11 +942,37 @@ static int check_stack_read(struct bpf_verifier_state *state, int off, int size,
|
|
}
|
|
}
|
|
|
|
+static int check_stack_access(struct bpf_verifier_env *env,
|
|
+ const struct bpf_reg_state *reg,
|
|
+ int off, int size)
|
|
+{
|
|
+ /* Stack accesses must be at a fixed offset, so that we
|
|
+ * can determine what type of data were returned. See
|
|
+ * check_stack_read().
|
|
+ */
|
|
+ if (!tnum_is_const(reg->var_off)) {
|
|
+ char tn_buf[48];
|
|
+
|
|
+ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
|
|
+ verbose("variable stack access var_off=%s off=%d size=%d",
|
|
+ tn_buf, off, size);
|
|
+ return -EACCES;
|
|
+ }
|
|
+
|
|
+ if (off >= 0 || off < -MAX_BPF_STACK) {
|
|
+ verbose("invalid stack off=%d size=%d\n", off, size);
|
|
+ return -EACCES;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/* check read/write into map element returned by bpf_map_lookup_elem() */
|
|
static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
|
|
int size)
|
|
{
|
|
- struct bpf_map *map = env->cur_state.regs[regno].map_ptr;
|
|
+ struct bpf_reg_state *regs = cur_regs(env);
|
|
+ struct bpf_map *map = regs[regno].map_ptr;
|
|
|
|
if (off < 0 || size <= 0 || off + size > map->value_size) {
|
|
verbose("invalid access to map value, value_size=%d off=%d size=%d\n",
|
|
@@ -849,9 +984,9 @@ static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
|
|
|
|
/* check read/write into a map element with possible variable offset */
|
|
static int check_map_access(struct bpf_verifier_env *env, u32 regno,
|
|
- int off, int size)
|
|
+ int off, int size)
|
|
{
|
|
- struct bpf_verifier_state *state = &env->cur_state;
|
|
+ struct bpf_verifier_state *state = env->cur_state;
|
|
struct bpf_reg_state *reg = &state->regs[regno];
|
|
int err;
|
|
|
|
@@ -861,13 +996,17 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
|
|
*/
|
|
if (log_level)
|
|
print_verifier_state(state);
|
|
+
|
|
/* The minimum value is only important with signed
|
|
* comparisons where we can't assume the floor of a
|
|
* value is 0. If we are using signed variables for our
|
|
* index'es we need to make sure that whatever we use
|
|
* will have a set floor within our range.
|
|
*/
|
|
- if (reg->smin_value < 0) {
|
|
+ if (reg->smin_value < 0 &&
|
|
+ (reg->smin_value == S64_MIN ||
|
|
+ (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
|
|
+ reg->smin_value + off < 0)) {
|
|
verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
|
|
regno);
|
|
return -EACCES;
|
|
@@ -924,7 +1063,7 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
|
|
static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
|
|
int off, int size)
|
|
{
|
|
- struct bpf_reg_state *regs = env->cur_state.regs;
|
|
+ struct bpf_reg_state *regs = cur_regs(env);
|
|
struct bpf_reg_state *reg = &regs[regno];
|
|
|
|
if (off < 0 || size <= 0 || (u64)off + size > reg->range) {
|
|
@@ -938,7 +1077,7 @@ static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
|
|
static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
|
|
int size)
|
|
{
|
|
- struct bpf_reg_state *regs = env->cur_state.regs;
|
|
+ struct bpf_reg_state *regs = cur_regs(env);
|
|
struct bpf_reg_state *reg = &regs[regno];
|
|
int err;
|
|
|
|
@@ -1008,19 +1147,19 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
|
|
|
|
static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
|
|
{
|
|
- return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]);
|
|
+ return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
|
|
}
|
|
|
|
static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
|
|
{
|
|
- const struct bpf_reg_state *reg = &env->cur_state.regs[regno];
|
|
+ const struct bpf_reg_state *reg = cur_regs(env) + regno;
|
|
|
|
return reg->type == PTR_TO_CTX;
|
|
}
|
|
|
|
static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
|
|
{
|
|
- const struct bpf_reg_state *reg = &env->cur_state.regs[regno];
|
|
+ const struct bpf_reg_state *reg = cur_regs(env) + regno;
|
|
|
|
return reg->type == PTR_TO_PACKET;
|
|
}
|
|
@@ -1145,8 +1284,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
|
|
int off, int bpf_size, enum bpf_access_type t,
|
|
int value_regno, bool strict_alignment_once)
|
|
{
|
|
- struct bpf_verifier_state *state = &env->cur_state;
|
|
- struct bpf_reg_state *reg = &state->regs[regno];
|
|
+ struct bpf_verifier_state *state = env->cur_state;
|
|
+ struct bpf_reg_state *regs = cur_regs(env);
|
|
+ struct bpf_reg_state *reg = regs + regno;
|
|
int size, err = 0;
|
|
|
|
size = bpf_size_to_bytes(bpf_size);
|
|
@@ -1170,7 +1310,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
|
|
|
|
err = check_map_access(env, regno, off, size);
|
|
if (!err && t == BPF_READ && value_regno >= 0)
|
|
- mark_reg_unknown(state->regs, value_regno);
|
|
+ mark_reg_unknown(regs, value_regno);
|
|
|
|
} else if (reg->type == PTR_TO_CTX) {
|
|
enum bpf_reg_type reg_type = SCALAR_VALUE;
|
|
@@ -1203,49 +1343,29 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
|
|
* the offset is zero.
|
|
*/
|
|
if (reg_type == SCALAR_VALUE)
|
|
- mark_reg_unknown(state->regs, value_regno);
|
|
+ mark_reg_unknown(regs, value_regno);
|
|
else
|
|
- mark_reg_known_zero(state->regs, value_regno);
|
|
- state->regs[value_regno].id = 0;
|
|
- state->regs[value_regno].off = 0;
|
|
- state->regs[value_regno].range = 0;
|
|
- state->regs[value_regno].type = reg_type;
|
|
+ mark_reg_known_zero(regs, value_regno);
|
|
+ regs[value_regno].id = 0;
|
|
+ regs[value_regno].off = 0;
|
|
+ regs[value_regno].range = 0;
|
|
+ regs[value_regno].type = reg_type;
|
|
}
|
|
|
|
} else if (reg->type == PTR_TO_STACK) {
|
|
- /* stack accesses must be at a fixed offset, so that we can
|
|
- * determine what type of data were returned.
|
|
- * See check_stack_read().
|
|
- */
|
|
- if (!tnum_is_const(reg->var_off)) {
|
|
- char tn_buf[48];
|
|
-
|
|
- tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
|
|
- verbose("variable stack access var_off=%s off=%d size=%d",
|
|
- tn_buf, off, size);
|
|
- return -EACCES;
|
|
- }
|
|
off += reg->var_off.value;
|
|
- if (off >= 0 || off < -MAX_BPF_STACK) {
|
|
- verbose("invalid stack off=%d size=%d\n", off, size);
|
|
- return -EACCES;
|
|
- }
|
|
+ err = check_stack_access(env, reg, off, size);
|
|
+ if (err)
|
|
+ return err;
|
|
|
|
if (env->prog->aux->stack_depth < -off)
|
|
env->prog->aux->stack_depth = -off;
|
|
|
|
- if (t == BPF_WRITE) {
|
|
- if (!env->allow_ptr_leaks &&
|
|
- state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
|
|
- size != BPF_REG_SIZE) {
|
|
- verbose("attempt to corrupt spilled pointer on stack\n");
|
|
- return -EACCES;
|
|
- }
|
|
+ if (t == BPF_WRITE)
|
|
err = check_stack_write(env, state, off, size,
|
|
value_regno, insn_idx);
|
|
- } else {
|
|
+ else
|
|
err = check_stack_read(state, off, size, value_regno);
|
|
- }
|
|
} else if (reg->type == PTR_TO_PACKET) {
|
|
if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
|
|
verbose("cannot write into packet\n");
|
|
@@ -1258,7 +1378,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
|
|
}
|
|
err = check_packet_access(env, regno, off, size);
|
|
if (!err && t == BPF_READ && value_regno >= 0)
|
|
- mark_reg_unknown(state->regs, value_regno);
|
|
+ mark_reg_unknown(regs, value_regno);
|
|
} else {
|
|
verbose("R%d invalid mem access '%s'\n",
|
|
regno, reg_type_str[reg->type]);
|
|
@@ -1266,9 +1386,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
|
|
}
|
|
|
|
if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
|
|
- state->regs[value_regno].type == SCALAR_VALUE) {
|
|
+ regs[value_regno].type == SCALAR_VALUE) {
|
|
/* b/h/w load zero-extends, mark upper bits as known 0 */
|
|
- coerce_reg_to_size(&state->regs[value_regno], size);
|
|
+ coerce_reg_to_size(®s[value_regno], size);
|
|
}
|
|
return err;
|
|
}
|
|
@@ -1333,9 +1453,9 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
|
|
int access_size, bool zero_size_allowed,
|
|
struct bpf_call_arg_meta *meta)
|
|
{
|
|
- struct bpf_verifier_state *state = &env->cur_state;
|
|
+ struct bpf_verifier_state *state = env->cur_state;
|
|
struct bpf_reg_state *regs = state->regs;
|
|
- int off, i;
|
|
+ int off, i, slot, spi;
|
|
|
|
if (regs[regno].type != PTR_TO_STACK) {
|
|
/* Allow zero-byte read from NULL, regardless of pointer type */
|
|
@@ -1376,7 +1496,11 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
|
|
}
|
|
|
|
for (i = 0; i < access_size; i++) {
|
|
- if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
|
|
+ slot = -(off + i) - 1;
|
|
+ spi = slot / BPF_REG_SIZE;
|
|
+ if (state->allocated_stack <= slot ||
|
|
+ state->stack[spi].slot_type[slot % BPF_REG_SIZE] !=
|
|
+ STACK_MISC) {
|
|
verbose("invalid indirect read from stack off %d+%d size %d\n",
|
|
off, i, access_size);
|
|
return -EACCES;
|
|
@@ -1389,7 +1513,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
|
|
int access_size, bool zero_size_allowed,
|
|
struct bpf_call_arg_meta *meta)
|
|
{
|
|
- struct bpf_reg_state *regs = env->cur_state.regs, *reg = &regs[regno];
|
|
+ struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
|
|
|
|
switch (reg->type) {
|
|
case PTR_TO_PACKET:
|
|
@@ -1406,7 +1530,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
|
|
enum bpf_arg_type arg_type,
|
|
struct bpf_call_arg_meta *meta)
|
|
{
|
|
- struct bpf_reg_state *regs = env->cur_state.regs, *reg = &regs[regno];
|
|
+ struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
|
|
enum bpf_reg_type expected_type, type = reg->type;
|
|
int err = 0;
|
|
|
|
@@ -1678,7 +1802,7 @@ static int check_raw_mode(const struct bpf_func_proto *fn)
|
|
*/
|
|
static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
|
|
{
|
|
- struct bpf_verifier_state *state = &env->cur_state;
|
|
+ struct bpf_verifier_state *state = env->cur_state;
|
|
struct bpf_reg_state *regs = state->regs, *reg;
|
|
int i;
|
|
|
|
@@ -1687,10 +1811,10 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
|
|
regs[i].type == PTR_TO_PACKET_END)
|
|
mark_reg_unknown(regs, i);
|
|
|
|
- for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
|
|
- if (state->stack_slot_type[i] != STACK_SPILL)
|
|
+ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
|
|
+ if (state->stack[i].slot_type[0] != STACK_SPILL)
|
|
continue;
|
|
- reg = &state->spilled_regs[i / BPF_REG_SIZE];
|
|
+ reg = &state->stack[i].spilled_ptr;
|
|
if (reg->type != PTR_TO_PACKET &&
|
|
reg->type != PTR_TO_PACKET_END)
|
|
continue;
|
|
@@ -1700,9 +1824,8 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
|
|
|
|
static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
|
|
{
|
|
- struct bpf_verifier_state *state = &env->cur_state;
|
|
const struct bpf_func_proto *fn = NULL;
|
|
- struct bpf_reg_state *regs = state->regs;
|
|
+ struct bpf_reg_state *regs;
|
|
struct bpf_call_arg_meta meta;
|
|
bool changes_data;
|
|
int i, err;
|
|
@@ -1776,6 +1899,7 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
|
|
return err;
|
|
}
|
|
|
|
+ regs = cur_regs(env);
|
|
/* reset caller saved regs */
|
|
for (i = 0; i < CALLER_SAVED_REGS; i++) {
|
|
mark_reg_not_init(regs, caller_saved[i]);
|
|
@@ -1880,6 +2004,125 @@ static bool check_reg_sane_offset(struct bpf_verifier_env *env,
|
|
return true;
|
|
}
|
|
|
|
+static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
|
|
+{
|
|
+ return &env->insn_aux_data[env->insn_idx];
|
|
+}
|
|
+
|
|
+static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
|
|
+ u32 *ptr_limit, u8 opcode, bool off_is_neg)
|
|
+{
|
|
+ bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
|
|
+ (opcode == BPF_SUB && !off_is_neg);
|
|
+ u32 off;
|
|
+
|
|
+ switch (ptr_reg->type) {
|
|
+ case PTR_TO_STACK:
|
|
+ off = ptr_reg->off + ptr_reg->var_off.value;
|
|
+ if (mask_to_left)
|
|
+ *ptr_limit = MAX_BPF_STACK + off;
|
|
+ else
|
|
+ *ptr_limit = -off;
|
|
+ return 0;
|
|
+ case PTR_TO_MAP_VALUE:
|
|
+ if (mask_to_left) {
|
|
+ *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
|
|
+ } else {
|
|
+ off = ptr_reg->smin_value + ptr_reg->off;
|
|
+ *ptr_limit = ptr_reg->map_ptr->value_size - off;
|
|
+ }
|
|
+ return 0;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+}
|
|
+
|
|
+static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
|
|
+ const struct bpf_insn *insn)
|
|
+{
|
|
+ return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
|
|
+}
|
|
+
|
|
+static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
|
|
+ u32 alu_state, u32 alu_limit)
|
|
+{
|
|
+ /* If we arrived here from different branches with different
|
|
+ * state or limits to sanitize, then this won't work.
|
|
+ */
|
|
+ if (aux->alu_state &&
|
|
+ (aux->alu_state != alu_state ||
|
|
+ aux->alu_limit != alu_limit))
|
|
+ return -EACCES;
|
|
+
|
|
+ /* Corresponding fixup done in fixup_bpf_calls(). */
|
|
+ aux->alu_state = alu_state;
|
|
+ aux->alu_limit = alu_limit;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int sanitize_val_alu(struct bpf_verifier_env *env,
|
|
+ struct bpf_insn *insn)
|
|
+{
|
|
+ struct bpf_insn_aux_data *aux = cur_aux(env);
|
|
+
|
|
+ if (can_skip_alu_sanitation(env, insn))
|
|
+ return 0;
|
|
+
|
|
+ return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
|
|
+}
|
|
+
|
|
+static int sanitize_ptr_alu(struct bpf_verifier_env *env,
|
|
+ struct bpf_insn *insn,
|
|
+ const struct bpf_reg_state *ptr_reg,
|
|
+ struct bpf_reg_state *dst_reg,
|
|
+ bool off_is_neg)
|
|
+{
|
|
+ struct bpf_verifier_state *vstate = env->cur_state;
|
|
+ struct bpf_insn_aux_data *aux = cur_aux(env);
|
|
+ bool ptr_is_dst_reg = ptr_reg == dst_reg;
|
|
+ u8 opcode = BPF_OP(insn->code);
|
|
+ u32 alu_state, alu_limit;
|
|
+ struct bpf_reg_state tmp;
|
|
+ bool ret;
|
|
+
|
|
+ if (can_skip_alu_sanitation(env, insn))
|
|
+ return 0;
|
|
+
|
|
+ /* We already marked aux for masking from non-speculative
|
|
+ * paths, thus we got here in the first place. We only care
|
|
+ * to explore bad access from here.
|
|
+ */
|
|
+ if (vstate->speculative)
|
|
+ goto do_sim;
|
|
+
|
|
+ alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
|
|
+ alu_state |= ptr_is_dst_reg ?
|
|
+ BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
|
|
+
|
|
+ if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
|
|
+ return 0;
|
|
+ if (update_alu_sanitation_state(aux, alu_state, alu_limit))
|
|
+ return -EACCES;
|
|
+do_sim:
|
|
+ /* Simulate and find potential out-of-bounds access under
|
|
+ * speculative execution from truncation as a result of
|
|
+ * masking when off was not within expected range. If off
|
|
+ * sits in dst, then we temporarily need to move ptr there
|
|
+ * to simulate dst (== 0) +/-= ptr. Needed, for example,
|
|
+ * for cases where we use K-based arithmetic in one direction
|
|
+ * and truncated reg-based in the other in order to explore
|
|
+ * bad access.
|
|
+ */
|
|
+ if (!ptr_is_dst_reg) {
|
|
+ tmp = *dst_reg;
|
|
+ *dst_reg = *ptr_reg;
|
|
+ }
|
|
+ ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
|
|
+ if (!ptr_is_dst_reg && ret)
|
|
+ *dst_reg = tmp;
|
|
+ return !ret ? -EFAULT : 0;
|
|
+}
|
|
+
|
|
/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
|
|
* Caller should also handle BPF_MOV case separately.
|
|
* If we return -EACCES, caller may want to try again treating pointer as a
|
|
@@ -1890,14 +2133,15 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
|
|
const struct bpf_reg_state *ptr_reg,
|
|
const struct bpf_reg_state *off_reg)
|
|
{
|
|
- struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
|
|
+ struct bpf_reg_state *regs = cur_regs(env), *dst_reg;
|
|
bool known = tnum_is_const(off_reg->var_off);
|
|
s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
|
|
smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
|
|
u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
|
|
umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
|
|
+ u32 dst = insn->dst_reg, src = insn->src_reg;
|
|
u8 opcode = BPF_OP(insn->code);
|
|
- u32 dst = insn->dst_reg;
|
|
+ int ret;
|
|
|
|
dst_reg = &regs[dst];
|
|
|
|
@@ -1949,6 +2193,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
|
|
|
|
switch (opcode) {
|
|
case BPF_ADD:
|
|
+ ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
|
|
+ if (ret < 0) {
|
|
+ verbose("R%d tried to add from different maps or paths\n", dst);
|
|
+ return ret;
|
|
+ }
|
|
/* We can take a fixed offset as long as it doesn't overflow
|
|
* the s32 'off' field
|
|
*/
|
|
@@ -1999,6 +2248,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
|
|
}
|
|
break;
|
|
case BPF_SUB:
|
|
+ ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
|
|
+ if (ret < 0) {
|
|
+ verbose("R%d tried to sub from different maps or paths\n", dst);
|
|
+ return ret;
|
|
+ }
|
|
if (dst_reg == off_reg) {
|
|
/* scalar -= pointer. Creates an unknown scalar */
|
|
if (!env->allow_ptr_leaks)
|
|
@@ -2071,6 +2325,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
|
|
verbose("R%d bitwise operator %s on pointer prohibited\n",
|
|
dst, bpf_alu_string[opcode >> 4]);
|
|
return -EACCES;
|
|
+ case PTR_TO_MAP_VALUE:
|
|
+ if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
|
|
+ verbose("R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
|
|
+ off_reg == dst_reg ? dst : src);
|
|
+ return -EACCES;
|
|
+ }
|
|
+ /* fall-through */
|
|
default:
|
|
/* other operators (e.g. MUL,LSH) produce non-pointer results */
|
|
if (!env->allow_ptr_leaks)
|
|
@@ -2085,6 +2346,25 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
|
|
__update_reg_bounds(dst_reg);
|
|
__reg_deduce_bounds(dst_reg);
|
|
__reg_bound_offset(dst_reg);
|
|
+
|
|
+ /* For unprivileged we require that resulting offset must be in bounds
|
|
+ * in order to be able to sanitize access later on.
|
|
+ */
|
|
+ if (!env->allow_ptr_leaks) {
|
|
+ if (dst_reg->type == PTR_TO_MAP_VALUE &&
|
|
+ check_map_access(env, dst, dst_reg->off, 1)) {
|
|
+ verbose("R%d pointer arithmetic of map value goes out of range, "
|
|
+ "prohibited for !root\n", dst);
|
|
+ return -EACCES;
|
|
+ } else if (dst_reg->type == PTR_TO_STACK &&
|
|
+ check_stack_access(env, dst_reg, dst_reg->off +
|
|
+ dst_reg->var_off.value, 1)) {
|
|
+ verbose("R%d stack pointer arithmetic goes out of range, "
|
|
+ "prohibited for !root\n", dst);
|
|
+ return -EACCES;
|
|
+ }
|
|
+ }
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -2097,12 +2377,14 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
|
|
struct bpf_reg_state *dst_reg,
|
|
struct bpf_reg_state src_reg)
|
|
{
|
|
- struct bpf_reg_state *regs = env->cur_state.regs;
|
|
+ struct bpf_reg_state *regs = cur_regs(env);
|
|
u8 opcode = BPF_OP(insn->code);
|
|
bool src_known, dst_known;
|
|
s64 smin_val, smax_val;
|
|
u64 umin_val, umax_val;
|
|
u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
|
|
+ u32 dst = insn->dst_reg;
|
|
+ int ret;
|
|
|
|
if (insn_bitness == 32) {
|
|
/* Relevant for 32-bit RSH: Information can propagate towards
|
|
@@ -2137,6 +2419,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
|
|
|
|
switch (opcode) {
|
|
case BPF_ADD:
|
|
+ ret = sanitize_val_alu(env, insn);
|
|
+ if (ret < 0) {
|
|
+ verbose("R%d tried to add from different pointers or scalars\n", dst);
|
|
+ return ret;
|
|
+ }
|
|
if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
|
|
signed_add_overflows(dst_reg->smax_value, smax_val)) {
|
|
dst_reg->smin_value = S64_MIN;
|
|
@@ -2156,6 +2443,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
|
|
dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
|
|
break;
|
|
case BPF_SUB:
|
|
+ ret = sanitize_val_alu(env, insn);
|
|
+ if (ret < 0) {
|
|
+ verbose("R%d tried to sub from different pointers or scalars\n", dst);
|
|
+ return ret;
|
|
+ }
|
|
if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
|
|
signed_sub_overflows(dst_reg->smax_value, smin_val)) {
|
|
/* Overflow possible, we know nothing */
|
|
@@ -2345,7 +2637,7 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
|
|
static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
|
|
struct bpf_insn *insn)
|
|
{
|
|
- struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg, *src_reg;
|
|
+ struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
|
|
struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
|
|
u8 opcode = BPF_OP(insn->code);
|
|
int rc;
|
|
@@ -2419,12 +2711,12 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
|
|
|
|
/* Got here implies adding two SCALAR_VALUEs */
|
|
if (WARN_ON_ONCE(ptr_reg)) {
|
|
- print_verifier_state(&env->cur_state);
|
|
+ print_verifier_state(env->cur_state);
|
|
verbose("verifier internal error: unexpected ptr_reg\n");
|
|
return -EINVAL;
|
|
}
|
|
if (WARN_ON(!src_reg)) {
|
|
- print_verifier_state(&env->cur_state);
|
|
+ print_verifier_state(env->cur_state);
|
|
verbose("verifier internal error: no src_reg\n");
|
|
return -EINVAL;
|
|
}
|
|
@@ -2434,7 +2726,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
|
|
/* check validity of 32-bit and 64-bit arithmetic operations */
|
|
static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
|
|
{
|
|
- struct bpf_reg_state *regs = env->cur_state.regs;
|
|
+ struct bpf_reg_state *regs = cur_regs(env);
|
|
u8 opcode = BPF_OP(insn->code);
|
|
int err;
|
|
|
|
@@ -2661,10 +2953,10 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
|
|
/* keep the maximum range already checked */
|
|
regs[i].range = max(regs[i].range, new_range);
|
|
|
|
- for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
|
|
- if (state->stack_slot_type[i] != STACK_SPILL)
|
|
+ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
|
|
+ if (state->stack[i].slot_type[0] != STACK_SPILL)
|
|
continue;
|
|
- reg = &state->spilled_regs[i / BPF_REG_SIZE];
|
|
+ reg = &state->stack[i].spilled_ptr;
|
|
if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
|
|
reg->range = max(reg->range, new_range);
|
|
}
|
|
@@ -2914,17 +3206,17 @@ static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
|
|
for (i = 0; i < MAX_BPF_REG; i++)
|
|
mark_map_reg(regs, i, id, is_null);
|
|
|
|
- for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
|
|
- if (state->stack_slot_type[i] != STACK_SPILL)
|
|
+ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
|
|
+ if (state->stack[i].slot_type[0] != STACK_SPILL)
|
|
continue;
|
|
- mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, is_null);
|
|
+ mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null);
|
|
}
|
|
}
|
|
|
|
static int check_cond_jmp_op(struct bpf_verifier_env *env,
|
|
struct bpf_insn *insn, int *insn_idx)
|
|
{
|
|
- struct bpf_verifier_state *other_branch, *this_branch = &env->cur_state;
|
|
+ struct bpf_verifier_state *other_branch, *this_branch = env->cur_state;
|
|
struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
|
|
u8 opcode = BPF_OP(insn->code);
|
|
int err;
|
|
@@ -2984,7 +3276,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
|
|
}
|
|
}
|
|
|
|
- other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
|
|
+ other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
|
|
+ false);
|
|
if (!other_branch)
|
|
return -EFAULT;
|
|
|
|
@@ -3087,7 +3380,7 @@ static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
|
|
/* verify BPF_LD_IMM64 instruction */
|
|
static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
|
|
{
|
|
- struct bpf_reg_state *regs = env->cur_state.regs;
|
|
+ struct bpf_reg_state *regs = cur_regs(env);
|
|
int err;
|
|
|
|
if (BPF_SIZE(insn->code) != BPF_DW) {
|
|
@@ -3148,7 +3441,7 @@ static bool may_access_skb(enum bpf_prog_type type)
|
|
*/
|
|
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
|
|
{
|
|
- struct bpf_reg_state *regs = env->cur_state.regs;
|
|
+ struct bpf_reg_state *regs = cur_regs(env);
|
|
u8 mode = BPF_MODE(insn->code);
|
|
int i, err;
|
|
|
|
@@ -3534,6 +3827,57 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
|
|
return false;
|
|
}
|
|
|
|
+static bool stacksafe(struct bpf_verifier_state *old,
|
|
+ struct bpf_verifier_state *cur,
|
|
+ struct idpair *idmap)
|
|
+{
|
|
+ int i, spi;
|
|
+
|
|
+ /* if explored stack has more populated slots than current stack
|
|
+ * such stacks are not equivalent
|
|
+ */
|
|
+ if (old->allocated_stack > cur->allocated_stack)
|
|
+ return false;
|
|
+
|
|
+ /* walk slots of the explored stack and ignore any additional
|
|
+ * slots in the current stack, since explored(safe) state
|
|
+ * didn't use them
|
|
+ */
|
|
+ for (i = 0; i < old->allocated_stack; i++) {
|
|
+ spi = i / BPF_REG_SIZE;
|
|
+
|
|
+ if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
|
|
+ continue;
|
|
+ if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
|
|
+ cur->stack[spi].slot_type[i % BPF_REG_SIZE])
|
|
+ /* Ex: old explored (safe) state has STACK_SPILL in
|
|
+ * this stack slot, but current has has STACK_MISC ->
|
|
+ * this verifier states are not equivalent,
|
|
+ * return false to continue verification of this path
|
|
+ */
|
|
+ return false;
|
|
+ if (i % BPF_REG_SIZE)
|
|
+ continue;
|
|
+ if (old->stack[spi].slot_type[0] != STACK_SPILL)
|
|
+ continue;
|
|
+ if (!regsafe(&old->stack[spi].spilled_ptr,
|
|
+ &cur->stack[spi].spilled_ptr,
|
|
+ idmap))
|
|
+ /* when explored and current stack slot are both storing
|
|
+ * spilled registers, check that stored pointers types
|
|
+ * are the same as well.
|
|
+ * Ex: explored safe path could have stored
|
|
+ * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
|
|
+ * but current path has stored:
|
|
+ * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
|
|
+ * such verifier states are not equivalent.
|
|
+ * return false to continue verification of this path
|
|
+ */
|
|
+ return false;
|
|
+ }
|
|
+ return true;
|
|
+}
|
|
+
|
|
/* compare two verifier states
|
|
*
|
|
* all states stored in state_list are known to be valid, since
|
|
@@ -3568,6 +3912,12 @@ static bool states_equal(struct bpf_verifier_env *env,
|
|
bool ret = false;
|
|
int i;
|
|
|
|
+ /* Verification state from speculative execution simulation
|
|
+ * must never prune a non-speculative execution one.
|
|
+ */
|
|
+ if (old->speculative && !cur->speculative)
|
|
+ return false;
|
|
+
|
|
idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
|
|
/* If we failed to allocate the idmap, just say it's not safe */
|
|
if (!idmap)
|
|
@@ -3578,37 +3928,8 @@ static bool states_equal(struct bpf_verifier_env *env,
|
|
goto out_free;
|
|
}
|
|
|
|
- for (i = 0; i < MAX_BPF_STACK; i++) {
|
|
- if (old->stack_slot_type[i] == STACK_INVALID)
|
|
- continue;
|
|
- if (old->stack_slot_type[i] != cur->stack_slot_type[i])
|
|
- /* Ex: old explored (safe) state has STACK_SPILL in
|
|
- * this stack slot, but current has has STACK_MISC ->
|
|
- * this verifier states are not equivalent,
|
|
- * return false to continue verification of this path
|
|
- */
|
|
- goto out_free;
|
|
- if (i % BPF_REG_SIZE)
|
|
- continue;
|
|
- if (old->stack_slot_type[i] != STACK_SPILL)
|
|
- continue;
|
|
- if (!regsafe(&old->spilled_regs[i / BPF_REG_SIZE],
|
|
- &cur->spilled_regs[i / BPF_REG_SIZE],
|
|
- idmap))
|
|
- /* when explored and current stack slot are both storing
|
|
- * spilled registers, check that stored pointers types
|
|
- * are the same as well.
|
|
- * Ex: explored safe path could have stored
|
|
- * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
|
|
- * but current path has stored:
|
|
- * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
|
|
- * such verifier states are not equivalent.
|
|
- * return false to continue verification of this path
|
|
- */
|
|
- goto out_free;
|
|
- else
|
|
- continue;
|
|
- }
|
|
+ if (!stacksafe(old, cur, idmap))
|
|
+ goto out_free;
|
|
ret = true;
|
|
out_free:
|
|
kfree(idmap);
|
|
@@ -3644,17 +3965,19 @@ static bool do_propagate_liveness(const struct bpf_verifier_state *state,
|
|
}
|
|
}
|
|
/* ... and stack slots */
|
|
- for (i = 0; i < MAX_BPF_STACK / BPF_REG_SIZE; i++) {
|
|
- if (parent->stack_slot_type[i * BPF_REG_SIZE] != STACK_SPILL)
|
|
+ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
|
|
+ i < parent->allocated_stack / BPF_REG_SIZE; i++) {
|
|
+ if (parent->stack[i].slot_type[0] != STACK_SPILL)
|
|
continue;
|
|
- if (state->stack_slot_type[i * BPF_REG_SIZE] != STACK_SPILL)
|
|
+ if (state->stack[i].slot_type[0] != STACK_SPILL)
|
|
continue;
|
|
- if (parent->spilled_regs[i].live & REG_LIVE_READ)
|
|
+ if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
|
|
continue;
|
|
- if (writes && (state->spilled_regs[i].live & REG_LIVE_WRITTEN))
|
|
+ if (writes &&
|
|
+ (state->stack[i].spilled_ptr.live & REG_LIVE_WRITTEN))
|
|
continue;
|
|
- if (state->spilled_regs[i].live & REG_LIVE_READ) {
|
|
- parent->spilled_regs[i].live |= REG_LIVE_READ;
|
|
+ if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) {
|
|
+ parent->stack[i].spilled_ptr.live |= REG_LIVE_READ;
|
|
touched = true;
|
|
}
|
|
}
|
|
@@ -3684,7 +4007,8 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
|
|
{
|
|
struct bpf_verifier_state_list *new_sl;
|
|
struct bpf_verifier_state_list *sl;
|
|
- int i;
|
|
+ struct bpf_verifier_state *cur = env->cur_state;
|
|
+ int i, err;
|
|
|
|
sl = env->explored_states[insn_idx];
|
|
if (!sl)
|
|
@@ -3694,7 +4018,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
|
|
return 0;
|
|
|
|
while (sl != STATE_LIST_MARK) {
|
|
- if (states_equal(env, &sl->state, &env->cur_state)) {
|
|
+ if (states_equal(env, &sl->state, cur)) {
|
|
/* reached equivalent register/stack state,
|
|
* prune the search.
|
|
* Registers read by the continuation are read by us.
|
|
@@ -3705,7 +4029,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
|
|
* they'll be immediately forgotten as we're pruning
|
|
* this state and will pop a new one.
|
|
*/
|
|
- propagate_liveness(&sl->state, &env->cur_state);
|
|
+ propagate_liveness(&sl->state, cur);
|
|
return 1;
|
|
}
|
|
sl = sl->next;
|
|
@@ -3717,16 +4041,21 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
|
|
* it will be rejected. Since there are no loops, we won't be
|
|
* seeing this 'insn_idx' instruction again on the way to bpf_exit
|
|
*/
|
|
- new_sl = kmalloc(sizeof(struct bpf_verifier_state_list), GFP_USER);
|
|
+ new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
|
|
if (!new_sl)
|
|
return -ENOMEM;
|
|
|
|
/* add new state to the head of linked list */
|
|
- memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state));
|
|
+ err = copy_verifier_state(&new_sl->state, cur);
|
|
+ if (err) {
|
|
+ free_verifier_state(&new_sl->state, false);
|
|
+ kfree(new_sl);
|
|
+ return err;
|
|
+ }
|
|
new_sl->next = env->explored_states[insn_idx];
|
|
env->explored_states[insn_idx] = new_sl;
|
|
/* connect new state to parentage chain */
|
|
- env->cur_state.parent = &new_sl->state;
|
|
+ cur->parent = &new_sl->state;
|
|
/* clear write marks in current state: the writes we did are not writes
|
|
* our child did, so they don't screen off its reads from us.
|
|
* (There are no read marks in current state, because reads always mark
|
|
@@ -3734,10 +4063,10 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
|
|
* explored_states can get read marks.)
|
|
*/
|
|
for (i = 0; i < BPF_REG_FP; i++)
|
|
- env->cur_state.regs[i].live = REG_LIVE_NONE;
|
|
- for (i = 0; i < MAX_BPF_STACK / BPF_REG_SIZE; i++)
|
|
- if (env->cur_state.stack_slot_type[i * BPF_REG_SIZE] == STACK_SPILL)
|
|
- env->cur_state.spilled_regs[i].live = REG_LIVE_NONE;
|
|
+ cur->regs[i].live = REG_LIVE_NONE;
|
|
+ for (i = 0; i < cur->allocated_stack / BPF_REG_SIZE; i++)
|
|
+ if (cur->stack[i].slot_type[0] == STACK_SPILL)
|
|
+ cur->stack[i].spilled_ptr.live = REG_LIVE_NONE;
|
|
return 0;
|
|
}
|
|
|
|
@@ -3752,29 +4081,31 @@ static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
|
|
|
|
static int do_check(struct bpf_verifier_env *env)
|
|
{
|
|
- struct bpf_verifier_state *state = &env->cur_state;
|
|
+ struct bpf_verifier_state *state;
|
|
struct bpf_insn *insns = env->prog->insnsi;
|
|
- struct bpf_reg_state *regs = state->regs;
|
|
+ struct bpf_reg_state *regs;
|
|
int insn_cnt = env->prog->len;
|
|
- int insn_idx, prev_insn_idx = 0;
|
|
int insn_processed = 0;
|
|
bool do_print_state = false;
|
|
|
|
- init_reg_state(regs);
|
|
+ state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
|
|
+ if (!state)
|
|
+ return -ENOMEM;
|
|
+ env->cur_state = state;
|
|
+ init_reg_state(state->regs);
|
|
state->parent = NULL;
|
|
- insn_idx = 0;
|
|
for (;;) {
|
|
struct bpf_insn *insn;
|
|
u8 class;
|
|
int err;
|
|
|
|
- if (insn_idx >= insn_cnt) {
|
|
+ if (env->insn_idx >= insn_cnt) {
|
|
verbose("invalid insn idx %d insn_cnt %d\n",
|
|
- insn_idx, insn_cnt);
|
|
+ env->insn_idx, insn_cnt);
|
|
return -EFAULT;
|
|
}
|
|
|
|
- insn = &insns[insn_idx];
|
|
+ insn = &insns[env->insn_idx];
|
|
class = BPF_CLASS(insn->code);
|
|
|
|
if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
|
|
@@ -3783,17 +4114,19 @@ static int do_check(struct bpf_verifier_env *env)
|
|
return -E2BIG;
|
|
}
|
|
|
|
- err = is_state_visited(env, insn_idx);
|
|
+ err = is_state_visited(env, env->insn_idx);
|
|
if (err < 0)
|
|
return err;
|
|
if (err == 1) {
|
|
/* found equivalent state, can prune the search */
|
|
if (log_level) {
|
|
if (do_print_state)
|
|
- verbose("\nfrom %d to %d: safe\n",
|
|
- prev_insn_idx, insn_idx);
|
|
+ verbose("\nfrom %d to %d%s: safe\n",
|
|
+ env->prev_insn_idx, env->insn_idx,
|
|
+ env->cur_state->speculative ?
|
|
+ " (speculative execution)" : "");
|
|
else
|
|
- verbose("%d: safe\n", insn_idx);
|
|
+ verbose("%d: safe\n", env->insn_idx);
|
|
}
|
|
goto process_bpf_exit;
|
|
}
|
|
@@ -3803,24 +4136,27 @@ static int do_check(struct bpf_verifier_env *env)
|
|
|
|
if (log_level > 1 || (log_level && do_print_state)) {
|
|
if (log_level > 1)
|
|
- verbose("%d:", insn_idx);
|
|
+ verbose("%d:", env->insn_idx);
|
|
else
|
|
- verbose("\nfrom %d to %d:",
|
|
- prev_insn_idx, insn_idx);
|
|
- print_verifier_state(&env->cur_state);
|
|
+ verbose("\nfrom %d to %d%s:",
|
|
+ env->prev_insn_idx, env->insn_idx,
|
|
+ env->cur_state->speculative ?
|
|
+ " (speculative execution)" : "");
|
|
+ print_verifier_state(env->cur_state);
|
|
do_print_state = false;
|
|
}
|
|
|
|
if (log_level) {
|
|
- verbose("%d: ", insn_idx);
|
|
+ verbose("%d: ", env->insn_idx);
|
|
print_bpf_insn(env, insn);
|
|
}
|
|
|
|
- err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
|
|
+ err = ext_analyzer_insn_hook(env, env->insn_idx, env->prev_insn_idx);
|
|
if (err)
|
|
return err;
|
|
|
|
- env->insn_aux_data[insn_idx].seen = true;
|
|
+ regs = cur_regs(env);
|
|
+ env->insn_aux_data[env->insn_idx].seen = true;
|
|
if (class == BPF_ALU || class == BPF_ALU64) {
|
|
err = check_alu_op(env, insn);
|
|
if (err)
|
|
@@ -3845,13 +4181,13 @@ static int do_check(struct bpf_verifier_env *env)
|
|
/* check that memory (src_reg + off) is readable,
|
|
* the state of dst_reg will be updated by this func
|
|
*/
|
|
- err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
|
|
- BPF_SIZE(insn->code), BPF_READ,
|
|
- insn->dst_reg, false);
|
|
+ err = check_mem_access(env, env->insn_idx, insn->src_reg,
|
|
+ insn->off, BPF_SIZE(insn->code),
|
|
+ BPF_READ, insn->dst_reg, false);
|
|
if (err)
|
|
return err;
|
|
|
|
- prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
|
|
+ prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
|
|
|
|
if (*prev_src_type == NOT_INIT) {
|
|
/* saw a valid insn
|
|
@@ -3878,10 +4214,10 @@ static int do_check(struct bpf_verifier_env *env)
|
|
enum bpf_reg_type *prev_dst_type, dst_reg_type;
|
|
|
|
if (BPF_MODE(insn->code) == BPF_XADD) {
|
|
- err = check_xadd(env, insn_idx, insn);
|
|
+ err = check_xadd(env, env->insn_idx, insn);
|
|
if (err)
|
|
return err;
|
|
- insn_idx++;
|
|
+ env->insn_idx++;
|
|
continue;
|
|
}
|
|
|
|
@@ -3897,13 +4233,13 @@ static int do_check(struct bpf_verifier_env *env)
|
|
dst_reg_type = regs[insn->dst_reg].type;
|
|
|
|
/* check that memory (dst_reg + off) is writeable */
|
|
- err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
|
|
- BPF_SIZE(insn->code), BPF_WRITE,
|
|
- insn->src_reg, false);
|
|
+ err = check_mem_access(env, env->insn_idx, insn->dst_reg,
|
|
+ insn->off, BPF_SIZE(insn->code),
|
|
+ BPF_WRITE, insn->src_reg, false);
|
|
if (err)
|
|
return err;
|
|
|
|
- prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
|
|
+ prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
|
|
|
|
if (*prev_dst_type == NOT_INIT) {
|
|
*prev_dst_type = dst_reg_type;
|
|
@@ -3932,9 +4268,9 @@ static int do_check(struct bpf_verifier_env *env)
|
|
}
|
|
|
|
/* check that memory (dst_reg + off) is writeable */
|
|
- err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
|
|
- BPF_SIZE(insn->code), BPF_WRITE,
|
|
- -1, false);
|
|
+ err = check_mem_access(env, env->insn_idx, insn->dst_reg,
|
|
+ insn->off, BPF_SIZE(insn->code),
|
|
+ BPF_WRITE, -1, false);
|
|
if (err)
|
|
return err;
|
|
|
|
@@ -3950,7 +4286,7 @@ static int do_check(struct bpf_verifier_env *env)
|
|
return -EINVAL;
|
|
}
|
|
|
|
- err = check_call(env, insn->imm, insn_idx);
|
|
+ err = check_call(env, insn->imm, env->insn_idx);
|
|
if (err)
|
|
return err;
|
|
|
|
@@ -3963,7 +4299,7 @@ static int do_check(struct bpf_verifier_env *env)
|
|
return -EINVAL;
|
|
}
|
|
|
|
- insn_idx += insn->off + 1;
|
|
+ env->insn_idx += insn->off + 1;
|
|
continue;
|
|
|
|
} else if (opcode == BPF_EXIT) {
|
|
@@ -3991,15 +4327,17 @@ static int do_check(struct bpf_verifier_env *env)
|
|
}
|
|
|
|
process_bpf_exit:
|
|
- insn_idx = pop_stack(env, &prev_insn_idx);
|
|
- if (insn_idx < 0) {
|
|
+ err = pop_stack(env, &env->prev_insn_idx, &env->insn_idx);
|
|
+ if (err < 0) {
|
|
+ if (err != -ENOENT)
|
+						return err;
					break;
				} else {
					do_print_state = true;
					continue;
				}
			} else {
-				err = check_cond_jmp_op(env, insn, &insn_idx);
+				err = check_cond_jmp_op(env, insn, &env->insn_idx);
				if (err)
					return err;
			}
@@ -4016,8 +4354,8 @@ process_bpf_exit:
			if (err)
				return err;

-			insn_idx++;
-			env->insn_aux_data[insn_idx].seen = true;
+			env->insn_idx++;
+			env->insn_aux_data[env->insn_idx].seen = true;
		} else {
			verbose("invalid BPF_LD mode\n");
			return -EINVAL;
@@ -4027,7 +4365,7 @@ process_bpf_exit:
			return -EINVAL;
		}

-		insn_idx++;
+		env->insn_idx++;
	}

	verbose("processed %d insns, stack depth %d\n",
@@ -4402,6 +4740,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
	struct bpf_prog *new_prog;
	struct bpf_map *map_ptr;
	int i, cnt, delta = 0;
+	struct bpf_insn_aux_data *aux;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
@@ -4422,6 +4761,58 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
			continue;
		}

+		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
+		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
+			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
+			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
+			struct bpf_insn insn_buf[16];
+			struct bpf_insn *patch = &insn_buf[0];
+			bool issrc, isneg;
+			u32 off_reg;
+
+			aux = &env->insn_aux_data[i + delta];
+			if (!aux->alu_state ||
+			    aux->alu_state == BPF_ALU_NON_POINTER)
+				continue;
+
+			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
+			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
+				BPF_ALU_SANITIZE_SRC;
+
+			off_reg = issrc ? insn->src_reg : insn->dst_reg;
+			if (isneg)
+				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
+			*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
+			*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
+			*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
+			*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
+			*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
+			if (issrc) {
+				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
+							 off_reg);
+				insn->src_reg = BPF_REG_AX;
+			} else {
+				*patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
+							 BPF_REG_AX);
+			}
+			if (isneg)
+				insn->code = insn->code == code_add ?
					     code_sub : code_add;
+			*patch++ = *insn;
+			if (issrc && isneg)
+				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
+			cnt = patch - insn_buf;
+
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+			if (!new_prog)
+				return -ENOMEM;
+
+			delta += cnt - 1;
+			env->prog = prog = new_prog;
+			insn = new_prog->insnsi + i + delta;
+			continue;
+		}
+
		if (insn->code != (BPF_JMP | BPF_CALL))
			continue;

@@ -4557,6 +4948,7 @@ static void free_states(struct bpf_verifier_env *env)
	if (sl)
		while (sl != STATE_LIST_MARK) {
			sln = sl->next;
+			free_verifier_state(&sl->state, false);
			kfree(sl);
			sl = sln;
		}
@@ -4633,9 +5025,13 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);

	ret = do_check(env);
+	if (env->cur_state) {
+		free_verifier_state(env->cur_state, true);
+		env->cur_state = NULL;
+	}

skip_full_check:
-	while (pop_stack(env, NULL) >= 0);
+	while (!pop_stack(env, NULL, NULL));
	free_states(env);

	if (ret == 0)
@@ -4741,9 +5137,13 @@ int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);

	ret = do_check(env);
+	if (env->cur_state) {
+		free_verifier_state(env->cur_state, true);
+		env->cur_state = NULL;
+	}

skip_full_check:
-	while (pop_stack(env, NULL) >= 0);
+	while (!pop_stack(env, NULL, NULL));
	free_states(env);

	mutex_unlock(&bpf_verifier_lock);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 92939b5397df..580616e6fcee 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6923,6 +6923,7 @@ static void perf_event_mmap_output(struct perf_event *event,
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int size = mmap_event->event_id.header.size;
+	u32 type = mmap_event->event_id.header.type;
	int ret;

	if (!perf_event_mmap_match(event, data))
@@ -6966,6 +6967,7 @@ static void perf_event_mmap_output(struct perf_event *event,
	perf_output_end(&handle);
out:
	mmap_event->event_id.header.size = size;
+	mmap_event->event_id.header.type = type;
}

static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index f9aaf4994062..2e4869fa66c9 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -15,6 +15,7 @@
#include <linux/lockdep.h>
#include <linux/export.h>
#include <linux/sysctl.h>
+#include <linux/suspend.h>
#include <linux/utsname.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
@@ -232,6 +233,28 @@ void reset_hung_task_detector(void)
}
EXPORT_SYMBOL_GPL(reset_hung_task_detector);

+static bool hung_detector_suspended;
+
+static int hungtask_pm_notify(struct notifier_block *self,
+			      unsigned long action, void *hcpu)
+{
+	switch (action) {
+	case PM_SUSPEND_PREPARE:
+	case PM_HIBERNATION_PREPARE:
+	case PM_RESTORE_PREPARE:
+		hung_detector_suspended = true;
+		break;
+	case PM_POST_SUSPEND:
+	case PM_POST_HIBERNATION:
+	case PM_POST_RESTORE:
+		hung_detector_suspended = false;
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
/*
 * kthread which checks for tasks stuck in D state
 */
@@ -246,7 +269,8 @@ static int watchdog(void *dummy)
		long t = hung_timeout_jiffies(hung_last_checked, timeout);

		if (t <= 0) {
-			if (!atomic_xchg(&reset_hung_task, 0))
+			if (!atomic_xchg(&reset_hung_task, 0) &&
+			    !hung_detector_suspended)
				check_hung_uninterruptible_tasks(timeout);
			hung_last_checked = jiffies;
			continue;
@@ -260,6 +284,10 @@ static int watchdog(void *dummy)
static int __init hung_task_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+
+	/* Disable hung task detector on suspend */
+	pm_notifier(hungtask_pm_notify, 0);
+
	watchdog_task = kthread_run(watchdog, NULL, "khungtaskd");

	return 0;
diff --git a/lib/div64.c b/lib/div64.c
index 58e2a404097e..a2688b882461 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -103,7 +103,7 @@ u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
		quot = div_u64_rem(dividend, divisor, &rem32);
		*remainder = rem32;
	} else {
-		int n = 1 + fls(high);
+		int n = fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
@@ -141,7 +141,7 @@ u64 div64_u64(u64 dividend, u64 divisor)
	if (high == 0) {
		quot = div_u64(dividend, divisor);
	} else {
-		int n = 1 + fls(high);
+		int n = fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 9743837aebc6..766d1ef4640a 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -570,9 +570,10 @@ int p9stat_read(struct p9_client *clnt, char *buf, int len, struct p9_wstat *st)
	if (ret) {
		p9_debug(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
		trace_9p_protocol_dump(clnt, &fake_pdu);
+		return ret;
	}

-	return ret;
+	return fake_pdu.offset;
}
EXPORT_SYMBOL(p9stat_read);

diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index af46bc49e1e9..b5f84f428aa6 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -293,7 +293,7 @@ out_interface:
	goto out;
}

-void __exit atalk_proc_exit(void)
+void atalk_proc_exit(void)
{
	remove_proc_entry("interface", atalk_proc_dir);
	remove_proc_entry("route", atalk_proc_dir);
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 5d035c1f1156..d1b68cc7da89 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1912,12 +1912,16 @@ static const char atalk_err_snap[] __initconst =
/* Called by proto.c on kernel start up */
static int __init atalk_init(void)
{
-	int rc = proto_register(&ddp_proto, 0);
+	int rc;

-	if (rc != 0)
+	rc = proto_register(&ddp_proto, 0);
+	if (rc)
		goto out;

-	(void)sock_register(&atalk_family_ops);
+	rc = sock_register(&atalk_family_ops);
+	if (rc)
+		goto out_proto;
+
	ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
	if (!ddp_dl)
		printk(atalk_err_snap);
@@ -1925,12 +1929,33 @@ static int __init atalk_init(void)
	dev_add_pack(&ltalk_packet_type);
	dev_add_pack(&ppptalk_packet_type);

-	register_netdevice_notifier(&ddp_notifier);
+	rc = register_netdevice_notifier(&ddp_notifier);
+	if (rc)
+		goto out_sock;
+
	aarp_proto_init();
-	atalk_proc_init();
-	atalk_register_sysctl();
+	rc = atalk_proc_init();
+	if (rc)
+		goto out_aarp;
+
+	rc = atalk_register_sysctl();
+	if (rc)
+		goto out_proc;
out:
	return rc;
+out_proc:
+	atalk_proc_exit();
+out_aarp:
+	aarp_cleanup_module();
+	unregister_netdevice_notifier(&ddp_notifier);
+out_sock:
+	dev_remove_pack(&ppptalk_packet_type);
+	dev_remove_pack(&ltalk_packet_type);
+	unregister_snap_client(ddp_dl);
+	sock_unregister(PF_APPLETALK);
+out_proto:
+	proto_unregister(&ddp_proto);
+	goto out;
}
module_init(atalk_init);

diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c
index c744a853fa5f..d945b7c0176d 100644
--- a/net/appletalk/sysctl_net_atalk.c
+++ b/net/appletalk/sysctl_net_atalk.c
@@ -45,9 +45,12 @@ static struct ctl_table atalk_table[] = {

static struct ctl_table_header *atalk_table_header;

-void atalk_register_sysctl(void)
+int __init atalk_register_sysctl(void)
{
	atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table);
+	if (!atalk_table_header)
+		return -ENOMEM;
+	return 0;
}

void atalk_unregister_sysctl(void)
diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c
index 891f4e7e8ea7..db18c0177b0f 100644
--- a/net/netfilter/xt_cgroup.c
+++ b/net/netfilter/xt_cgroup.c
@@ -66,6 +66,38 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par)
	return 0;
}

+static int cgroup_mt_check_v2(const struct xt_mtchk_param *par)
+{
+	struct xt_cgroup_info_v2 *info = par->matchinfo;
+	struct cgroup *cgrp;
+
+	if ((info->invert_path & ~1) || (info->invert_classid & ~1))
+		return -EINVAL;
+
+	if (!info->has_path && !info->has_classid) {
+		pr_info("xt_cgroup: no path or classid specified\n");
+		return -EINVAL;
+	}
+
+	if (info->has_path && info->has_classid) {
+		pr_info_ratelimited("path and classid specified\n");
+		return -EINVAL;
+	}
+
+	info->priv = NULL;
+	if (info->has_path) {
+		cgrp = cgroup_get_from_path(info->path);
+		if (IS_ERR(cgrp)) {
+			pr_info_ratelimited("invalid path, errno=%ld\n",
+					    PTR_ERR(cgrp));
+			return -EINVAL;
+		}
+		info->priv = cgrp;
+	}
+
+	return 0;
+}
+
static bool
cgroup_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
{
@@ -95,6 +127,24 @@ static bool cgroup_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
		info->invert_classid;
}

+static bool cgroup_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct xt_cgroup_info_v2 *info = par->matchinfo;
+	struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data;
+	struct cgroup *ancestor = info->priv;
+	struct sock *sk = skb->sk;
+
+	if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))
+		return false;
+
+	if (ancestor)
+		return cgroup_is_descendant(sock_cgroup_ptr(skcd), ancestor) ^
+			info->invert_path;
+	else
+		return (info->classid == sock_cgroup_classid(skcd)) ^
+			info->invert_classid;
+}
+
static void cgroup_mt_destroy_v1(const struct xt_mtdtor_param *par)
{
	struct xt_cgroup_info_v1 *info = par->matchinfo;
@@ -103,6 +153,14 @@ static void cgroup_mt_destroy_v1(const struct xt_mtdtor_param *par)
		cgroup_put(info->priv);
}

+static void cgroup_mt_destroy_v2(const struct xt_mtdtor_param *par)
+{
+	struct xt_cgroup_info_v2 *info = par->matchinfo;
+
+	if (info->priv)
+		cgroup_put(info->priv);
+}
+
static struct xt_match cgroup_mt_reg[] __read_mostly = {
	{
		.name = "cgroup",
@@ -130,6 +188,20 @@ static struct xt_match cgroup_mt_reg[] __read_mostly = {
			(1 << NF_INET_POST_ROUTING) |
			(1 << NF_INET_LOCAL_IN),
	},
+	{
+		.name = "cgroup",
+		.revision = 2,
+		.family = NFPROTO_UNSPEC,
+		.checkentry = cgroup_mt_check_v2,
+		.match = cgroup_mt_v2,
+		.matchsize = sizeof(struct xt_cgroup_info_v2),
+		.usersize = offsetof(struct xt_cgroup_info_v2, priv),
+		.destroy = cgroup_mt_destroy_v2,
+		.me = THIS_MODULE,
+		.hooks = (1 << NF_INET_LOCAL_OUT) |
+			 (1 << NF_INET_POST_ROUTING) |
+			 (1 << NF_INET_LOCAL_IN),
+	},
};

static int __init cgroup_mt_init(void)
diff --git a/sound/drivers/opl3/opl3_voice.h b/sound/drivers/opl3/opl3_voice.h
index eaef435e0528..abf6c23a721c 100644
--- a/sound/drivers/opl3/opl3_voice.h
+++ b/sound/drivers/opl3/opl3_voice.h
@@ -41,7 +41,7 @@ void snd_opl3_timer_func(unsigned long data);

/* Prototypes for opl3_drums.c */
void snd_opl3_load_drums(struct snd_opl3 *opl3);
-void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int on_off, int vel, struct snd_midi_channel *chan);
+void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int vel, int on_off, struct snd_midi_channel *chan);

/* Prototypes for opl3_oss.c */
#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
index d77dcba276b5..1eb8b61a185b 100644
--- a/sound/isa/sb/sb8.c
+++ b/sound/isa/sb/sb8.c
@@ -111,6 +111,10 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)

	/* block the 0x388 port to avoid PnP conflicts */
	acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
+	if (!acard->fm_res) {
+		err = -EBUSY;
+		goto _err;
+	}

	if (port[dev] != SNDRV_AUTO_PORT) {
		if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index d68f99e076a8..e1f0bcd45c37 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -1953,6 +1953,11 @@ static int snd_echo_create(struct snd_card *card,
	}
	chip->dsp_registers = (volatile u32 __iomem *)
		ioremap_nocache(chip->dsp_registers_phys, sz);
+	if (!chip->dsp_registers) {
+		dev_err(chip->card->dev, "ioremap failed\n");
+		snd_echo_free(chip);
+		return -ENOMEM;
+	}

	if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED,
			KBUILD_MODNAME, chip)) {
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index 5b4fff3adc4b..782a8966b721 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -114,7 +114,7 @@ Given a $HOME/.perfconfig like this:

	[report]
		# Defaults
-		sort-order = comm,dso,symbol
+		sort_order = comm,dso,symbol
		percent-limit = 0
		queue-size = 0
		children = true
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 3103a33c13a8..133eb7949321 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1345,8 +1345,9 @@ int cmd_top(int argc, const char **argv)
		goto out_delete_evlist;

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
-	if (symbol__init(NULL) < 0)
-		return -1;
+	status = symbol__init(NULL);
+	if (status < 0)
+		goto out_delete_evlist;

	sort__setup_elide(stdout);

diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index d0406116c905..926a8e1b5e94 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -85,5 +85,6 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
	if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
		ret = -1;

+	perf_evsel__delete(evsel);
	return ret;
}
diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
index 01f0706995a9..9acc1e80b936 100644
--- a/tools/perf/tests/expr.c
+++ b/tools/perf/tests/expr.c
@@ -19,7 +19,7 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
	const char *p;
	const char **other;
	double val;
-	int ret;
+	int i, ret;
	struct parse_ctx ctx;
	int num_other;

@@ -56,6 +56,9 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
	TEST_ASSERT_VAL("find other", !strcmp(other[1], "BAZ"));
	TEST_ASSERT_VAL("find other", !strcmp(other[2], "BOZO"));
	TEST_ASSERT_VAL("find other", other[3] == NULL);
+
+	for (i = 0; i < num_other; i++)
+		free((void *)other[i]);
	free((void *)other);

	return 0;
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
index c531e6deb104..493ecb611540 100644
--- a/tools/perf/tests/openat-syscall-all-cpus.c
+++ b/tools/perf/tests/openat-syscall-all-cpus.c
@@ -45,7 +45,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
	if (IS_ERR(evsel)) {
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
-		goto out_thread_map_delete;
+		goto out_cpu_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads) < 0) {
@@ -119,6 +119,8 @@ out_close_fd:
	perf_evsel__close_fd(evsel);
out_evsel_delete:
	perf_evsel__delete(evsel);
+out_cpu_map_delete:
+	cpu_map__put(cpus);
out_thread_map_delete:
	thread_map__put(threads);
	return err;
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 7f8553630c4d..69910deab6e0 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -185,6 +185,7 @@ char *build_id_cache__linkname(const char *sbuild_id, char *bf, size_t size)
	return bf;
}

+/* The caller is responsible to free the returned buffer. */
char *build_id_cache__origname(const char *sbuild_id)
{
	char *linkname;
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 4b893c622236..a0c9ff27c7bf 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -628,11 +628,10 @@ static int collect_config(const char *var, const char *value,
	}

	ret = set_value(item, value);
-	return ret;

out_free:
	free(key);
-	return -1;
+	return ret;
}

int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 44c2f62b47a3..0cf6f537f980 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1229,6 +1229,7 @@ void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	assert(evsel->evlist == NULL);
+	perf_evsel__free_counts(evsel);
	perf_evsel__free_fd(evsel);
	perf_evsel__free_id(evsel);
	perf_evsel__free_config_terms(evsel);
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 5d420209505e..5b8bc1fd943d 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1040,8 +1040,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,

	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
					iter->evsel, al, max_stack_depth);
-	if (err)
+	if (err) {
+		map__put(alm);
		return err;
+	}

	err = iter->ops->prepare_entry(iter, al);
	if (err)
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index d0b92d374ba9..29e2bb304168 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -2109,6 +2109,7 @@ static bool is_event_supported(u8 type, unsigned config)
		perf_evsel__delete(evsel);
	}

+	thread_map__put(tmap);
	return ret;
}

@@ -2179,6 +2180,7 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
				printf("  %-50s [%s]\n", buf, "SDT event");
				free(buf);
			}
+			free(path);
		} else
			printf("  %-50s [%s]\n", nd->s, "SDT event");
		if (nd2) {
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 7a1b20ec5216..d1b2348db0f9 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -4588,6 +4588,9 @@ int fork_it(char **argv)
		signal(SIGQUIT, SIG_IGN);
		if (waitpid(child_pid, &status, 0) == -1)
			err(status, "waitpid");
+
+		if (WIFEXITED(status))
+			status = WEXITSTATUS(status);
	}
	/*
	 * n.b. fork_it() does not check for errors from for_all_cpus()
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index a0591d06c61b..913539aea645 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1860,6 +1860,7 @@ static struct bpf_test tests[] = {
		},
		.result = REJECT,
		.errstr = "invalid stack off=-79992 size=8",
+		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
	},
	{
		"PTR_TO_STACK store/load - out of bounds high",
@@ -2243,6 +2244,8 @@ static struct bpf_test tests[] = {
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
	},
	{
		"unpriv: cmp of stack pointer",
@@ -7013,6 +7016,7 @@ static struct bpf_test tests[] = {
		},
		.fixup_map1 = { 3 },
		.errstr = "pointer offset 1073741822",
+		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
		.result = REJECT
	},
	{
@@ -7034,6 +7038,7 @@ static struct bpf_test tests[] = {
		},
		.fixup_map1 = { 3 },
		.errstr = "pointer offset -1073741822",
+		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
		.result = REJECT
	},
	{
@@ -7203,6 +7208,7 @@ static struct bpf_test tests[] = {
			BPF_EXIT_INSN()
		},
		.errstr = "fp pointer offset 1073741822",
+		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
		.result = REJECT
	},
	{
diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c
index ed8c9d360c0f..4225d462701d 100644
--- a/tools/usb/usbip/libsrc/vhci_driver.c
+++ b/tools/usb/usbip/libsrc/vhci_driver.c
@@ -150,7 +150,7 @@ static int get_nports(struct udev_device *hc_device)

static int vhci_hcd_filter(const struct dirent *dirent)
{
-	return strcmp(dirent->d_name, "vhci_hcd") >= 0;
+	return !strncmp(dirent->d_name, "vhci_hcd.", 9);
}

static int get_ncontrollers(void)