diff --git a/Makefile b/Makefile
index ce741a9f5b1c..ba6a94cf354b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 10
-SUBLEVEL = 90
+SUBLEVEL = 91
EXTRAVERSION =
NAME = TOSSUG Baby Fish

diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 1ba358ba16b8..7d4ce431107b 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -55,6 +55,14 @@ endif

comma = ,

+#
+# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
+# later may result in code being generated that handles signed short and signed
+# char struct members incorrectly. So disable it.
+# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
+#
+KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra)
+
# This selects which instruction set is used.
# Note that GCC does not numerically define an architecture version
# macro, but instead defines a whole series of macros which makes
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index b5d458769b65..250319040de2 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -278,6 +278,7 @@ retry:
* starvation.
*/
mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ mm_flags |= FAULT_FLAG_TRIED;
goto retry;
}
}
diff --git a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h
index 5a822bb790f7..066e74f666ae 100644
--- a/arch/m68k/include/asm/linkage.h
+++ b/arch/m68k/include/asm/linkage.h
@@ -4,4 +4,34 @@
#define __ALIGN .align 4
#define __ALIGN_STR ".align 4"

+/*
+ * Make sure the compiler doesn't do anything stupid with the
+ * arguments on the stack - they are owned by the *caller*, not
+ * the callee. This just fools gcc into not spilling into them,
+ * and keeps it from doing tailcall recursion and/or using the
+ * stack slots for temporaries, since they are live and "used"
+ * all the way to the end of the function.
+ */
+#define asmlinkage_protect(n, ret, args...) \
+ __asmlinkage_protect##n(ret, ##args)
+#define __asmlinkage_protect_n(ret, args...) \
+ __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
+#define __asmlinkage_protect0(ret) \
+ __asmlinkage_protect_n(ret)
+#define __asmlinkage_protect1(ret, arg1) \
+ __asmlinkage_protect_n(ret, "m" (arg1))
+#define __asmlinkage_protect2(ret, arg1, arg2) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
+#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
+#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4))
+#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5))
+#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5), "m" (arg6))
+
#endif
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 23129d1005db..5fa55b80b7b6 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -91,7 +91,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
- if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
dma_flag = __GFP_DMA;
else
#endif
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 8ee842ce3aba..0473d31b3a4d 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -106,6 +106,7 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
struct msi_desc *entry;
+ irq_hw_number_t hwirq;

if (WARN_ON(!phb))
return;
@@ -113,10 +114,10 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&phb->msi_bmp,
- virq_to_hw(entry->irq) - phb->msi_base, 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
}
}
#endif /* CONFIG_PCI_MSI */
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index ab02db3d02d8..6616fa619945 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -108,15 +108,16 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
struct fsl_msi *msi_data;
+ irq_hw_number_t hwirq;

list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
msi_data = irq_get_chip_data(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_data->bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}

return;
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
index 38e62382070c..9e14d82287a1 100644
--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
@@ -74,6 +74,7 @@ static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
+ irq_hw_number_t hwirq;

pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);

@@ -81,10 +82,11 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
if (entry->irq == NO_IRQ)
continue;

+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
- virq_to_hw(entry->irq), ALLOC_CHUNK);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
+ hwirq, ALLOC_CHUNK);
}

return;
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index 9a7aa0ed9c1c..dfc3486bf802 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -124,15 +124,16 @@ static int u3msi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
+ irq_hw_number_t hwirq;

list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;

+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
}

return;
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
index 43948da837a7..c3e65129940b 100644
--- a/arch/powerpc/sysdev/ppc4xx_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -121,16 +121,17 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *entry;
struct ppc4xx_msi *msi_data = &ppc4xx_msi;
+ irq_hw_number_t hwirq;

dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");

list_for_each_entry(entry, &dev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_data->bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}
}

diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 142810c457dc..34df5c22df90 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -77,11 +77,10 @@ int copy_to_user(void __user *dst, const void *src, unsigned size)
}

static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
+int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
int ret = 0;

- might_fault();
if (!__builtin_constant_p(size))
return copy_user_generic(dst, (__force void *)src, size);
switch (size) {
@@ -121,11 +120,17 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
}

static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
+int __copy_from_user(void *dst, const void __user *src, unsigned size)
+{
+ might_fault();
+ return __copy_from_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
+int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
int ret = 0;

- might_fault();
if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst, src, size);
switch (size) {
@@ -165,6 +170,13 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
}

static __always_inline __must_check
+int __copy_to_user(void __user *dst, const void *src, unsigned size)
+{
+ might_fault();
+ return __copy_to_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
int ret = 0;
@@ -220,13 +232,13 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
- return copy_user_generic(dst, (__force const void *)src, size);
+ return __copy_from_user_nocheck(dst, (__force const void *)src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
- return copy_user_generic((__force void *)dst, src, size);
+ return __copy_to_user_nocheck((__force void *)dst, src, size);
}

extern long __copy_user_nocache(void *dst, const void __user *src,
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 033eb44dc661..9620d18cb638 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -350,6 +350,13 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
apic_write(APIC_LVTT, lvtt_value);

if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
+ /*
+ * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
+ * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized.
+ * According to Intel, MFENCE can do the serialization here.
+ */
+ asm volatile("mfence" : : : "memory");
+
printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
return;
}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 27e3a14fc917..9714a7aa32fc 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -20,6 +20,7 @@
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
+#include <asm/geode.h>

unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
@@ -806,15 +807,17 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static void __init check_system_tsc_reliable(void)
{
-#ifdef CONFIG_MGEODE_LX
- /* RTSC counts during suspend */
+#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
+ if (is_geode_lx()) {
+ /* RTSC counts during suspend */
#define RTSC_SUSP 0x100
- unsigned long res_low, res_high;
+ unsigned long res_low, res_high;

- rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
- /* Geode_LX - the OLPC CPU has a very reliable TSC */
- if (res_low & RTSC_SUSP)
- tsc_clocksource_reliable = 1;
+ rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
+ /* Geode_LX - the OLPC CPU has a very reliable TSC */
+ if (res_low & RTSC_SUSP)
+ tsc_clocksource_reliable = 1;
+ }
#endif
if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
tsc_clocksource_reliable = 1;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 224d2ef754cc..3deddd796f76 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -496,7 +496,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);

if (svm->vmcb->control.next_rip != 0) {
- WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
+ WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
svm->next_rip = svm->vmcb->control.next_rip;
}

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 2db3f30bed75..b04e50262088 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1163,7 +1163,7 @@ void mark_rodata_ro(void)
* has been zapped already via cleanup_highmem().
*/
all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
- set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
+ set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

rodata_test();

diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 13d926282c89..511630db00a8 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -33,6 +33,10 @@
#include <linux/memblock.h>
#include <linux/edd.h>

+#ifdef CONFIG_KEXEC_CORE
+#include <linux/kexec.h>
+#endif
+
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
@@ -1744,6 +1748,21 @@ static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
.notifier_call = xen_hvm_cpu_notify,
};

+#ifdef CONFIG_KEXEC_CORE
+static void xen_hvm_shutdown(void)
+{
+ native_machine_shutdown();
+ if (kexec_in_progress)
+ xen_reboot(SHUTDOWN_soft_reset);
+}
+
+static void xen_hvm_crash_shutdown(struct pt_regs *regs)
+{
+ native_machine_crash_shutdown(regs);
+ xen_reboot(SHUTDOWN_soft_reset);
+}
+#endif
+
static void __init xen_hvm_guest_init(void)
{
init_hvm_pv_info();
@@ -1758,6 +1777,10 @@ static void __init xen_hvm_guest_init(void)
x86_init.irqs.intr_init = xen_init_IRQ;
xen_hvm_init_time_ops();
xen_hvm_init_mmu_ops();
+#ifdef CONFIG_KEXEC_CORE
+ machine_ops.shutdown = xen_hvm_shutdown;
+ machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
+#endif
}

static bool __init xen_hvm_platform(void)
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index b41994fd8460..2670bebb058b 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -23,8 +23,7 @@ static struct dentry *regmap_debugfs_root;
/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
{
- snprintf(buf, buf_size, "%x", max_val);
- return strlen(buf);
+ return snprintf(NULL, 0, "%x", max_val);
}

static ssize_t regmap_name_read_file(struct file *file,
@@ -419,7 +418,7 @@ static ssize_t regmap_access_read_file(struct file *file,
/* If we're in the region the user is trying to read */
if (p >= *ppos) {
/* ...but not beyond it */
- if (buf_pos >= count - 1 - tot_len)
+ if (buf_pos + tot_len + 1 >= count)
break;

/* Format the register */
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index d752c96d6090..bdceb60998d3 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -58,6 +58,9 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
struct drm_master *master = file_priv->master;
int ret = 0;

+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
++file_priv->lock_count;

if (lock->context == DRM_KERNEL_CONTEXT) {
@@ -151,6 +154,9 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
struct drm_lock *lock = data;
struct drm_master *master = file_priv->master;

+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
task_pid_nr(current), lock->context);
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index d1b30c66d604..727a9dc44b94 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1467,27 +1467,22 @@ extern struct mutex qib_mutex;
* first to avoid possible serial port delays from printk.
*/
#define qib_early_err(dev, fmt, ...) \
- do { \
- dev_err(dev, fmt, ##__VA_ARGS__); \
- } while (0)
+ dev_err(dev, fmt, ##__VA_ARGS__)

#define qib_dev_err(dd, fmt, ...) \
- do { \
- dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
- qib_get_unit_name((dd)->unit), ##__VA_ARGS__); \
- } while (0)
+ dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
+ qib_get_unit_name((dd)->unit), ##__VA_ARGS__)

-#define qib_dev_porterr(dd, port, fmt, ...) \
- do { \
- dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
- qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
- ##__VA_ARGS__); \
- } while (0)
+#define qib_dev_warn(dd, fmt, ...) \
+ dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
+ qib_get_unit_name((dd)->unit), ##__VA_ARGS__)

+#define qib_dev_porterr(dd, port, fmt, ...) \
+ dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
+ qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
+ ##__VA_ARGS__)
#define qib_devinfo(pcidev, fmt, ...) \
- do { \
- dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__); \
- } while (0)
+ dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__)

/*
* this is used for formatting hw error messages...
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 3b9afccaaade..eabe54738be6 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -86,6 +86,10 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
* unrestricted LKEY.
*/
rkt->gen++;
+ /*
+ * bits are capped in qib_verbs.c to insure enough bits
+ * for generation number
+ */
mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
<< 8);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 904c384aa361..6c809bf50128 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -40,6 +40,7 @@
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/random.h>
+#include <linux/vmalloc.h>

#include "qib.h"
#include "qib_common.h"
@@ -2084,10 +2085,16 @@ int qib_register_ib_device(struct qib_devdata *dd)
* the LKEY). The remaining bits act as a generation number or tag.
*/
spin_lock_init(&dev->lk_table.lock);
+ /* insure generation is at least 4 bits see keys.c */
+ if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) {
+ qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
+ ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS);
+ ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS;
+ }
dev->lk_table.max = 1 << ib_qib_lkey_table_size;
lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
dev->lk_table.table = (struct qib_mregion __rcu **)
- __get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
+ vmalloc(lk_tab_size);
if (dev->lk_table.table == NULL) {
ret = -ENOMEM;
goto err_lk;
@@ -2260,7 +2267,7 @@ err_tx:
sizeof(struct qib_pio_header),
dev->pio_hdrs, dev->pio_hdrs_phys);
err_hdrs:
- free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
+ vfree(dev->lk_table.table);
err_lk:
kfree(dev->qp_table);
err_qpt:
@@ -2314,8 +2321,7 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
sizeof(struct qib_pio_header),
dev->pio_hdrs, dev->pio_hdrs_phys);
lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
- free_pages((unsigned long) dev->lk_table.table,
- get_order(lk_tab_size));
+ vfree(dev->lk_table.table);
kfree(dev->qp_table);
}

diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index aff8b2c17886..e4f9fff51890 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -645,6 +645,8 @@ struct qib_qpn_table {
struct qpn_map map[QPNMAP_ENTRIES];
};

+#define MAX_LKEY_TABLE_BITS 23
+
struct qib_lkey_table {
spinlock_t lock; /* protect changes in this struct */
u32 next; /* next unused index (speeds search) */
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index 3ee198b65843..cc7ece1712b5 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -435,7 +435,7 @@ int wf_unregister_client(struct notifier_block *nb)
{
mutex_lock(&wf_lock);
blocking_notifier_chain_unregister(&wf_client_list, nb);
- wf_client_count++;
+ wf_client_count--;
if (wf_client_count == 0)
wf_stop_thread();
mutex_unlock(&wf_lock);
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
index b04d1f904d07..2eca9084defe 100644
--- a/drivers/md/dm-cache-policy-cleaner.c
+++ b/drivers/md/dm-cache-policy-cleaner.c
@@ -434,7 +434,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
static struct dm_cache_policy_type wb_policy_type = {
.name = "cleaner",
.version = {1, 0, 0},
- .hint_size = 0,
+ .hint_size = 4,
.owner = THIS_MODULE,
.create = wb_create
};
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 84cddccc0249..4805c15185c2 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -325,8 +325,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
*/
if (min_region_size > (1 << 13)) {
/* If not a power of 2, make it the next power of 2 */
- if (min_region_size & (min_region_size - 1))
- region_size = 1 << fls(region_size);
+ region_size = roundup_pow_of_two(min_region_size);
DMINFO("Choosing default region size of %lu sectors",
region_size);
} else {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 37ff00d014b4..7c45286e2662 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5306,6 +5306,8 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
static void __md_stop(struct mddev *mddev)
{
mddev->ready = 0;
+ /* Ensure ->event_work is done */
+ flush_workqueue(md_misc_wq);
mddev->pers->stop(mddev);
if (mddev->pers->sync_request && mddev->to_remove == NULL)
mddev->to_remove = &md_redundancy_group;
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index bf2b80d5c470..8731b6ea026b 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key);

extern struct dm_block_validator btree_node_validator;

+/*
+ * Value type for upper levels of multi-level btrees.
+ */
+extern void init_le64_type(struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt);
+
#endif /* DM_BTREE_INTERNAL_H */
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index a03178e91a79..7c0d75547ccf 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
return r;
}

-static struct dm_btree_value_type le64_type = {
- .context = NULL,
- .size = sizeof(__le64),
- .inc = NULL,
- .dec = NULL,
- .equal = NULL
-};
-
int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, dm_block_t *new_root)
{
@@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
int index = 0, r = 0;
struct shadow_spine spine;
struct btree_node *n;
+ struct dm_btree_value_type le64_vt;

+ init_le64_type(info->tm, &le64_vt);
init_shadow_spine(&spine, info);
for (level = 0; level < info->levels; level++) {
r = remove_raw(&spine, info,
(level == last_level ?
- &info->value_type : &le64_type),
+ &info->value_type : &le64_vt),
root, keys[level], (unsigned *)&index);
if (r < 0)
break;
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index 1b5e13ec7f96..0dee514ba4c5 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s)
{
return s->root;
}
+
+static void le64_inc(void *context, const void *value_le)
+{
+ struct dm_transaction_manager *tm = context;
+ __le64 v_le;
+
+ memcpy(&v_le, value_le, sizeof(v_le));
+ dm_tm_inc(tm, le64_to_cpu(v_le));
+}
+
+static void le64_dec(void *context, const void *value_le)
+{
+ struct dm_transaction_manager *tm = context;
+ __le64 v_le;
+
+ memcpy(&v_le, value_le, sizeof(v_le));
+ dm_tm_dec(tm, le64_to_cpu(v_le));
+}
+
+static int le64_equal(void *context, const void *value1_le, const void *value2_le)
+{
+ __le64 v1_le, v2_le;
+
+ memcpy(&v1_le, value1_le, sizeof(v1_le));
+ memcpy(&v2_le, value2_le, sizeof(v2_le));
+ return v1_le == v2_le;
+}
+
+void init_le64_type(struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt)
+{
+ vt->context = tm;
+ vt->size = sizeof(__le64);
+ vt->inc = le64_inc;
+ vt->dec = le64_dec;
+ vt->equal = le64_equal;
+}
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index e3ecb0b824b5..79233b051da0 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -651,12 +651,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
struct btree_node *n;
struct dm_btree_value_type le64_type;

- le64_type.context = NULL;
- le64_type.size = sizeof(__le64);
- le64_type.inc = NULL;
- le64_type.dec = NULL;
- le64_type.equal = NULL;
-
+ init_le64_type(info->tm, &le64_type);
init_shadow_spine(&spine, info);

for (level = 0; level < (info->levels - 1); level++) {
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index bf79def40126..8822e880833b 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -931,6 +931,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
goto bad;
}

+ if (data_size > ubi->leb_size) {
+ ubi_err("bad data_size");
+ goto bad;
+ }
+
if (vol_type == UBI_VID_STATIC) {
/*
* Although from high-level point of view static volumes may
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index d77b1c1d7c72..bebf49e0dbe9 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -651,6 +651,7 @@ static int init_volumes(struct ubi_device *ubi,
if (ubi->corr_peb_count)
ubi_err("%d PEBs are corrupted and not used",
ubi->corr_peb_count);
+ return -ENOSPC;
}
ubi->rsvd_pebs += reserved_pebs;
ubi->avail_pebs -= reserved_pebs;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index c08254016fe8..3375bfb1b246 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1978,6 +1978,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (ubi->corr_peb_count)
ubi_err("%d PEBs are corrupted and not used",
ubi->corr_peb_count);
+ err = -ENOSPC;
goto out_free;
}
ubi->avail_pebs -= reserved_pebs;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6b5baf01512d..c0ed7c802819 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -876,6 +876,23 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
}
}

+static struct slave *bond_get_old_active(struct bonding *bond,
+ struct slave *new_active)
+{
+ struct slave *slave;
+ int i;
+
+ bond_for_each_slave(bond, slave, i) {
+ if (slave == new_active)
+ continue;
+
+ if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
+ return slave;
+ }
+
+ return NULL;
+}
+
/*
* bond_do_fail_over_mac
*
@@ -919,6 +936,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);

+ if (!old_active)
+ old_active = bond_get_old_active(bond, new_active);
+
if (old_active) {
memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
memcpy(saddr.sa_data, old_active->dev->dev_addr,
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 5f57e3d35e26..6adf9abdf955 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -225,6 +225,17 @@ static const struct file_operations twa_fops = {
.llseek = noop_llseek,
};

+/*
+ * The controllers use an inline buffer instead of a mapped SGL for small,
+ * single entry buffers. Note that we treat a zero-length transfer like
+ * a mapped SGL.
+ */
+static bool twa_command_mapped(struct scsi_cmnd *cmd)
+{
+ return scsi_sg_count(cmd) != 1 ||
+ scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
+}
+
/* This function will complete an aen request from the isr */
static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
{
@@ -1351,7 +1362,8 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
}

/* Now complete the io */
- scsi_dma_unmap(cmd);
+ if (twa_command_mapped(cmd))
+ scsi_dma_unmap(cmd);
cmd->scsi_done(cmd);
tw_dev->state[request_id] = TW_S_COMPLETED;
twa_free_request_id(tw_dev, request_id);
@@ -1594,7 +1606,8 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
struct scsi_cmnd *cmd = tw_dev->srb[i];

cmd->result = (DID_RESET << 16);
- scsi_dma_unmap(cmd);
+ if (twa_command_mapped(cmd))
+ scsi_dma_unmap(cmd);
cmd->scsi_done(cmd);
}
}
@@ -1777,12 +1790,14 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
switch (retval) {
case SCSI_MLQUEUE_HOST_BUSY:
- scsi_dma_unmap(SCpnt);
+ if (twa_command_mapped(SCpnt))
+ scsi_dma_unmap(SCpnt);
twa_free_request_id(tw_dev, request_id);
break;
case 1:
SCpnt->result = (DID_ERROR << 16);
- scsi_dma_unmap(SCpnt);
+ if (twa_command_mapped(SCpnt))
+ scsi_dma_unmap(SCpnt);
done(SCpnt);
tw_dev->state[request_id] = TW_S_COMPLETED;
twa_free_request_id(tw_dev, request_id);
@@ -1843,8 +1858,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
/* Map sglist from scsi layer to cmd packet */

if (scsi_sg_count(srb)) {
- if ((scsi_sg_count(srb) == 1) &&
- (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
+ if (!twa_command_mapped(srb)) {
if (srb->sc_data_direction == DMA_TO_DEVICE ||
srb->sc_data_direction == DMA_BIDIRECTIONAL)
scsi_sg_copy_to_buffer(srb,
@@ -1917,7 +1931,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
{
struct scsi_cmnd *cmd = tw_dev->srb[request_id];

- if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
+ if (!twa_command_mapped(cmd) &&
(cmd->sc_data_direction == DMA_FROM_DEVICE ||
cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
if (scsi_sg_count(cmd) == 1) {
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 3668b1b23b5a..9acbc885239b 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1849,8 +1849,17 @@ int scsi_error_handler(void *data)
* We never actually get interrupted because kthread_run
* disables signal delivery for the created thread.
*/
- while (!kthread_should_stop()) {
+ while (true) {
+ /*
+ * The sequence in kthread_stop() sets the stop flag first
+ * then wakes the process. To avoid missed wakeups, the task
+ * should always be in a non running state before the stop
+ * flag is checked
+ */
set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop())
+ break;
+
if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
shost->host_failed != shost->host_busy) {
SCSI_LOG_ERROR_RECOVERY(1,
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index cc42ee5e19fb..787cfbaa7755 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -546,6 +546,10 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
if (!(sccr1_reg & SSCR1_TIE))
mask &= ~SSSR_TFS;

+ /* Ignore RX timeout interrupt if it is disabled */
+ if (!(sccr1_reg & SSCR1_TINTE))
+ mask &= ~SSSR_TINT;
+
if (!(status & mask))
return IRQ_NONE;

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 32b7bb111eb6..7c159634aaae 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1030,8 +1030,7 @@ static struct class spi_master_class = {
*
* The caller is responsible for assigning the bus number and initializing
* the master's methods before calling spi_register_master(); and (after errors
- * adding the device) calling spi_master_put() and kfree() to prevent a memory
- * leak.
+ * adding the device) calling spi_master_put() to prevent a memory leak.
*/
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
index e3960745f506..49cb69206896 100644
--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
+++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c
@@ -119,10 +119,21 @@ static int adl_pci7x3x_do_insn_bits(struct comedi_device *dev,
unsigned int bits = data[1];

if (mask) {
+ unsigned int val;
+
s->state &= ~mask;
s->state |= (bits & mask);
-
- outl(s->state, dev->iobase + reg);
+ val = s->state;
+ if (s->n_chan == 16) {
+ /*
+ * It seems the PCI-7230 needs the 16-bit DO state
+ * to be shifted left by 16 bits before being written
+ * to the 32-bit register. Set the value in both
+ * halves of the register to be sure.
+ */
+ val |= val << 16;
+ }
+ outl(val, dev->iobase + reg);
}

/*
diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c
index 4299cf45f947..5e1f16c36b49 100644
--- a/drivers/staging/speakup/fakekey.c
+++ b/drivers/staging/speakup/fakekey.c
@@ -81,6 +81,7 @@ void speakup_fake_down_arrow(void)
__this_cpu_write(reporting_keystroke, true);
input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
+ input_sync(virt_keyboard);
__this_cpu_write(reporting_keystroke, false);

/* reenable preemption */
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 652438325197..85756bd36746 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -114,7 +114,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
cfgno, inum, asnum, ep->desc.bEndpointAddress);
ep->ss_ep_comp.bmAttributes = 16;
} else if (usb_endpoint_xfer_isoc(&ep->desc) &&
- desc->bmAttributes > 2) {
+ USB_SS_MULT(desc->bmAttributes) > 3) {
dev_warn(ddev, "Isoc endpoint has Mult of %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to 3\n", desc->bmAttributes + 1,
@@ -123,7 +123,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
}

if (usb_endpoint_xfer_isoc(&ep->desc))
- max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
+ max_tx = (desc->bMaxBurst + 1) *
+ (USB_SS_MULT(desc->bmAttributes)) *
usb_endpoint_maxp(&ep->desc);
else if (usb_endpoint_xfer_int(&ep->desc))
max_tx = usb_endpoint_maxp(&ep->desc) *
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index b73f3031a660..d4db4ea4a92d 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -53,6 +53,13 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },

+ /* Logitech ConferenceCam CC3000e */
+ { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT },
+
+ /* Logitech PTZ Pro Camera */
+ { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Logitech Quickcam Fusion */
{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },

@@ -77,6 +84,12 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Philips PSC805 audio device */
{ USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },

+ /* Plantronic Audio 655 DSP */
+ { USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Plantronic Audio 648 USB */
+ { USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Artisman Watchdog Dongle */
{ USB_DEVICE(0x04b4, 0x0526), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 31bed5f7d0eb..87e82e6b0c38 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1473,10 +1473,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
* use Event Data TRBs, and we don't chain in a link TRB on short
* transfers, we're basically dividing by 1.
*
- * xHCI 1.0 specification indicates that the Average TRB Length should
- * be set to 8 for control endpoints.
+ * xHCI 1.0 and 1.1 specification indicates that the Average TRB Length
+ * should be set to 8 for control endpoints.
*/
- if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
+ if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
else
ep_ctx->tx_info |=
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index fde0277adc2c..4ba6974dd4b6 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3167,9 +3167,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct xhci_td *td;
struct scatterlist *sg;
int num_sgs;
- int trb_buff_len, this_sg_len, running_total;
+ int trb_buff_len, this_sg_len, running_total, ret;
unsigned int total_packet_count;
+ bool zero_length_needed;
bool first_trb;
+ int last_trb_num;
u64 addr;
bool more_trbs_coming;

@@ -3185,13 +3187,27 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
usb_endpoint_maxp(&urb->ep->desc));

- trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
+ ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
num_trbs, urb, 0, mem_flags);
- if (trb_buff_len < 0)
- return trb_buff_len;
+ if (ret < 0)
+ return ret;

urb_priv = urb->hcpriv;
+
+ /* Deal with URB_ZERO_PACKET - need one more td/trb */
+ zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
+ urb_priv->length == 2;
+ if (zero_length_needed) {
+ num_trbs++;
+ xhci_dbg(xhci, "Creating zero length td.\n");
+ ret = prepare_transfer(xhci, xhci->devs[slot_id],
+ ep_index, urb->stream_id,
+ 1, urb, 1, mem_flags);
+ if (ret < 0)
+ return ret;
+ }
+
td = urb_priv->td[0];

/*
@@ -3221,6 +3237,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
trb_buff_len = urb->transfer_buffer_length;

first_trb = true;
+ last_trb_num = zero_length_needed ? 2 : 1;
/* Queue the first TRB, even if it's zero-length */
do {
u32 field = 0;
@@ -3238,12 +3255,15 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Chain all the TRBs together; clear the chain bit in the last
* TRB to indicate it's the last TRB in the chain.
*/
- if (num_trbs > 1) {
+ if (num_trbs > last_trb_num) {
field |= TRB_CHAIN;
- } else {
- /* FIXME - add check for ZERO_PACKET flag before this */
+ } else if (num_trbs == last_trb_num) {
td->last_trb = ep_ring->enqueue;
field |= TRB_IOC;
+ } else if (zero_length_needed && num_trbs == 1) {
+ trb_buff_len = 0;
+ urb_priv->td[1]->last_trb = ep_ring->enqueue;
+ field |= TRB_IOC;
}

/* Only set interrupt on short packet for IN endpoints */
@@ -3305,7 +3325,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (running_total + trb_buff_len > urb->transfer_buffer_length)
trb_buff_len =
urb->transfer_buffer_length - running_total;
- } while (running_total < urb->transfer_buffer_length);
+ } while (num_trbs > 0);

check_trb_math(urb, num_trbs, running_total);
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
@@ -3323,7 +3343,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
int num_trbs;
struct xhci_generic_trb *start_trb;
bool first_trb;
+ int last_trb_num;
bool more_trbs_coming;
+ bool zero_length_needed;
int start_cycle;
u32 field, length_field;

@@ -3354,7 +3376,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
num_trbs++;
running_total += TRB_MAX_BUFF_SIZE;
}
- /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */

ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
@@ -3363,6 +3384,20 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
return ret;

urb_priv = urb->hcpriv;
+
+ /* Deal with URB_ZERO_PACKET - need one more td/trb */
+ zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
+ urb_priv->length == 2;
+ if (zero_length_needed) {
+ num_trbs++;
+ xhci_dbg(xhci, "Creating zero length td.\n");
+ ret = prepare_transfer(xhci, xhci->devs[slot_id],
+ ep_index, urb->stream_id,
+ 1, urb, 1, mem_flags);
+ if (ret < 0)
+ return ret;
+ }
+
td = urb_priv->td[0];

/*
@@ -3384,7 +3419,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
trb_buff_len = urb->transfer_buffer_length;

first_trb = true;
-
+ last_trb_num = zero_length_needed ? 2 : 1;
/* Queue the first TRB, even if it's zero-length */
do {
u32 remainder = 0;
@@ -3401,12 +3436,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Chain all the TRBs together; clear the chain bit in the last
* TRB to indicate it's the last TRB in the chain.
*/
- if (num_trbs > 1) {
+ if (num_trbs > last_trb_num) {
field |= TRB_CHAIN;
- } else {
- /* FIXME - add check for ZERO_PACKET flag before this */
+ } else if (num_trbs == last_trb_num) {
td->last_trb = ep_ring->enqueue;
field |= TRB_IOC;
+ } else if (zero_length_needed && num_trbs == 1) {
+ trb_buff_len = 0;
+ urb_priv->td[1]->last_trb = ep_ring->enqueue;
+ field |= TRB_IOC;
}

/* Only set interrupt on short packet for IN endpoints */
@@ -3444,7 +3482,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
trb_buff_len = urb->transfer_buffer_length - running_total;
if (trb_buff_len > TRB_MAX_BUFF_SIZE)
trb_buff_len = TRB_MAX_BUFF_SIZE;
- } while (running_total < urb->transfer_buffer_length);
+ } while (num_trbs > 0);

check_trb_math(urb, num_trbs, running_total);
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
@@ -3511,8 +3549,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (start_cycle == 0)
field |= 0x1;

- /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
- if (xhci->hci_version == 0x100) {
+ /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
+ if (xhci->hci_version >= 0x100) {
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
field |= TRB_TX_TYPE(TRB_DATA_IN);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1f901fc25590..a3431e90345f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -139,7 +139,8 @@ static int xhci_start(struct xhci_hcd *xhci)
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
if (!ret)
- xhci->xhc_state &= ~XHCI_STATE_HALTED;
+ xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+
return ret;
}

@@ -1299,6 +1300,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)

if (usb_endpoint_xfer_isoc(&urb->ep->desc))
size = urb->number_of_packets;
+ else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
+ urb->transfer_buffer_length > 0 &&
+ urb->transfer_flags & URB_ZERO_PACKET &&
+ !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
+ size = 2;
else
size = 1;

diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 096438e4fb0c..c918075e5eae 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -276,6 +276,10 @@ static void option_instat_callback(struct urb *urb);
#define ZTE_PRODUCT_MF622 0x0001
#define ZTE_PRODUCT_MF628 0x0015
#define ZTE_PRODUCT_MF626 0x0031
+#define ZTE_PRODUCT_ZM8620_X 0x0396
+#define ZTE_PRODUCT_ME3620_MBIM 0x0426
+#define ZTE_PRODUCT_ME3620_X 0x1432
+#define ZTE_PRODUCT_ME3620_L 0x1433
#define ZTE_PRODUCT_AC2726 0xfff1
#define ZTE_PRODUCT_CDMA_TECH 0xfffe
#define ZTE_PRODUCT_AC8710T 0xffff
@@ -549,6 +553,18 @@ static const struct option_blacklist_info zte_mc2716_z_blacklist = {
.sendsetup = BIT(1) | BIT(2) | BIT(3),
};

+static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
+ .reserved = BIT(2) | BIT(3) | BIT(4),
+};
+
+static const struct option_blacklist_info zte_me3620_xl_blacklist = {
+ .reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
+static const struct option_blacklist_info zte_zm8620_x_blacklist = {
+ .reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
static const struct option_blacklist_info huawei_cdc12_blacklist = {
.reserved = BIT(1) | BIT(2),
};
@@ -1579,6 +1595,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
+ .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
+ .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
+ .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
+ .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 5e3dd9f87ff5..ae79c2245a73 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -81,6 +81,8 @@ static int whiteheat_firmware_download(struct usb_serial *serial,
static int whiteheat_firmware_attach(struct usb_serial *serial);

/* function prototypes for the Connect Tech WhiteHEAT serial converter */
+static int whiteheat_probe(struct usb_serial *serial,
+ const struct usb_device_id *id);
static int whiteheat_attach(struct usb_serial *serial);
static void whiteheat_release(struct usb_serial *serial);
static int whiteheat_port_probe(struct usb_serial_port *port);
@@ -117,6 +119,7 @@ static struct usb_serial_driver whiteheat_device = {
.description = "Connect Tech - WhiteHEAT",
.id_table = id_table_std,
.num_ports = 4,
+ .probe = whiteheat_probe,
.attach = whiteheat_attach,
.release = whiteheat_release,
.port_probe = whiteheat_port_probe,
@@ -218,6 +221,34 @@ static int whiteheat_firmware_attach(struct usb_serial *serial)
/*****************************************************************************
* Connect Tech's White Heat serial driver functions
*****************************************************************************/
+
+static int whiteheat_probe(struct usb_serial *serial,
+ const struct usb_device_id *id)
+{
+ struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *endpoint;
+ size_t num_bulk_in = 0;
+ size_t num_bulk_out = 0;
+ size_t min_num_bulk;
+ unsigned int i;
+
+ iface_desc = serial->interface->cur_altsetting;
+
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
+ endpoint = &iface_desc->endpoint[i].desc;
+ if (usb_endpoint_is_bulk_in(endpoint))
+ ++num_bulk_in;
+ if (usb_endpoint_is_bulk_out(endpoint))
+ ++num_bulk_out;
+ }
+
+ min_num_bulk = COMMAND_PORT + 1;
+ if (num_bulk_in < min_num_bulk || num_bulk_out < min_num_bulk)
+ return -ENODEV;
+
+ return 0;
+}
+
static int whiteheat_attach(struct usb_serial *serial)
{
struct usb_serial_port *command_port;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index d20db6437723..f22beda91ffc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4650,7 +4650,8 @@ void btrfs_evict_inode(struct inode *inode)
goto no_delete;
}
/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
- btrfs_wait_ordered_range(inode, 0, (u64)-1);
+ if (!special_file(inode->i_mode))
+ btrfs_wait_ordered_range(inode, 0, (u64)-1);

if (root->fs_info->log_root_recovering) {
BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index e12f258a5ffa..66202da4c961 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -48,9 +48,13 @@ change_conf(struct TCP_Server_Info *server)
break;
default:
server->echoes = true;
- server->oplocks = true;
+ if (enable_oplocks) {
+ server->oplocks = true;
+ server->oplock_credits = 1;
+ } else
+ server->oplocks = false;
+
server->echo_credits = 1;
- server->oplock_credits = 1;
}
server->credits -= server->echo_credits + server->oplock_credits;
return 0;
diff --git a/fs/dcache.c b/fs/dcache.c
index f1e801785976..17222fa5bdc6 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2534,6 +2534,8 @@ static int prepend_path(const struct path *path,
struct dentry *dentry = path->dentry;
struct vfsmount *vfsmnt = path->mnt;
struct mount *mnt = real_mount(vfsmnt);
+ char *orig_buffer = *buffer;
+ int orig_len = *buflen;
bool slash = false;
int error = 0;

@@ -2541,6 +2543,14 @@ static int prepend_path(const struct path *path,
struct dentry * parent;

if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
+ /* Escaped? */
+ if (dentry != vfsmnt->mnt_root) {
+ *buffer = orig_buffer;
+ *buflen = orig_len;
+ slash = false;
+ error = 3;
+ goto global_root;
+ }
/* Global root? */
if (!mnt_has_parent(mnt))
goto global_root;
diff --git a/fs/namei.c b/fs/namei.c
index 036c21246d6a..157c3dbacf6c 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -473,6 +473,24 @@ void path_put(const struct path *path)
}
EXPORT_SYMBOL(path_put);

+/**
+ * path_connected - Verify that a path->dentry is below path->mnt.mnt_root
+ * @path: nameidate to verify
+ *
+ * Rename can sometimes move a file or directory outside of a bind
+ * mount, path_connected allows those cases to be detected.
+ */
+static bool path_connected(const struct path *path)
+{
+ struct vfsmount *mnt = path->mnt;
+
+ /* Only bind mounts can have disconnected paths */
+ if (mnt->mnt_root == mnt->mnt_sb->s_root)
+ return true;
+
+ return is_subdir(path->dentry, mnt->mnt_root);
+}
+
/*
* Path walking has 2 modes, rcu-walk and ref-walk (see
* Documentation/filesystems/path-lookup.txt). In situations when we can't
@@ -1148,6 +1166,8 @@ static int follow_dotdot_rcu(struct nameidata *nd)
goto failed;
nd->path.dentry = parent;
nd->seq = seq;
+ if (unlikely(!path_connected(&nd->path)))
+ goto failed;
break;
}
if (!follow_up_rcu(&nd->path))
@@ -1231,7 +1251,7 @@ static void follow_mount(struct path *path)
}
}

-static void follow_dotdot(struct nameidata *nd)
+static int follow_dotdot(struct nameidata *nd)
{
set_root(nd);

@@ -1246,6 +1266,10 @@ static void follow_dotdot(struct nameidata *nd)
/* rare case of legitimate dget_parent()... */
nd->path.dentry = dget_parent(nd->path.dentry);
dput(old);
+ if (unlikely(!path_connected(&nd->path))) {
+ path_put(&nd->path);
+ return -ENOENT;
+ }
break;
}
if (!follow_up(&nd->path))
@@ -1253,6 +1277,7 @@ static void follow_dotdot(struct nameidata *nd)
}
follow_mount(&nd->path);
nd->inode = nd->path.dentry->d_inode;
+ return 0;
}

/*
@@ -1476,7 +1501,7 @@ static inline int handle_dots(struct nameidata *nd, int type)
if (follow_dotdot_rcu(nd))
return -ECHILD;
} else
- follow_dotdot(nd);
+ return follow_dotdot(nd);
}
return 0;
}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index aa023283cc8a..789814f27438 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1495,6 +1495,16 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
}

+ /*
+ * Sanity check length of allocation descriptors and extended attrs to
+ * avoid integer overflows
+ */
+ if (iinfo->i_lenEAttr > inode->i_sb->s_blocksize || iinfo->i_lenAlloc > inode->i_sb->s_blocksize)
+ return;
+ /* Now do exact checks */
+ if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > inode->i_sb->s_blocksize)
+ return;
+
switch (fe->icbTag.fileType) {
case ICBTAG_FILE_TYPE_DIRECTORY:
inode->i_op = &udf_dir_inode_operations;
diff --git a/include/linux/security.h b/include/linux/security.h
index 4686491852a7..4e50307c4c6d 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -2394,7 +2394,7 @@ static inline int security_task_prctl(int option, unsigned long arg2,
 					 unsigned long arg4,
 					 unsigned long arg5)
 {
-	return cap_task_prctl(option, arg2, arg3, arg3, arg5);
+	return cap_task_prctl(option, arg2, arg3, arg4, arg5);
 }
 
 static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
index 9ce083960a25..f18490985fc8 100644
--- a/include/xen/interface/sched.h
+++ b/include/xen/interface/sched.h
@@ -107,5 +107,13 @@ struct sched_watchdog {
 #define SHUTDOWN_suspend  2  /* Clean up, save suspend info, kill.         */
 #define SHUTDOWN_crash    3  /* Tell controller we've crashed.             */
 #define SHUTDOWN_watchdog 4  /* Restart because watchdog time expired.     */
+/*
+ * Domain asked to perform 'soft reset' for it. The expected behavior is to
+ * reset internal Xen state for the domain returning it to the point where it
+ * was created but leaving the domain's memory contents and vCPU contexts
+ * intact. This will allow the domain to start over and set up all Xen specific
+ * interfaces again.
+ */
+#define SHUTDOWN_soft_reset 5
 
 #endif /* __XEN_PUBLIC_SCHED_H__ */
diff --git a/ipc/msg.c b/ipc/msg.c
index 52770bfde2a5..32aaaab15c5c 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -202,13 +202,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
 		return retval;
 	}
 
-	/* ipc_addid() locks msq upon success. */
-	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
-	if (id < 0) {
-		ipc_rcu_putref(msq, msg_rcu_free);
-		return id;
-	}
-
 	msq->q_stime = msq->q_rtime = 0;
 	msq->q_ctime = get_seconds();
 	msq->q_cbytes = msq->q_qnum = 0;
@@ -218,6 +211,13 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
 	INIT_LIST_HEAD(&msq->q_receivers);
 	INIT_LIST_HEAD(&msq->q_senders);
 
+	/* ipc_addid() locks msq upon success. */
+	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
+	if (id < 0) {
+		ipc_rcu_putref(msq, msg_rcu_free);
+		return id;
+	}
+
 	ipc_unlock_object(&msq->q_perm);
 	rcu_read_unlock();
 
diff --git a/ipc/shm.c b/ipc/shm.c
index 6dc55af8a29b..08b14f69d6cf 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -544,12 +544,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 	if (IS_ERR(file))
 		goto no_file;
 
-	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
-	if (id < 0) {
-		error = id;
-		goto no_id;
-	}
-
 	shp->shm_cprid = task_tgid_vnr(current);
 	shp->shm_lprid = 0;
 	shp->shm_atim = shp->shm_dtim = 0;
@@ -559,6 +553,12 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 	shp->shm_file = file;
 	shp->shm_creator = current;
 
+	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
+	if (id < 0) {
+		error = id;
+		goto no_id;
+	}
+
 	/*
 	 * shmid gets reported as "inode#" in /proc/pid/maps.
 	 * proc-ps tools use this. Changing this will break them.
diff --git a/ipc/util.c b/ipc/util.c
index 7684f41bce76..735342570a87 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -292,6 +292,10 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
 	rcu_read_lock();
 	spin_lock(&new->lock);
 
+	current_euid_egid(&euid, &egid);
+	new->cuid = new->uid = euid;
+	new->gid = new->cgid = egid;
+
 	id = idr_alloc(&ids->ipcs_idr, new,
 		       (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
 		       GFP_NOWAIT);
@@ -304,10 +308,6 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
 
 	ids->in_use++;
 
-	current_euid_egid(&euid, &egid);
-	new->cuid = new->uid = euid;
-	new->gid = new->cgid = egid;
-
 	if (next_id < 0) {
 		new->seq = ids->seq++;
 		if (ids->seq > ids->seq_max)
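The three ipc hunks above (msg.c, shm.c and util.c) enforce one rule: finish initializing an object before ipc_addid() publishes it via idr_alloc(), since the moment an id exists a concurrent msgctl()/shmctl() can look the object up, and previously the owner uid/gid were only filled in after publication. A minimal sketch of that initialize-then-publish ordering, using hypothetical names in a kernel context:

/* Sketch only: every field a reader may inspect is set before idr_alloc()
 * makes the object reachable under its new id. */
struct thing {
	kuid_t	owner;
	int	id;
};

static int publish_thing(struct idr *idr, struct thing *t, kuid_t owner)
{
	t->owner = owner;				/* 1. initialize */
	t->id = idr_alloc(idr, t, 0, 0, GFP_NOWAIT);	/* 2. then publish */
	return t->id < 0 ? t->id : 0;
}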
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 19ed5c425c3b..349e5bbdb31f 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -12,6 +12,7 @@
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <linux/mutex.h>
 
 #include "internals.h"
 
@@ -309,18 +310,29 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
 
 void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 {
+	static DEFINE_MUTEX(register_lock);
 	char name [MAX_NAMELEN];
 
-	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
+	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
 		return;
 
+	/*
+	 * irq directories are registered only when a handler is
+	 * added, not when the descriptor is created, so multiple
+	 * tasks might try to register at the same time.
+	 */
+	mutex_lock(&register_lock);
+
+	if (desc->dir)
+		goto out_unlock;
+
 	memset(name, 0, MAX_NAMELEN);
 	sprintf(name, "%d", irq);
 
 	/* create /proc/irq/1234 */
 	desc->dir = proc_mkdir(name, root_irq_dir);
 	if (!desc->dir)
-		return;
+		goto out_unlock;
 
 #ifdef CONFIG_SMP
 	/* create /proc/irq/<irq>/smp_affinity */
@@ -341,6 +353,9 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 
 	proc_create_data("spurious", 0444, desc->dir,
 			 &irq_spurious_proc_fops, (void *)(long)irq);
+
+out_unlock:
+	mutex_unlock(&register_lock);
 }
 
 void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
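The register_irq_proc() change above is the usual cure for a check-then-create race: the desc->dir test is only meaningful while the new static mutex is held, otherwise two tasks adding handlers for the same irq can both see a NULL directory and both call proc_mkdir(). Reduced to its shape, with hypothetical names and error handling omitted:

/* Sketch only: re-check the "already created" condition under the lock. */
static DEFINE_MUTEX(create_lock);
static struct proc_dir_entry *example_dir;

static void create_example_dir_once(void)
{
	mutex_lock(&create_lock);
	if (!example_dir)
		example_dir = proc_mkdir("example", NULL);
	mutex_unlock(&create_lock);
}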
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d9bc87ca062b..e9fd382bf25a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2573,6 +2573,14 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 			continue;
 
 		/*
+		 * Shared VMAs have their own reserves and do not affect
+		 * MAP_PRIVATE accounting but it is possible that a shared
+		 * VMA is using the same page so check and skip such VMAs.
+		 */
+		if (iter_vma->vm_flags & VM_MAYSHARE)
+			continue;
+
+		/*
 		 * Unmap the page from other VMAs without their own reserves.
 		 * They get marked to be SIGKILLed if they fault in these
 		 * areas. This is because a future no-page fault on this VMA
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 627e517077e4..84340a2605ed 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -606,7 +606,7 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
 		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
 				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
 				       NLM_F_MULTI, ops);
-		if (err)
+		if (err < 0)
 			break;
skip:
 		idx++;
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index e476cc7dc801..19f9aa4e698a 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -599,7 +599,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
 			pkts = atomic_add_return(1, &cp->in_pkts);
 		else
 			pkts = sysctl_sync_threshold(ipvs);
-		ip_vs_sync_conn(net, cp->control, pkts);
+		ip_vs_sync_conn(net, cp, pkts);
 	}
 }
 
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 1692e7534759..c3d204973dbc 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -129,7 +129,6 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
 
 	memset(&fl4, 0, sizeof(fl4));
 	fl4.daddr = daddr;
-	fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
 	fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
 			   FLOWI_FLAG_KNOWN_NH : 0;
 
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index c63b618cd619..95578da760d5 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -202,7 +202,8 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
 			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
 	}
 
-	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
+	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
+	       nf_ct_zone(a->master) == nf_ct_zone(b->master);
 }
 
 static inline int expect_matches(const struct nf_conntrack_expect *a,
diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
index 885683a3b0bd..e0406211716b 100644
--- a/sound/arm/Kconfig
+++ b/sound/arm/Kconfig
@@ -9,6 +9,14 @@ menuconfig SND_ARM
 	  Drivers that are implemented on ASoC can be found in
 	  "ALSA for SoC audio support" section.
 
+config SND_PXA2XX_LIB
+	tristate
+	select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
+	select SND_DMAENGINE_PCM
+
+config SND_PXA2XX_LIB_AC97
+	bool
+
 if SND_ARM
 
 config SND_ARMAACI
@@ -21,13 +29,6 @@ config SND_PXA2XX_PCM
 	tristate
 	select SND_PCM
 
-config SND_PXA2XX_LIB
-	tristate
-	select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
-
-config SND_PXA2XX_LIB_AC97
-	bool
-
 config SND_PXA2XX_AC97
 	tristate "AC97 driver for the Intel PXA2xx chip"
 	depends on ARCH_PXA
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index 489a9abf112b..6a530afbb7e9 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -100,10 +100,10 @@ static inline void i2s_clear_irqs(struct dw_i2s_dev *dev, u32 stream)
 
 	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
 		for (i = 0; i < 4; i++)
-			i2s_write_reg(dev->i2s_base, TOR(i), 0);
+			i2s_read_reg(dev->i2s_base, TOR(i));
 	} else {
 		for (i = 0; i < 4; i++)
-			i2s_write_reg(dev->i2s_base, ROR(i), 0);
+			i2s_read_reg(dev->i2s_base, ROR(i));
 	}
 }
 
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 4d2e46fae77c..20a57c0060b2 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -1,7 +1,6 @@
 config SND_PXA2XX_SOC
 	tristate "SoC Audio for the Intel PXA2xx chip"
 	depends on ARCH_PXA
-	select SND_ARM
 	select SND_PXA2XX_LIB
 	help
 	  Say Y or M if you want to add support for codecs attached to
@@ -24,7 +23,6 @@ config SND_PXA2XX_AC97
 config SND_PXA2XX_SOC_AC97
 	tristate
 	select AC97_BUS
-	select SND_ARM
 	select SND_PXA2XX_LIB_AC97
 	select SND_SOC_AC97_BUS
 
diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
index daf61abc3670..646b66703bd8 100644
--- a/sound/synth/emux/emux_oss.c
+++ b/sound/synth/emux/emux_oss.c
@@ -69,7 +69,8 @@ snd_emux_init_seq_oss(struct snd_emux *emu)
 	struct snd_seq_oss_reg *arg;
 	struct snd_seq_device *dev;
 
-	if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS,
+	/* using device#1 here for avoiding conflicts with OPL3 */
+	if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS,
 			       sizeof(struct snd_seq_oss_reg), &dev) < 0)
 		return;
 
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 326068a593a5..bb34199d5451 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1729,7 +1729,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
 	if (ph->needs_swap)
 		nr = bswap_32(nr);
 
-	ph->env.nr_cpus_online = nr;
+	ph->env.nr_cpus_avail = nr;
 
 	ret = readn(fd, &nr, sizeof(nr));
 	if (ret != sizeof(nr))
@@ -1738,7 +1738,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
 	if (ph->needs_swap)
 		nr = bswap_32(nr);
 
-	ph->env.nr_cpus_avail = nr;
+	ph->env.nr_cpus_online = nr;
 	return 0;
 }