build/patch/kernel/pine64-default/patch-3.10.106-107.patch

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index daf83824fda5..ed0c7e3ba8da 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2889,6 +2889,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
spia_pedr=
spia_peddr=
+ stack_guard_gap= [MM]
+ override the default stack gap protection. The value
+ is in page units and it defines how many pages prior
+ to (for stacks growing down) resp. after (for stacks
+ growing up) the main stack are reserved for no other
+ mapping. Default value is 256 pages.
+
stacktrace [FTRACE]
Enabled the stack tracer on boot up.
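
The stack_guard_gap parameter documented above is what every architecture hunk below consumes indirectly: each arch_get_unmapped_area() variant stops comparing against vma->vm_start and instead calls vm_start_gap(vma). The generic helper itself is not shown in this excerpt; as a rough sketch, assuming it matches the upstream stack-guard-gap fix, it widens a growing-down stack VMA by the gap before the overlap test:

    /* Sketch only -- assumed shape of the generic helper used by the hunks
     * below; the real definition lives in include/linux/mm.h. */
    extern unsigned long stack_guard_gap;   /* assumed 256UL << PAGE_SHIFT by default */

    static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
    {
            unsigned long vm_start = vma->vm_start;

            if (vma->vm_flags & VM_GROWSDOWN) {
                    vm_start -= stack_guard_gap;
                    if (vm_start > vma->vm_start)   /* underflow: clamp to 0 */
                            vm_start = 0;
            }
            return vm_start;
    }

With the default of 256 pages and 4 KiB pages, this keeps a 1 MiB hole free below a growing-down stack.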
diff --git a/Makefile b/Makefile
index 2f87f67fb9f7..752b1c67daa0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 10
-SUBLEVEL = 106
+SUBLEVEL = 107
EXTRAVERSION = -bsp-1.2
NAME = TOSSUG Baby Fish
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index 116d3e09b5b5..e6b365d9e0ad 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -228,8 +228,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
if (state.fault)
goto fault;
+ /* clear any remanants of delay slot */
if (delay_mode(regs)) {
- regs->ret = regs->bta;
+ regs->ret = regs->bta & ~1U;
regs->status32 &= ~STATUS_DE_MASK;
} else {
regs->ret += state.instr_len;
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 2e06d56e987b..cf4ae6958240 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -64,7 +64,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index c914357c0d89..d3c206e78870 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -59,6 +59,7 @@
#size-cells = <1>;
compatible = "m25p64";
spi-max-frequency = <30000000>;
+ m25p,fast-read;
reg = <0>;
partition@0 {
label = "U-Boot-SPL";
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index dba62cb1ad08..f389107947e0 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -58,6 +58,9 @@
#define ARM_CPU_XSCALE_ARCH_V2 0x4000
#define ARM_CPU_XSCALE_ARCH_V3 0x6000
+/* Qualcomm implemented cores */
+#define ARM_CPU_PART_SCORPION 0x510002d0
+
extern unsigned int processor_id;
#ifdef CONFIG_CPU_CP15
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 1fd749ee4a1b..b0b69e9ce660 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1066,6 +1066,22 @@ static int __init arch_hw_breakpoint_init(void)
return 0;
}
+ /*
+ * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
+ * whenever a WFI is issued, even if the core is not powered down, in
+ * violation of the architecture. When DBGPRSR.SPD is set, accesses to
+ * breakpoint and watchpoint registers are treated as undefined, so
+ * this results in boot time and runtime failures when these are
+ * accessed and we unexpectedly take a trap.
+ *
+ * It's not clear if/how this can be worked around, so we blacklist
+ * Scorpion CPUs to avoid these issues.
+ */
+ if ((read_cpuid_id() & 0xff00fff0) == ARM_CPU_PART_SCORPION) {
+ pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
+ return 0;
+ }
+
has_ossr = core_has_os_save_restore();
/* Determine how many BRPs/WRPs are available. */
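
The comparison masks the MIDR with 0xff00fff0, which drops the variant ([23:20]) and revision ([3:0]) fields and keeps the implementer (0x51, Qualcomm), architecture and part-number fields, so every Scorpion revision is caught. A minimal illustrative helper (hypothetical name, not part of the patch):

    /* True on any Scorpion revision: variant and revision bits are masked out. */
    static bool cpu_is_scorpion(void)
    {
            return (read_cpuid_id() & 0xff00fff0) == ARM_CPU_PART_SCORPION;
    }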
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 5ef506c6f492..984509eb44b9 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -89,7 +89,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -140,7 +140,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 81edd31bb4ac..810ae2db40ef 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -258,8 +258,7 @@ static int __init xen_guest_init(void)
* for secondary CPUs as they are brought up.
* For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
*/
- xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
- sizeof(struct vcpu_info));
+ xen_vcpu_info = alloc_percpu(struct vcpu_info);
if (xen_vcpu_info == NULL)
return -ENOMEM;
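
alloc_percpu() derives both the size and, crucially, the alignment from the type, which is what the old call got wrong by passing sizeof(struct vcpu_info) as the alignment argument. Per the upstream percpu API (shown here only for illustration), the new line expands to roughly:

    xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
                                   __alignof__(struct vcpu_info));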
diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c
index 3c494e84444d..a511ac16a8e3 100644
--- a/arch/c6x/kernel/ptrace.c
+++ b/arch/c6x/kernel/ptrace.c
@@ -69,46 +69,6 @@ static int gpr_get(struct task_struct *target,
0, sizeof(*regs));
}
-static int gpr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
- struct pt_regs *regs = task_pt_regs(target);
-
- /* Don't copyin TSR or CSR */
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &regs,
- 0, PT_TSR * sizeof(long));
- if (ret)
- return ret;
-
- ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- PT_TSR * sizeof(long),
- (PT_TSR + 1) * sizeof(long));
- if (ret)
- return ret;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &regs,
- (PT_TSR + 1) * sizeof(long),
- PT_CSR * sizeof(long));
- if (ret)
- return ret;
-
- ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- PT_CSR * sizeof(long),
- (PT_CSR + 1) * sizeof(long));
- if (ret)
- return ret;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &regs,
- (PT_CSR + 1) * sizeof(long), -1);
- return ret;
-}
-
enum c6x_regset {
REGSET_GPR,
};
@@ -120,7 +80,6 @@ static const struct user_regset c6x_regsets[] = {
.size = sizeof(u32),
.align = sizeof(u32),
.get = gpr_get,
- .set = gpr_set
},
};
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index 836f14707a62..efa59f1f8022 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -74,7 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
addr = PAGE_ALIGN(addr);
vma = find_vma(current->mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
goto success;
}
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 7841f2290385..9d523375f68a 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -192,20 +192,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
#define strlen_user(str) strnlen_user(str, 32767)
-extern unsigned long __must_check __copy_user_zeroing(void *to,
- const void __user *from,
- unsigned long n);
+extern unsigned long raw_copy_from_user(void *to, const void __user *from,
+ unsigned long n);
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ unsigned long res = n;
if (likely(access_ok(VERIFY_READ, from, n)))
- return __copy_user_zeroing(to, from, n);
- memset(to, 0, n);
- return n;
+ res = raw_copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
+#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
#define __copy_from_user_inatomic __copy_from_user
extern unsigned long __must_check __copy_user(void __user *to,
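
The new contract is that raw_copy_from_user() returns how many bytes it could not copy and copy_from_user() zeroes exactly that tail, so no stale kernel memory is exposed. As a worked example (values are illustrative): with n = 128 and a fault after 100 bytes, res is 28 and the wrapper clears the last 28 bytes of the destination, equivalent to:

    res = raw_copy_from_user(to, from, 128);   /* res == 28 after the fault   */
    memset(to + (128 - res), 0, res);          /* zero only the uncopied tail */

Previously __copy_user_zeroing() did this zeroing itself in its copy_exception_bytes path, removed further down in arch/metag/lib/usercopy.c.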
diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c
index 7563628822bd..5e2dc7defd2c 100644
--- a/arch/metag/kernel/ptrace.c
+++ b/arch/metag/kernel/ptrace.c
@@ -24,6 +24,16 @@
* user_regset definitions.
*/
+static unsigned long user_txstatus(const struct pt_regs *regs)
+{
+ unsigned long data = (unsigned long)regs->ctx.Flags;
+
+ if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
+ data |= USER_GP_REGS_STATUS_CATCH_BIT;
+
+ return data;
+}
+
int metag_gp_regs_copyout(const struct pt_regs *regs,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
@@ -62,9 +72,7 @@ int metag_gp_regs_copyout(const struct pt_regs *regs,
if (ret)
goto out;
/* TXSTATUS */
- data = (unsigned long)regs->ctx.Flags;
- if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
- data |= USER_GP_REGS_STATUS_CATCH_BIT;
+ data = user_txstatus(regs);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&data, 4*25, 4*26);
if (ret)
@@ -119,6 +127,7 @@ int metag_gp_regs_copyin(struct pt_regs *regs,
if (ret)
goto out;
/* TXSTATUS */
+ data = user_txstatus(regs);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&data, 4*25, 4*26);
if (ret)
@@ -244,6 +253,8 @@ int metag_rp_state_copyin(struct pt_regs *regs,
unsigned long long *ptr;
int ret, i;
+ if (count < 4*13)
+ return -EINVAL;
/* Read the entire pipeline before making any changes */
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&rp, 0, 4*13);
@@ -303,7 +314,7 @@ static int metag_tls_set(struct task_struct *target,
const void *kbuf, const void __user *ubuf)
{
int ret;
- void __user *tls;
+ void __user *tls = target->thread.tls_ptr;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
if (ret)
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index dfe77b26beaa..2792fc621088 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -29,7 +29,6 @@
COPY \
"1:\n" \
" .section .fixup,\"ax\"\n" \
- " MOV D1Ar1,#0\n" \
FIXUP \
" MOVT D1Ar1,#HI(1b)\n" \
" JUMP D1Ar1,#LO(1b)\n" \
@@ -661,16 +660,14 @@ EXPORT_SYMBOL(__copy_user);
__asm_copy_user_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"2: SETB [%0++],D1Ar1\n", \
- "3: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
+ "3: ADD %2,%2,#1\n", \
" .long 2b,3b\n")
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" GETW D1Ar1,[%1++]\n" \
"2: SETW [%0++],D1Ar1\n" COPY, \
- "3: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
+ "3: ADD %2,%2,#2\n" FIXUP, \
" .long 2b,3b\n" TENTRY)
#define __asm_copy_from_user_2(to, from, ret) \
@@ -680,145 +677,26 @@ EXPORT_SYMBOL(__copy_user);
__asm_copy_from_user_2x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"4: SETB [%0++],D1Ar1\n", \
- "5: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
+ "5: ADD %2,%2,#1\n", \
" .long 4b,5b\n")
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" GETD D1Ar1,[%1++]\n" \
"2: SETD [%0++],D1Ar1\n" COPY, \
- "3: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
+ "3: ADD %2,%2,#4\n" FIXUP, \
" .long 2b,3b\n" TENTRY)
#define __asm_copy_from_user_4(to, from, ret) \
__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
-#define __asm_copy_from_user_5(to, from, ret) \
- __asm_copy_from_user_4x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "4: SETB [%0++],D1Ar1\n", \
- "5: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 4b,5b\n")
-
-#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_4x_cont(to, from, ret, \
- " GETW D1Ar1,[%1++]\n" \
- "4: SETW [%0++],D1Ar1\n" COPY, \
- "5: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
- " .long 4b,5b\n" TENTRY)
-
-#define __asm_copy_from_user_6(to, from, ret) \
- __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_7(to, from, ret) \
- __asm_copy_from_user_6x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "6: SETB [%0++],D1Ar1\n", \
- "7: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 6b,7b\n")
-
-#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_4x_cont(to, from, ret, \
- " GETD D1Ar1,[%1++]\n" \
- "4: SETD [%0++],D1Ar1\n" COPY, \
- "5: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
- " .long 4b,5b\n" TENTRY)
-
-#define __asm_copy_from_user_8(to, from, ret) \
- __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_9(to, from, ret) \
- __asm_copy_from_user_8x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "6: SETB [%0++],D1Ar1\n", \
- "7: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 6b,7b\n")
-
-#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_8x_cont(to, from, ret, \
- " GETW D1Ar1,[%1++]\n" \
- "6: SETW [%0++],D1Ar1\n" COPY, \
- "7: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
- " .long 6b,7b\n" TENTRY)
-
-#define __asm_copy_from_user_10(to, from, ret) \
- __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_11(to, from, ret) \
- __asm_copy_from_user_10x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "8: SETB [%0++],D1Ar1\n", \
- "9: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 8b,9b\n")
-
-#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_8x_cont(to, from, ret, \
- " GETD D1Ar1,[%1++]\n" \
- "6: SETD [%0++],D1Ar1\n" COPY, \
- "7: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
- " .long 6b,7b\n" TENTRY)
-
-#define __asm_copy_from_user_12(to, from, ret) \
- __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_13(to, from, ret) \
- __asm_copy_from_user_12x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "8: SETB [%0++],D1Ar1\n", \
- "9: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 8b,9b\n")
-
-#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_12x_cont(to, from, ret, \
- " GETW D1Ar1,[%1++]\n" \
- "8: SETW [%0++],D1Ar1\n" COPY, \
- "9: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
- " .long 8b,9b\n" TENTRY)
-
-#define __asm_copy_from_user_14(to, from, ret) \
- __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_15(to, from, ret) \
- __asm_copy_from_user_14x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "10: SETB [%0++],D1Ar1\n", \
- "11: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 10b,11b\n")
-
-#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_12x_cont(to, from, ret, \
- " GETD D1Ar1,[%1++]\n" \
- "8: SETD [%0++],D1Ar1\n" COPY, \
- "9: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
- " .long 8b,9b\n" TENTRY)
-
-#define __asm_copy_from_user_16(to, from, ret) \
- __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
-
#define __asm_copy_from_user_8x64(to, from, ret) \
asm volatile ( \
" GETL D0Ar2,D1Ar1,[%1++]\n" \
"2: SETL [%0++],D0Ar2,D1Ar1\n" \
"1:\n" \
" .section .fixup,\"ax\"\n" \
- " MOV D1Ar1,#0\n" \
- " MOV D0Ar2,#0\n" \
"3: ADD %2,%2,#8\n" \
- " SETL [%0++],D0Ar2,D1Ar1\n" \
" MOVT D0Ar2,#HI(1b)\n" \
" JUMP D0Ar2,#LO(1b)\n" \
" .previous\n" \
@@ -878,11 +756,12 @@ EXPORT_SYMBOL(__copy_user);
"SUB %1, %1, D0Ar2\n")
-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
- userland. The return-value is the number of bytes that were
- inaccessible. */
-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
- unsigned long n)
+/*
+ * Copy from user to kernel. The return-value is the number of bytes that were
+ * inaccessible.
+ */
+unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
+ unsigned long n)
{
register char *dst asm ("A0.2") = pdst;
register const char __user *src asm ("A1.2") = psrc;
@@ -895,7 +774,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_1(dst, src, retn);
n--;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
if ((unsigned long) dst & 1) {
/* Worst case - byte copy */
@@ -903,14 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_1(dst, src, retn);
n--;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
if (((unsigned long) src & 2) && n >= 2) {
__asm_copy_from_user_2(dst, src, retn);
n -= 2;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
if ((unsigned long) dst & 2) {
/* Second worst case - word copy */
@@ -918,7 +797,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_2(dst, src, retn);
n -= 2;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
@@ -934,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_8x64(dst, src, retn);
n -= 8;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
@@ -950,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_8x64(dst, src, retn);
n -= 8;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
#endif
@@ -960,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
n -= 4;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
/* If we get here, there were no memory read faults. */
@@ -986,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
/* If we get here, retn correctly reflects the number of failing
bytes. */
return retn;
-
- copy_exception_bytes:
- /* We already have "retn" bytes cleared, and need to clear the
- remaining "n" bytes. A non-optimized simple byte-for-byte in-line
- memset is preferred here, since this isn't speed-critical code and
- we'd rather have this a leaf-function than calling memset. */
- {
- char *endp;
- for (endp = dst + n; dst < endp; dst++)
- *dst = 0;
- }
-
- return retn + n;
}
-EXPORT_SYMBOL(__copy_user_zeroing);
+EXPORT_SYMBOL(raw_copy_from_user);
#define __asm_clear_8x64(to, ret) \
asm volatile ( \
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index ac3d2b8a20d4..d48cf440010c 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -155,7 +155,9 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr,
" daddu %0, %4 \n"
" dsll32 $1, %0, 0 \n"
" daddu %0, $1 \n"
+ " sltu $1, %0, $1 \n"
" dsra32 %0, %0, 0 \n"
+ " addu %0, $1 \n"
#endif
" .set pop"
: "=r" (sum)
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index 93aa302948d7..c68312947ed9 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -15,12 +15,22 @@ static int crashing_cpu = -1;
static cpumask_t cpus_in_crash = CPU_MASK_NONE;
#ifdef CONFIG_SMP
-static void crash_shutdown_secondary(void *ignore)
+static void crash_shutdown_secondary(void *passed_regs)
{
- struct pt_regs *regs;
+ struct pt_regs *regs = passed_regs;
int cpu = smp_processor_id();
- regs = task_pt_regs(current);
+ /*
+ * If we are passed registers, use those. Otherwise get the
+ * regs from the last interrupt, which should be correct, as
+ * we are in an interrupt. But if the regs are not there,
+ * pull them from the top of the stack. They are probably
+ * wrong, but we need something to keep from crashing again.
+ */
+ if (!regs)
+ regs = get_irq_regs();
+ if (!regs)
+ regs = task_pt_regs(current);
if (!cpu_online(cpu))
return;
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 7e5fe2790d8a..0bb42959948e 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -92,7 +92,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
index 8c9b631d2a78..8c00e6c06266 100644
--- a/arch/parisc/include/asm/bitops.h
+++ b/arch/parisc/include/asm/bitops.h
@@ -6,7 +6,7 @@
#endif
#include <linux/compiler.h>
-#include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */
+#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>
@@ -16,6 +16,12 @@
* to include/asm-i386/bitops.h or kerneldoc
*/
+#if __BITS_PER_LONG == 64
+#define SHIFT_PER_LONG 6
+#else
+#define SHIFT_PER_LONG 5
+#endif
+
#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
diff --git a/arch/parisc/include/uapi/asm/bitsperlong.h b/arch/parisc/include/uapi/asm/bitsperlong.h
index 75196b415d3f..540c94de4427 100644
--- a/arch/parisc/include/uapi/asm/bitsperlong.h
+++ b/arch/parisc/include/uapi/asm/bitsperlong.h
@@ -9,10 +9,8 @@
*/
#if (defined(__KERNEL__) && defined(CONFIG_64BIT)) || defined (__LP64__)
#define __BITS_PER_LONG 64
-#define SHIFT_PER_LONG 6
#else
#define __BITS_PER_LONG 32
-#define SHIFT_PER_LONG 5
#endif
#include <asm-generic/bitsperlong.h>
diff --git a/arch/parisc/include/uapi/asm/swab.h b/arch/parisc/include/uapi/asm/swab.h
index e78403b129ef..928e1bbac98f 100644
--- a/arch/parisc/include/uapi/asm/swab.h
+++ b/arch/parisc/include/uapi/asm/swab.h
@@ -1,6 +1,7 @@
#ifndef _PARISC_SWAB_H
#define _PARISC_SWAB_H
+#include <asm/bitsperlong.h>
#include <linux/types.h>
#include <linux/compiler.h>
@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
}
#define __arch_swab32 __arch_swab32
-#if BITS_PER_LONG > 32
+#if __BITS_PER_LONG > 32
/*
** From "PA-RISC 2.0 Architecture", HP Professional Books.
** See Appendix I page 8 , "Endian Byte Swapping".
@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
return x;
}
#define __arch_swab64 __arch_swab64
-#endif /* BITS_PER_LONG > 32 */
+#endif /* __BITS_PER_LONG > 32 */
#endif /* _PARISC_SWAB_H */
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 56a4a5d205af..a008b872d4b7 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -273,6 +273,14 @@ checkbin:
echo 'disable kernel modules' ; \
false ; \
fi
+ @if test "x${CONFIG_CPU_LITTLE_ENDIAN}" = "xy" \
+ && $(LD) --version | head -1 | grep ' 2\.24$$' >/dev/null ; then \
+ echo -n '*** binutils 2.24 miscompiles weak symbols ' ; \
+ echo 'in some circumstances.' ; \
+ echo -n '*** Please use a different binutils version.' ; \
+ false ; \
+ fi
+
CLEAN_FILES += $(TOUT)
diff --git a/arch/powerpc/boot/ps3-head.S b/arch/powerpc/boot/ps3-head.S
index b6fcbaf5027b..3dc44b05fb97 100644
--- a/arch/powerpc/boot/ps3-head.S
+++ b/arch/powerpc/boot/ps3-head.S
@@ -57,11 +57,6 @@ __system_reset_overlay:
bctr
1:
- /* Save the value at addr zero for a null pointer write check later. */
-
- li r4, 0
- lwz r3, 0(r4)
-
/* Primary delays then goes to _zimage_start in wrapper. */
or 31, 31, 31 /* db16cyc */
diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c
index 9954d98871d0..029ea3ce1588 100644
--- a/arch/powerpc/boot/ps3.c
+++ b/arch/powerpc/boot/ps3.c
@@ -119,13 +119,12 @@ void ps3_copy_vectors(void)
flush_cache((void *)0x100, 512);
}
-void platform_init(unsigned long null_check)
+void platform_init(void)
{
const u32 heapsize = 0x1000000 - (u32)_end; /* 16MiB */
void *chosen;
unsigned long ft_addr;
u64 rm_size;
- unsigned long val;
console_ops.write = ps3_console_write;
platform_ops.exit = ps3_exit;
@@ -153,11 +152,6 @@ void platform_init(unsigned long null_check)
printf(" flat tree at 0x%lx\n\r", ft_addr);
- val = *(unsigned long *)0;
-
- if (val != null_check)
- printf("null check failed: %lx != %lx\n\r", val, null_check);
-
((kernel_entry_t)0)(ft_addr, 0, NULL);
ps3_exit();
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 52e5758ea368..b3bab9575d31 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -25,6 +25,7 @@
#include <asm/cputable.h>
#include <asm/emulated_ops.h>
#include <asm/switch_to.h>
+#include <asm/disassemble.h>
struct aligninfo {
unsigned char len;
@@ -764,14 +765,25 @@ int fix_alignment(struct pt_regs *regs)
nb = aligninfo[instr].len;
flags = aligninfo[instr].flags;
- /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
- if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
- nb = 8;
- flags = LD+SW;
- } else if (IS_XFORM(instruction) &&
- ((instruction >> 1) & 0x3ff) == 660) {
- nb = 8;
- flags = ST+SW;
+ /*
+ * Handle some cases which give overlaps in the DSISR values.
+ */
+ if (IS_XFORM(instruction)) {
+ switch (get_xop(instruction)) {
+ case 532: /* ldbrx */
+ nb = 8;
+ flags = LD+SW;
+ break;
+ case 660: /* stdbrx */
+ nb = 8;
+ flags = ST+SW;
+ break;
+ case 20: /* lwarx */
+ case 84: /* ldarx */
+ case 116: /* lharx */
+ case 276: /* lqarx */
+ return 0; /* not emulated ever */
+ }
}
/* Byteswap little endian loads and stores */
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index f0b47d1a6b0e..7531f9abf10d 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -228,8 +228,10 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
rcu_read_lock();
bp = __get_cpu_var(bp_per_reg);
- if (!bp)
+ if (!bp) {
+ rc = NOTIFY_DONE;
goto out;
+ }
info = counter_arch_bp(bp);
/*
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 631a2650e4e4..50b482bcbea2 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -511,7 +511,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
advance = 0;
printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
"(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
- kvmppc_core_queue_program(vcpu, 0);
}
}
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 7ce9cf3b6988..887365a82c01 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
if ((mm->task_size - len) < addr)
return 0;
vma = find_vma(mm, addr);
- return (!vma || (addr + len) <= vma->vm_start);
+ return (!vma || (addr + len) <= vm_start_gap(vma));
}
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 6b499870662f..52ef30cfedf0 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -43,14 +43,17 @@ extern void execve_tail(void);
#ifndef CONFIG_64BIT
#define TASK_SIZE (1UL << 31)
+#define TASK_MAX_SIZE (1UL << 31)
#define TASK_UNMAPPED_BASE (1UL << 30)
#else /* CONFIG_64BIT */
-#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
+#define TASK_SIZE_OF(tsk) ((tsk)->mm ? \
+ (tsk)->mm->context.asce_limit : TASK_MAX_SIZE)
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
(1UL << 30) : (1UL << 41))
#define TASK_SIZE TASK_SIZE_OF(current)
+#define TASK_MAX_SIZE (1UL << 53)
#endif /* CONFIG_64BIT */
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index a938b548f07e..14a77e6d8fc7 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -335,7 +335,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
if ((from | to | len) & (PMD_SIZE - 1))
return -EINVAL;
- if (len == 0 || from + len > PGDIR_SIZE ||
+ if (len == 0 || from + len > TASK_MAX_SIZE ||
from + len < from || to + len < to)
return -EINVAL;
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index f8e69d5bc0a9..aae199b3e046 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -416,7 +416,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
zdev->dma_table = dma_alloc_cpu_table();
if (!zdev->dma_table) {
rc = -ENOMEM;
- goto out_clean;
+ goto out;
}
zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
@@ -429,7 +429,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
bitmap_order);
if (!zdev->iommu_bitmap) {
rc = -ENOMEM;
- goto out_reg;
+ goto free_dma_table;
}
rc = zpci_register_ioat(zdev,
@@ -438,12 +438,16 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
zdev->start_dma + zdev->iommu_size - 1,
(u64) zdev->dma_table);
if (rc)
- goto out_reg;
- return 0;
+ goto free_bitmap;
-out_reg:
+ return 0;
+free_bitmap:
+ vfree(zdev->iommu_bitmap);
+ zdev->iommu_bitmap = NULL;
+free_dma_table:
dma_free_cpu_table(zdev->dma_table);
-out_clean:
+ zdev->dma_table = NULL;
+out:
return rc;
}
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 6777177807c2..7df7d5944188 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -63,7 +63,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -113,7 +113,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 7ff45e4ba681..875ddf00dab4 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -308,7 +308,7 @@ static int genregs64_set(struct task_struct *target,
}
if (!ret) {
- unsigned long y;
+ unsigned long y = regs->y;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&y,
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 666510b39870..66e1047c835f 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -119,7 +119,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -182,7 +182,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index d2b59441ebdd..ce4937025e97 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -118,7 +118,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, HPAGE_SIZE);
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 0f83ed4602b2..d0dac73a2d80 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -110,7 +110,7 @@ static int tile_gpr_set(struct task_struct *target,
const void *kbuf, const void __user *ubuf)
{
int ret;
- struct pt_regs regs;
+ struct pt_regs regs = *task_pt_regs(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
sizeof(regs));
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 650ccff8378c..c75eac7a2316 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -297,7 +297,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (current->mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 4bcf841e4701..3deb8e533359 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -218,6 +218,29 @@ static int ghash_async_final(struct ahash_request *req)
}
}
+static int ghash_async_import(struct ahash_request *req, const void *in)
+{
+ struct ahash_request *cryptd_req = ahash_request_ctx(req);
+ struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ ghash_async_init(req);
+ memcpy(dctx, in, sizeof(*dctx));
+ return 0;
+
+}
+
+static int ghash_async_export(struct ahash_request *req, void *out)
+{
+ struct ahash_request *cryptd_req = ahash_request_ctx(req);
+ struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ memcpy(out, dctx, sizeof(*dctx));
+ return 0;
+
+}
+
static int ghash_async_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -285,8 +308,11 @@ static struct ahash_alg ghash_async_alg = {
.final = ghash_async_final,
.setkey = ghash_async_setkey,
.digest = ghash_async_digest,
+ .export = ghash_async_export,
+ .import = ghash_async_import,
.halg = {
.digestsize = GHASH_DIGEST_SIZE,
+ .statesize = sizeof(struct ghash_desc_ctx),
.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-clmulni",
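
Without .export/.import and a non-zero .statesize, callers cannot snapshot a partially computed ghash digest and resume it elsewhere; the algif_hash accept() path (also touched later in this patch) relies on exactly that. A hedged sketch of the caller-side pattern using the generic ahash API, with error handling elided:

    /* 'req' and 'req2' are assumed to be initialised requests on compatible tfms. */
    char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))];

    crypto_ahash_export(req, state);    /* snapshot the partial digest state */
    crypto_ahash_import(req2, state);   /* resume hashing in another request */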
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 01f15b227d7e..2fa7f4f6ecb3 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -272,7 +272,7 @@ struct task_struct;
#define ARCH_DLINFO_IA32(vdso_enabled) \
do { \
- if (vdso_enabled) { \
+ if (VDSO_CURRENT_BASE) { \
NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
} \
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 9cb52767999a..338a4ae486bc 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -51,7 +51,7 @@ static const char * const th_names[] = {
"load_store",
"insn_fetch",
"combined_unit",
- "",
+ "decode_unit",
"northbridge",
"execution_unit",
};
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 6ed8f16fd61b..cc89b36e556a 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -122,7 +122,8 @@ GLOBAL(ftrace_graph_call)
jmp ftrace_stub
#endif
-GLOBAL(ftrace_stub)
+/* This is weak to keep gas from relaxing the jumps */
+WEAK(ftrace_stub)
retq
END(ftrace_caller)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 1ffc32dbe450..8c43930ce1a7 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -744,6 +744,18 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long return_hooker = (unsigned long)
&return_to_handler;
+ /*
+ * When resuming from suspend-to-ram, this function can be indirectly
+ * called from early CPU startup code while the CPU is in real mode,
+ * which would fail miserably. Make sure the stack pointer is a
+ * virtual address.
+ *
+ * This check isn't as accurate as virt_addr_valid(), but it should be
+ * good enough for this purpose, and it's fast.
+ */
+ if (unlikely((long)__builtin_frame_address(0) >= 0))
+ return;
+
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
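
The sign test works because x86_64 kernel virtual addresses sit in the upper canonical half: a kernel stack address such as 0xffff880012345678 has bit 63 set and is negative when read as a signed long, while a real-mode stack pointer such as 0x9f00 is positive, so the early-resume call bails out before touching any tracing state.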
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 30277e27431a..d050393d3be2 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -127,7 +127,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (end - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -166,7 +166,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7e9ca58ae875..d9016e4a80f9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1047,10 +1047,10 @@ static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12,
return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}
-static inline bool is_exception(u32 intr_info)
+static inline bool is_nmi(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
- == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
+ == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
}
static void nested_vmx_vmexit(struct kvm_vcpu *vcpu);
@@ -3074,7 +3074,7 @@ static void fix_rmode_seg(int seg, struct kvm_segment *save)
}
vmcs_write16(sf->selector, var.selector);
- vmcs_write32(sf->base, var.base);
+ vmcs_writel(sf->base, var.base);
vmcs_write32(sf->limit, var.limit);
vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
}
@@ -4716,7 +4716,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
if (is_machine_check(intr_info))
return handle_machine_check(vcpu);
- if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
+ if (is_nmi(intr_info))
return 1; /* already handled by vmx_vcpu_run() */
if (is_no_device(intr_info)) {
@@ -6507,7 +6507,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
switch (exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
- if (!is_exception(intr_info))
+ if (is_nmi(intr_info))
return 0;
else if (is_page_fault(intr_info))
return enable_ept;
@@ -6803,8 +6803,7 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
kvm_machine_check();
/* We need to handle NMIs before interrupts are enabled */
- if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
- (exit_intr_info & INTR_INFO_VALID_MASK)) {
+ if (is_nmi(exit_intr_info)) {
kvm_before_handle_nmi(&vmx->vcpu);
asm("int $2");
kvm_after_handle_nmi(&vmx->vcpu);
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index ae1aa71d0115..6adf3d963320 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -341,7 +341,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 7a5bf1b76e2f..40940780c665 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -475,21 +475,40 @@ void __init init_mem_mapping(void)
* devmem_is_allowed() checks to see if /dev/mem access to a certain address
* is valid. The argument is a physical page number.
*
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains bios code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
+ * On x86, access has to be given to the first megabyte of RAM because that
+ * area traditionally contains BIOS code and data regions used by X, dosemu,
+ * and similar apps. Since they map the entire memory range, the whole range
+ * must be allowed (for mapping), but any areas that would otherwise be
+ * disallowed are flagged as being "zero filled" instead of rejected.
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
*/
int devmem_is_allowed(unsigned long pagenr)
{
- if (pagenr < 256)
- return 1;
- if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+ if (page_is_ram(pagenr)) {
+ /*
+ * For disallowed memory regions in the low 1MB range,
+ * request that the page be shown as all zeros.
+ */
+ if (pagenr < 256)
+ return 2;
+
+ return 0;
+ }
+
+ /*
+ * This must follow RAM test, since System RAM is considered a
+ * restricted resource under CONFIG_STRICT_IOMEM.
+ */
+ if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
+ /* Low 1MB bypasses iomem restrictions. */
+ if (pagenr < 256)
+ return 1;
+
return 0;
- if (!page_is_ram(pagenr))
- return 1;
- return 0;
+ }
+
+ return 1;
}
void free_init_pages(char *what, unsigned long begin, unsigned long end)
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 75f9e5d80d02..7da1b9a234a6 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -67,22 +67,21 @@ static int mmap_is_legacy(void)
static unsigned long mmap_rnd(void)
{
- unsigned long rnd = 0;
+ unsigned long rnd;
/*
- * 8 bits of randomness in 32bit mmaps, 20 address space bits
- * 28 bits of randomness in 64bit mmaps, 40 address space bits
- */
- if (current->flags & PF_RANDOMIZE) {
- if (mmap_is_ia32())
- rnd = get_random_int() % (1<<8);
- else
- rnd = get_random_int() % (1<<28);
- }
+ * 8 bits of randomness in 32bit mmaps, 20 address space bits
+ * 28 bits of randomness in 64bit mmaps, 40 address space bits
+ */
+ if (mmap_is_ia32())
+ rnd = (unsigned long)get_random_int() % (1<<8);
+ else
+ rnd = (unsigned long)get_random_int() % (1<<28);
+
return rnd << PAGE_SHIFT;
}
-static unsigned long mmap_base(void)
+static unsigned long mmap_base(unsigned long rnd)
{
unsigned long gap = rlimit(RLIMIT_STACK);
@@ -91,19 +90,7 @@ static unsigned long mmap_base(void)
else if (gap > MAX_GAP)
gap = MAX_GAP;
- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
-}
-
-/*
- * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
- * does, but not when emulating X86_32
- */
-static unsigned long mmap_legacy_base(void)
-{
- if (mmap_is_ia32())
- return TASK_UNMAPPED_BASE;
- else
- return TASK_UNMAPPED_BASE + mmap_rnd();
+ return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
/*
@@ -112,14 +99,19 @@ static unsigned long mmap_legacy_base(void)
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
- mm->mmap_legacy_base = mmap_legacy_base();
- mm->mmap_base = mmap_base();
+ unsigned long random_factor = 0UL;
+
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = mmap_rnd();
+
+ mm->mmap_legacy_base = TASK_UNMAPPED_BASE + random_factor;
if (mmap_is_legacy()) {
mm->mmap_base = mm->mmap_legacy_base;
mm->get_unmapped_area = arch_get_unmapped_area;
mm->unmap_area = arch_unmap_area;
} else {
+ mm->mmap_base = mmap_base(random_factor);
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
mm->unmap_area = arch_unmap_area_topdown;
}
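
The numbers in the comment follow directly from the page shift: the random factor is chosen in pages and then shifted by PAGE_SHIFT, so

    2^8  pages x 4 KiB = 2^20 bytes (1 MiB) of mmap randomisation on 32-bit,
    2^28 pages x 4 KiB = 2^40 bytes (1 TiB) on 64-bit,

matching the "20/40 address space bits" noted above. The restructuring itself only moves the PF_RANDOMIZE decision into arch_pick_mmap_layout() so the legacy and top-down bases share one random factor.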
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index a3b0265c2ca7..63462c8db802 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -118,6 +118,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
},
},
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */
+ {
+ .callback = set_nouse_crs,
+ .ident = "Supermicro X8DTH",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"),
+ DMI_MATCH(DMI_BIOS_VERSION, "2.0a"),
+ },
+ },
/* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */
{
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 13e8935e2eab..e3600eb618c1 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -338,11 +338,11 @@ static int xen_vcpuop_set_next_event(unsigned long delta,
WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);
single.timeout_abs_ns = get_abs_timeout(delta);
- single.flags = VCPU_SSHOTTMR_future;
+ /* Get an event anyway, even if the timeout is already expired */
+ single.flags = 0;
ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);
-
- BUG_ON(ret != 0 && ret != -ETIME);
+ BUG_ON(ret != 0);
return ret;
}
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 14c6c3a6f04b..8164f05d2372 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -160,6 +160,8 @@ static int __init parse_tag_initrd(const bp_tag_t* tag)
__tagtable(BP_TAG_INITRD, parse_tag_initrd);
+#endif /* CONFIG_BLK_DEV_INITRD */
+
#ifdef CONFIG_OF
static int __init parse_tag_fdt(const bp_tag_t *tag)
@@ -180,8 +182,6 @@ void __init early_init_dt_setup_initrd_arch(unsigned long start,
#endif /* CONFIG_OF */
-#endif /* CONFIG_BLK_DEV_INITRD */
-
static int __init parse_tag_cmdline(const bp_tag_t* tag)
{
strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 5d3f7a119ed1..1ff0b92eeae7 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -86,7 +86,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
/* At this point: (!vmm || addr < vmm->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
- if (!vmm || addr + len <= vmm->vm_start)
+ if (!vmm || addr + len <= vm_start_gap(vmm))
return addr;
addr = vmm->vm_end;
if (flags & MAP_SHARED)
diff --git a/block/bsg.c b/block/bsg.c
index 420a5a9f1b23..76801e57f556 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -675,6 +675,9 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
dprintk("%s: write %Zd bytes\n", bd->name, count);
+ if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
+ return -EINVAL;
+
bsg_set_block(bd, file);
bytes_written = 0;
diff --git a/block/genhd.c b/block/genhd.c
index 7af2f6a18d9b..afd8206e530d 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -662,7 +662,6 @@ void del_gendisk(struct gendisk *disk)
kobject_put(disk->part0.holder_dir);
kobject_put(disk->slave_dir);
- disk->driverfs_dev = NULL;
if (!sysfs_deprecated)
sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 1b4988b4bc11..9bfbb51aa75e 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -175,6 +175,9 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
__set_bit(WRITE_16, filter->write_ok);
__set_bit(WRITE_LONG, filter->write_ok);
__set_bit(WRITE_LONG_2, filter->write_ok);
+ __set_bit(WRITE_SAME, filter->write_ok);
+ __set_bit(WRITE_SAME_16, filter->write_ok);
+ __set_bit(WRITE_SAME_32, filter->write_ok);
__set_bit(ERASE, filter->write_ok);
__set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
__set_bit(MODE_SELECT, filter->write_ok);
diff --git a/crypto/Makefile b/crypto/Makefile
index b54916590d3a..139e7e0a06b4 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
+CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
obj-$(CONFIG_CRYPTO_ECB) += ecb.o
@@ -72,6 +73,7 @@ obj-$(CONFIG_CRYPTO_BLOWFISH_COMMON) += blowfish_common.o
obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
+CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
diff --git a/crypto/algapi.c b/crypto/algapi.c
index daf2f653b131..8ea7a5dc3839 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -337,6 +337,7 @@ int crypto_register_alg(struct crypto_alg *alg)
struct crypto_larval *larval;
int err;
+ alg->cra_flags &= ~CRYPTO_ALG_DEAD;
err = crypto_check_alg(alg);
if (err)
return err;
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index d11d431251f7..63e154017f53 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -195,7 +195,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
struct ahash_request *req = &ctx->req;
- char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))];
+ char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req)) ? : 1];
struct sock *sk2;
struct alg_sock *ask2;
struct hash_ctx *ctx2;
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index d85fab975514..acbe1b978431 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -606,6 +606,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;
inst->alg.halg.digestsize = salg->digestsize;
+ inst->alg.halg.statesize = salg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 97c949abfabb..2af5b5a7d7e2 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -2,7 +2,6 @@
# Makefile for the Linux ACPI interpreter
#
-ccflags-y := -Os
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
#
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 288bb270f8ed..9954200c32d0 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -211,6 +211,7 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
return -EINVAL;
/* The state of the list is 'on' IFF all resources are 'on'. */
+ cur_state = 0;
list_for_each_entry(entry, list, node) {
struct acpi_power_resource *resource = entry->resource;
acpi_handle handle = resource->device.handle;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 0dc9ff61d7c2..e3ecaf4d64f4 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -1263,6 +1263,9 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
union acpi_object *dod = NULL;
union acpi_object *obj;
+ if (!video->cap._DOD)
+ return AE_NOT_EXIST;
+
status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer);
if (!ACPI_SUCCESS(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD"));
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index b256ff5b6579..d9f45c821ac4 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4097,6 +4097,9 @@ static int mv_platform_probe(struct platform_device *pdev)
host->iomap = NULL;
hpriv->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
+ if (!hpriv->base)
+ return -ENOMEM;
+
hpriv->base -= SATAHC0_REG_BASE;
#if defined(CONFIG_HAVE_CLK)
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index f72f52b4b1dd..65e36873656b 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -432,8 +432,11 @@ static int bcma_device_probe(struct device *dev)
drv);
int err = 0;
+ get_device(dev);
if (adrv->probe)
err = adrv->probe(core);
+ if (err)
+ put_device(dev);
return err;
}
@@ -446,6 +449,7 @@ static int bcma_device_remove(struct device *dev)
if (adrv->remove)
adrv->remove(core);
+ put_device(dev);
return 0;
}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 30878924e65b..8ccb96ae3fd9 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -579,9 +579,12 @@ config TELCLOCK
controlling the behavior of this hardware.
config DEVPORT
- bool
+ bool "/dev/port character device"
depends on ISA || PCI
default y
+ help
+ Say Y here if you want to support the /dev/port device. The /dev/port
+ device is similar to /dev/mem, but for I/O ports.
source "drivers/s390/char/Kconfig"
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 598ece77ee9e..40d2e99c6ba7 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -61,6 +61,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
#endif
#ifdef CONFIG_STRICT_DEVMEM
+static inline int page_is_allowed(unsigned long pfn)
+{
+ return devmem_is_allowed(pfn);
+}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
u64 from = ((u64)pfn) << PAGE_SHIFT;
@@ -76,6 +80,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
return 1;
}
#else
+static inline int page_is_allowed(unsigned long pfn)
+{
+ return 1;
+}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
return 1;
@@ -117,23 +125,31 @@ static ssize_t read_mem(struct file *file, char __user *buf,
while (count > 0) {
unsigned long remaining;
+ int allowed;
sz = size_inside_page(p, count);
- if (!range_is_allowed(p >> PAGE_SHIFT, count))
+ allowed = page_is_allowed(p >> PAGE_SHIFT);
+ if (!allowed)
return -EPERM;
+ if (allowed == 2) {
+ /* Show zeros for restricted memory. */
+ remaining = clear_user(buf, sz);
+ } else {
+ /*
+ * On ia64 if a page has been mapped somewhere as
+ * uncached, then it must also be accessed uncached
+ * by the kernel or data corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr)
+ return -EFAULT;
- /*
- * On ia64 if a page has been mapped somewhere as uncached, then
- * it must also be accessed uncached by the kernel or data
- * corruption may occur.
- */
- ptr = xlate_dev_mem_ptr(p);
- if (!ptr)
- return -EFAULT;
+ remaining = copy_to_user(buf, ptr, sz);
+
+ unxlate_dev_mem_ptr(p, ptr);
+ }
- remaining = copy_to_user(buf, ptr, sz);
- unxlate_dev_mem_ptr(p, ptr);
if (remaining)
return -EFAULT;
@@ -173,30 +189,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
#endif
while (count > 0) {
+ int allowed;
+
sz = size_inside_page(p, count);
- if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+ allowed = page_is_allowed(p >> PAGE_SHIFT);
+ if (!allowed)
return -EPERM;
- /*
- * On ia64 if a page has been mapped somewhere as uncached, then
- * it must also be accessed uncached by the kernel or data
- * corruption may occur.
- */
- ptr = xlate_dev_mem_ptr(p);
- if (!ptr) {
- if (written)
- break;
- return -EFAULT;
- }
+ /* Skip actual writing when a page is marked as restricted. */
+ if (allowed == 1) {
+ /*
+ * On ia64 if a page has been mapped somewhere as
+ * uncached, then it must also be accessed uncached
+ * by the kernel or data corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr) {
+ if (written)
+ break;
+ return -EFAULT;
+ }
- copied = copy_from_user(ptr, buf, sz);
- unxlate_dev_mem_ptr(p, ptr);
- if (copied) {
- written += sz - copied;
- if (written)
- break;
- return -EFAULT;
+ copied = copy_from_user(ptr, buf, sz);
+ unxlate_dev_mem_ptr(p, ptr);
+ if (copied) {
+ written += sz - copied;
+ if (written)
+ break;
+ return -EFAULT;
+ }
}
buf += sz;
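
Taken together with the arch/x86/mm/init.c change earlier in this patch, devmem_is_allowed() is now a tri-state and read_mem()/write_mem() honour it as shown above. Summarised with an illustrative enum (hypothetical names; the kernel simply uses 0, 1 and 2):

    enum devmem_access {
            DEVMEM_DENIED  = 0,  /* -EPERM                                          */
            DEVMEM_ALLOWED = 1,  /* normal access through xlate_dev_mem_ptr()       */
            DEVMEM_ZEROED  = 2,  /* reads return zeros, writes are silently skipped */
    };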
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index ec3bd62eeaf6..d69c63fdae67 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1129,6 +1129,8 @@ static int put_chars(u32 vtermno, const char *buf, int count)
{
struct port *port;
struct scatterlist sg[1];
+ void *data;
+ int ret;
if (unlikely(early_put_chars))
return early_put_chars(vtermno, buf, count);
@@ -1137,8 +1139,14 @@ static int put_chars(u32 vtermno, const char *buf, int count)
if (!port)
return -EPIPE;
- sg_init_one(sg, buf, count);
- return __send_to_port(port, sg, 1, count, (void *)buf, false);
+ data = kmemdup(buf, count, GFP_ATOMIC);
+ if (!data)
+ return -ENOMEM;
+
+ sg_init_one(sg, data, count);
+ ret = __send_to_port(port, sg, 1, count, data, false);
+ kfree(data);
+ return ret;
}
/*
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 66f6cf5064ec..d85c41800952 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -437,9 +437,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
char *buf)
{
unsigned int cur_freq = __cpufreq_get(policy->cpu);
- if (!cur_freq)
- return sprintf(buf, "<unknown>");
- return sprintf(buf, "%u\n", cur_freq);
+
+ if (cur_freq)
+ return sprintf(buf, "%u\n", cur_freq);
+
+ return sprintf(buf, "<unknown>\n");
}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 62834322b337..4e7c97aa9e59 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -120,7 +120,8 @@ static int ast_get_dram_info(struct drm_device *dev)
ast_write32(ast, 0x10000, 0xfc600309);
do {
- ;
+ if (pci_channel_offline(dev->pdev))
+ return -EIO;
} while (ast_read32(ast, 0x10000) != 0x01);
data = ast_read32(ast, 0x10004);
@@ -343,7 +344,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
ast_detect_chip(dev);
if (ast->chip != AST1180) {
- ast_get_dram_info(dev);
+ ret = ast_get_dram_info(dev);
+ if (ret)
+ goto out_free;
ast->vram_size = ast_get_vram_info(dev);
DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
}
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 977cfb35837a..d3464f35f427 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -53,13 +53,9 @@ ast_is_vga_enabled(struct drm_device *dev)
/* TODO 1180 */
} else {
ch = ast_io_read8(ast, 0x43);
- if (ch) {
- ast_open_key(ast);
- ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff);
- return ch & 0x04;
- }
+ return !!(ch & 0x01);
}
- return 0;
+ return false;
}
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 973056b86207..b16e051e48f0 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -224,6 +224,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
uint32_t mpllP;
pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
+ mpllP = (mpllP >> 8) & 0xf;
if (!mpllP)
mpllP = 4;
@@ -234,7 +235,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
uint32_t clock;
pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
- return clock;
+ return clock / 1000;
}
ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0ac0a88860a4..f1672f388983 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1866,7 +1866,6 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
struct ttm_buffer_object *bo;
int ret = -EBUSY;
int put_count;
- uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &glob->swap_lru, swap) {
@@ -1904,7 +1903,8 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
if (unlikely(ret != 0))
goto out;
- if ((bo->mem.placement & swap_placement) != swap_placement) {
+ if (bo->mem.mem_type != TTM_PL_SYSTEM ||
+ bo->ttm->caching_state != tt_cached) {
struct ttm_mem_reg evict_mem;
evict_mem = bo->mem;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index c509d40c4897..17a503ff260f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -69,8 +69,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
break;
}
default:
- DRM_ERROR("Illegal vmwgfx get param request: %d\n",
- param->param);
return -EINVAL;
}
@@ -90,7 +88,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
void *bounce;
int ret;
- if (unlikely(arg->pad64 != 0)) {
+ if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
DRM_ERROR("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 582814339748..12969378c06e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -677,11 +677,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
128;
num_sizes = 0;
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
num_sizes += req->mip_levels[i];
+ }
- if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
- DRM_VMW_MAX_MIP_LEVELS)
+ if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
+ num_sizes == 0)
return -EINVAL;
size = vmw_user_surface_size + 128 +
diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
index c4ef3bc726e3..e299576004ce 100644
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -39,6 +39,9 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
return rdesc;
+ if (*rsize < 4)
+ return rdesc;
+
for (i = 0; i < *rsize - 4; i++)
if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
__u8 tmp;
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 12fc48c968e6..34dbb9d6d852 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -790,7 +790,7 @@ static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
.driver_data = LG_FF },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
- .driver_data = LG_FF2 },
+ .driver_data = LG_NOGET | LG_FF2 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
.driver_data = LG_FF3 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index ccc2f36bb334..6584a4d6b880 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -326,6 +326,15 @@ static int i2c_hid_hwreset(struct i2c_client *client)
if (ret)
return ret;
+ /*
+ * The HID over I2C specification states that if a DEVICE needs time
+ * after the PWR_ON request, it should utilise CLOCK stretching.
+ * However, it has been observed that the Windows driver provides a
+ * 1ms sleep between the PWR_ON and RESET requests and that some devices
+ * rely on this.
+ */
+ usleep_range(1000, 5000);
+
i2c_hid_dbg(ihid, "resetting...\n");
ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 05e6a7d13d4e..50e6ba9548b4 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -114,7 +114,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
struct vmbus_channel_msginfo *open_info = NULL;
void *in, *out;
unsigned long flags;
- int ret, t, err = 0;
+ int ret, err = 0;
newchannel->onchannel_callback = onchannelcallback;
newchannel->channel_callback_context = context;
@@ -204,11 +204,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
goto error1;
}
- t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
- if (t == 0) {
- err = -ETIMEDOUT;
- goto error1;
- }
+ wait_for_completion(&open_info->waitevent);
if (open_info->response.open_result.status)
@@ -391,7 +387,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
struct vmbus_channel_gpadl_header *gpadlmsg;
struct vmbus_channel_gpadl_body *gpadl_body;
struct vmbus_channel_msginfo *msginfo = NULL;
- struct vmbus_channel_msginfo *submsginfo;
+ struct vmbus_channel_msginfo *submsginfo, *tmp;
u32 msgcount;
struct list_head *curr;
u32 next_gpadl_handle;
@@ -453,6 +449,13 @@ cleanup:
list_del(&msginfo->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ if (msgcount > 1) {
+ list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
+ msglistentry) {
+ kfree(submsginfo);
+ }
+ }
+
kfree(msginfo);
return ret;
}
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index b1039552b623..4e4cb3db3239 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -154,7 +154,7 @@ int hv_init(void)
/* See if the hypercall page is already set */
rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
if (!virtaddr)
goto cleanup;
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 694173f662d1..d285165435d7 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -673,7 +673,7 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
* If the pfn range we are dealing with is not in the current
* "hot add block", move on.
*/
- if ((start_pfn >= has->end_pfn))
+ if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
continue;
/*
* If the current hot add-request extends beyond
@@ -728,7 +728,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
* If the pfn range we are dealing with is not in the current
* "hot add block", move on.
*/
- if ((start_pfn >= has->end_pfn))
+ if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
continue;
old_covered_state = has->covered_end_pfn;
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index c3ccdea3d180..fa3ecec524fa 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -328,7 +328,7 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
unsigned long arg)
{
struct i2c_smbus_ioctl_data data_arg;
- union i2c_smbus_data temp;
+ union i2c_smbus_data temp = {};
int datasize, res;
if (copy_from_user(&data_arg,
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 71c2c7116802..818cac9bbd8a 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2772,6 +2772,9 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
struct iw_cm_conn_param iw_param;
int ret;
+ if (!conn_param)
+ return -EINVAL;
+
ret = cma_modify_qp_rtr(id_priv, conn_param);
if (ret)
return ret;
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index f362883c94e3..3736c1759524 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -188,6 +188,17 @@ static void joydev_detach_client(struct joydev *joydev,
synchronize_rcu();
}
+static void joydev_refresh_state(struct joydev *joydev)
+{
+ struct input_dev *dev = joydev->handle.dev;
+ int i, val;
+
+ for (i = 0; i < joydev->nabs; i++) {
+ val = input_abs_get_val(dev, joydev->abspam[i]);
+ joydev->abs[i] = joydev_correct(val, &joydev->corr[i]);
+ }
+}
+
static int joydev_open_device(struct joydev *joydev)
{
int retval;
@@ -202,6 +213,8 @@ static int joydev_open_device(struct joydev *joydev)
retval = input_open_device(&joydev->handle);
if (retval)
joydev->open--;
+ else
+ joydev_refresh_state(joydev);
}
mutex_unlock(&joydev->mutex);
@@ -823,7 +836,6 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
j = joydev->abspam[i];
if (input_abs_get_max(dev, j) == input_abs_get_min(dev, j)) {
joydev->corr[i].type = JS_CORR_NONE;
- joydev->abs[i] = input_abs_get_val(dev, j);
continue;
}
joydev->corr[i].type = JS_CORR_BROKEN;
@@ -838,10 +850,6 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
if (t) {
joydev->corr[i].coef[2] = (1 << 29) / t;
joydev->corr[i].coef[3] = (1 << 29) / t;
-
- joydev->abs[i] =
- joydev_correct(input_abs_get_val(dev, j),
- joydev->corr + i);
}
}
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index d96aa27dfcdc..db64adfbe1af 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -141,6 +141,9 @@ static int iforce_usb_probe(struct usb_interface *intf,
interface = intf->cur_altsetting;
+ if (interface->desc.bNumEndpoints < 2)
+ return -ENODEV;
+
epirq = &interface->endpoint[0].desc;
epout = &interface->endpoint[1].desc;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 685e125d6366..24e5683d6c91 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -901,6 +901,12 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
input_dev->name = xpad_device[i].name;
input_dev->phys = xpad->phys;
usb_to_input_id(udev, &input_dev->id);
+
+ if (xpad->xtype == XTYPE_XBOX360W) {
+ /* x360w controllers and the receiver have different ids */
+ input_dev->id.product = 0x02a1;
+ }
+
input_dev->dev.parent = &intf->dev;
input_set_drvdata(input_dev, xpad);
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index f7f3e9a9fd3f..e13713b7658c 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -88,7 +88,8 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
struct mpr121_touchkey *mpr121 = dev_id;
struct i2c_client *client = mpr121->client;
struct input_dev *input = mpr121->input_dev;
- unsigned int key_num, key_val, pressed;
+ unsigned long bit_changed;
+ unsigned int key_num;
int reg;
reg = i2c_smbus_read_byte_data(client, ELE_TOUCH_STATUS_1_ADDR);
@@ -106,18 +107,22 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
reg &= TOUCH_STATUS_MASK;
/* use old press bit to figure out which bit changed */
- key_num = ffs(reg ^ mpr121->statusbits) - 1;
- pressed = reg & (1 << key_num);
+ bit_changed = reg ^ mpr121->statusbits;
mpr121->statusbits = reg;
+ for_each_set_bit(key_num, &bit_changed, mpr121->keycount) {
+ unsigned int key_val, pressed;
- key_val = mpr121->keycodes[key_num];
+ pressed = reg & BIT(key_num);
+ key_val = mpr121->keycodes[key_num];
- input_event(input, EV_MSC, MSC_SCAN, key_num);
- input_report_key(input, key_val, pressed);
- input_sync(input);
+ input_event(input, EV_MSC, MSC_SCAN, key_num);
+ input_report_key(input, key_val, pressed);
+
+ dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val,
+ pressed ? "pressed" : "released");
- dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val,
- pressed ? "pressed" : "released");
+ }
+ input_sync(input);
out:
return IRQ_HANDLED;
@@ -230,6 +235,7 @@ static int mpr_touchkey_probe(struct i2c_client *client,
input_dev->id.bustype = BUS_I2C;
input_dev->dev.parent = &client->dev;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+ input_set_capability(input_dev, EV_MSC, MSC_SCAN);
input_dev->keycode = mpr121->keycodes;
input_dev->keycodesize = sizeof(mpr121->keycodes[0]);
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 55c15304ddbc..92c742420e20 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -274,6 +274,7 @@ static int tca8418_keypad_probe(struct i2c_client *client,
bool irq_is_gpio = false;
int irq;
int error, row_shift, max_keys;
+ unsigned long trigger = 0;
/* Copy the platform data */
if (pdata) {
@@ -286,6 +287,7 @@ static int tca8418_keypad_probe(struct i2c_client *client,
cols = pdata->cols;
rep = pdata->rep;
irq_is_gpio = pdata->irq_is_gpio;
+ trigger = IRQF_TRIGGER_FALLING;
} else {
struct device_node *np = dev->of_node;
int err;
@@ -360,9 +362,7 @@ static int tca8418_keypad_probe(struct i2c_client *client,
irq = gpio_to_irq(irq);
error = devm_request_threaded_irq(dev, irq, NULL, tca8418_irq_handler,
- IRQF_TRIGGER_FALLING |
- IRQF_SHARED |
- IRQF_ONESHOT,
+ trigger | IRQF_SHARED | IRQF_ONESHOT,
client->name, keypad_data);
if (error) {
dev_err(dev, "Unable to claim irq %d; error %d\n",
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 875e680e90c2..566ced8b3bb7 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -120,6 +120,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
},
},
{
+ /* Dell Embedded Box PC 3000 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
+ },
+ },
+ {
/* OQO Model 01 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
@@ -580,6 +587,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
},
},
+ {
+ /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
+ },
+ },
{ }
};
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 3fba74b9b602..f0d532684afd 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -123,6 +123,9 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
struct input_dev *input_dev;
int error = -ENOMEM;
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
input_dev = input_allocate_device();
if (!kbtab || !input_dev)
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index a82e542ffc21..fecbf1d2f60b 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -11304,7 +11304,8 @@ static void mixer_notify_update(PLCI *plci, byte others)
((CAPI_MSG *) msg)->header.ncci = 0;
((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
- PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE);
+ ((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff;
+ ((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8;
((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
if (w != _QUEUE_FULL)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 7409d79729ee..53ce281e4129 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1283,12 +1283,15 @@ static int crypt_set_key(struct crypt_config *cc, char *key)
if (!cc->key_size && strcmp(key, "-"))
goto out;
+ /* Clear the flag since the following operations may invalidate a previously valid key */
+ clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+
if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
goto out;
- set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
-
r = crypt_setkey_allcpus(cc);
+ if (!r)
+ set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
out:
/* Hex key string not needed after here, so wipe it. */
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index f03fabd2b37b..f169afac0266 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -97,6 +97,12 @@ static int linear_mergeable_bvec(struct request_queue *q,
return maxsectors << 9;
}
+/*
+ * In linear_congested() conf->raid_disks is used as a copy of
+ * mddev->raid_disks to iterate conf->disks[]. Because conf->raid_disks
+ * and conf->disks[] are created together in linear_conf(), they are
+ * always consistent with each other, whereas mddev->raid_disks may not be.
+ */
static int linear_congested(void *data, int bits)
{
struct mddev *mddev = data;
@@ -109,7 +115,7 @@ static int linear_congested(void *data, int bits)
rcu_read_lock();
conf = rcu_dereference(mddev->private);
- for (i = 0; i < mddev->raid_disks && !ret ; i++) {
+ for (i = 0; i < conf->raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
}
@@ -196,6 +202,19 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
conf->disks[i-1].end_sector +
conf->disks[i].rdev->sectors;
+ /*
+ * conf->raid_disks is a copy of mddev->raid_disks. The reason to
+ * keep a copy of mddev->raid_disks in struct linear_conf is that
+ * mddev->raid_disks may not be consistent with the number of
+ * pointers in conf->disks[] when it is updated in linear_add() and
+ * used to iterate the old conf->disks[] array in linear_congested().
+ * Here conf->raid_disks is always consistent with the number of
+ * pointers in the conf->disks[] array, and mddev->private is updated
+ * with rcu_assign_pointer() in linear_add(), so that race can be
+ * avoided.
+ */
+ conf->raid_disks = raid_disks;
+
return conf;
out:
@@ -252,10 +271,18 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
if (!newconf)
return -ENOMEM;
+ /* newconf->raid_disks already keeps a copy of the increased
+ * value of mddev->raid_disks; WARN_ONCE() is just used to make
+ * sure of this. It is possible that oldconf is still referenced
+ * in linear_congested(), therefore kfree_rcu() is used to free
+ * oldconf only once no one uses it anymore.
+ */
oldconf = rcu_dereference_protected(mddev->private,
lockdep_is_held(
&mddev->reconfig_mutex));
mddev->raid_disks++;
+ WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
+ "copied raid_disks doesn't match mddev->raid_disks");
rcu_assign_pointer(mddev->private, newconf);
md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
set_capacity(mddev->gendisk, mddev->array_sectors);
diff --git a/drivers/md/linear.h b/drivers/md/linear.h
index b685ddd7d7f7..8d392e6098b3 100644
--- a/drivers/md/linear.h
+++ b/drivers/md/linear.h
@@ -10,6 +10,7 @@ struct linear_conf
{
struct rcu_head rcu;
sector_t array_sectors;
+ int raid_disks; /* a copy of mddev->raid_disks */
struct dev_info disks[0];
};
#endif
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 056d09c33af1..c79d6480fbed 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -679,15 +679,13 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
r = sm_ll_new_metadata(&smm->ll, tm);
+ if (!r) {
+ r = sm_ll_extend(&smm->ll, nr_blocks);
+ }
+ memcpy(&smm->sm, &ops, sizeof(smm->sm));
if (r)
return r;
- r = sm_ll_extend(&smm->ll, nr_blocks);
- if (r)
- return r;
-
- memcpy(&smm->sm, &ops, sizeof(smm->sm));
-
/*
* Now we need to update the newly created data structures with the
* allocated blocks that they were built from.
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 63d42ae56a1c..a8315aaba9fe 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -560,7 +560,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
if (best_dist_disk < 0) {
if (is_badblock(rdev, this_sector, sectors,
&first_bad, &bad_sectors)) {
- if (first_bad < this_sector)
+ if (first_bad <= this_sector)
/* Cannot use this */
continue;
best_good_sectors = first_bad - this_sector;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9ee3c460fa37..8f5c890f9983 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5616,6 +5616,15 @@ static int run(struct mddev *mddev)
stripe = (stripe | (stripe-1)) + 1;
mddev->queue->limits.discard_alignment = stripe;
mddev->queue->limits.discard_granularity = stripe;
+
+ /*
+ * We use a 16-bit counter of active stripes in bi_phys_segments
+ * (minus one for over-loaded initialization)
+ */
+ blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS);
+ blk_queue_max_discard_sectors(mddev->queue,
+ 0xfffe * STRIPE_SECTORS);
+
/*
* unaligned part of discard request will be ignored, so can't
* guarantee discard_zeroes_data
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index 03761c6f472f..8e7c78567138 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -206,20 +206,28 @@ static int smsusb_start_streaming(struct smsusb_device_t *dev)
static int smsusb_sendrequest(void *context, void *buffer, size_t size)
{
struct smsusb_device_t *dev = (struct smsusb_device_t *) context;
- struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer;
- int dummy;
+ struct sms_msg_hdr *phdr;
+ int dummy, ret;
if (dev->state != SMSUSB_ACTIVE)
return -ENOENT;
+ phdr = kmalloc(size, GFP_KERNEL);
+ if (!phdr)
+ return -ENOMEM;
+ memcpy(phdr, buffer, size);
+
sms_debug("sending %s(%d) size: %d",
smscore_translate_msg(phdr->msg_type), phdr->msg_type,
phdr->msg_length);
smsendian_handle_tx_message((struct sms_msg_data *) phdr);
- smsendian_handle_message_header((struct sms_msg_hdr *)buffer);
- return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
- buffer, size, &dummy, 1000);
+ smsendian_handle_message_header((struct sms_msg_hdr *)phdr);
+ ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
+ phdr, size, &dummy, 1000);
+
+ kfree(phdr);
+ return ret;
}
static char *smsusb1_fw_lkup[] = {
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 363cdbf4ac8d..5422093d135c 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1533,6 +1533,114 @@ static const char *uvc_print_chain(struct uvc_video_chain *chain)
return buffer;
}
+static struct uvc_video_chain *uvc_alloc_chain(struct uvc_device *dev)
+{
+ struct uvc_video_chain *chain;
+
+ chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+ if (chain == NULL)
+ return NULL;
+
+ INIT_LIST_HEAD(&chain->entities);
+ mutex_init(&chain->ctrl_mutex);
+ chain->dev = dev;
+ v4l2_prio_init(&chain->prio);
+
+ return chain;
+}
+
+/*
+ * Fallback heuristic for devices that don't connect units and terminals in a
+ * valid chain.
+ *
+ * Some devices have invalid baSourceID references, causing uvc_scan_chain()
+ * to fail, but if we just take the entities we can find and put them together
+ * in the most sensible chain we can think of, it turns out they do work anyway.
+ * Note: This heuristic assumes there is a single chain.
+ *
+ * At the time of writing, devices known to have such a broken chain are
+ * - Acer Integrated Camera (5986:055a)
+ * - Realtek rtl157a7 (0bda:57a7)
+ */
+static int uvc_scan_fallback(struct uvc_device *dev)
+{
+ struct uvc_video_chain *chain;
+ struct uvc_entity *iterm = NULL;
+ struct uvc_entity *oterm = NULL;
+ struct uvc_entity *entity;
+ struct uvc_entity *prev;
+
+ /*
+ * Start by locating the input and output terminals. We only support
+ * devices with exactly one of each for now.
+ */
+ list_for_each_entry(entity, &dev->entities, list) {
+ if (UVC_ENTITY_IS_ITERM(entity)) {
+ if (iterm)
+ return -EINVAL;
+ iterm = entity;
+ }
+
+ if (UVC_ENTITY_IS_OTERM(entity)) {
+ if (oterm)
+ return -EINVAL;
+ oterm = entity;
+ }
+ }
+
+ if (iterm == NULL || oterm == NULL)
+ return -EINVAL;
+
+ /* Allocate the chain and fill it. */
+ chain = uvc_alloc_chain(dev);
+ if (chain == NULL)
+ return -ENOMEM;
+
+ if (uvc_scan_chain_entity(chain, oterm) < 0)
+ goto error;
+
+ prev = oterm;
+
+ /*
+ * Add all Processing and Extension Units with two pads. The order
+ * doesn't matter much; use reverse list traversal to connect units in
+ * UVC descriptor order as we build the chain from output to input. This
+ * leads to units appearing in the order meant by the manufacturer for
+ * the cameras known to require this heuristic.
+ */
+ list_for_each_entry_reverse(entity, &dev->entities, list) {
+ if (entity->type != UVC_VC_PROCESSING_UNIT &&
+ entity->type != UVC_VC_EXTENSION_UNIT)
+ continue;
+
+ if (entity->num_pads != 2)
+ continue;
+
+ if (uvc_scan_chain_entity(chain, entity) < 0)
+ goto error;
+
+ prev->baSourceID[0] = entity->id;
+ prev = entity;
+ }
+
+ if (uvc_scan_chain_entity(chain, iterm) < 0)
+ goto error;
+
+ prev->baSourceID[0] = iterm->id;
+
+ list_add_tail(&chain->list, &dev->chains);
+
+ uvc_trace(UVC_TRACE_PROBE,
+ "Found a video chain by fallback heuristic (%s).\n",
+ uvc_print_chain(chain));
+
+ return 0;
+
+error:
+ kfree(chain);
+ return -EINVAL;
+}
+
/*
* Scan the device for video chains and register video devices.
*
@@ -1555,15 +1663,10 @@ static int uvc_scan_device(struct uvc_device *dev)
if (term->chain.next || term->chain.prev)
continue;
- chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+ chain = uvc_alloc_chain(dev);
if (chain == NULL)
return -ENOMEM;
- INIT_LIST_HEAD(&chain->entities);
- mutex_init(&chain->ctrl_mutex);
- chain->dev = dev;
- v4l2_prio_init(&chain->prio);
-
term->flags |= UVC_ENTITY_FLAG_DEFAULT;
if (uvc_scan_chain(chain, term) < 0) {
@@ -1577,6 +1680,9 @@ static int uvc_scan_device(struct uvc_device *dev)
list_add_tail(&chain->list, &dev->chains);
}
+ if (list_empty(&dev->chains))
+ uvc_scan_fallback(dev);
+
if (list_empty(&dev->chains)) {
uvc_printk(KERN_INFO, "No valid video chain found.\n");
return -1;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 836e2ac36a0d..16d7f939a747 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1220,7 +1220,9 @@ clock_set:
return;
}
timeout--;
- mdelay(1);
+ spin_unlock_irq(&host->lock);
+ usleep_range(900, 1100);
+ spin_lock_irq(&host->lock);
}
clk |= SDHCI_CLOCK_CARD_EN;
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 9279a9174f84..04e2e4308890 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -159,12 +159,10 @@ static int bcm47xxpart_parse(struct mtd_info *master,
last_trx_part = curr_part - 1;
- /*
- * We have whole TRX scanned, skip to the next part. Use
- * roundown (not roundup), as the loop will increase
- * offset in next step.
- */
- offset = rounddown(offset + trx->length, blocksize);
+ /* Jump to the end of TRX */
+ offset = roundup(offset + trx->length, blocksize);
+ /* Next loop iteration will increase the offset */
+ offset -= blocksize;
continue;
}
}
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
index f9fa3fad728e..2051f28ddac6 100644
--- a/drivers/mtd/maps/pmcmsp-flash.c
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -139,15 +139,13 @@ static int __init init_msp_flash(void)
}
msp_maps[i].bankwidth = 1;
- msp_maps[i].name = kmalloc(7, GFP_KERNEL);
+ msp_maps[i].name = kstrndup(flash_name, 7, GFP_KERNEL);
if (!msp_maps[i].name) {
iounmap(msp_maps[i].virt);
kfree(msp_parts[i]);
goto cleanup_loop;
}
- msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7);
-
for (j = 0; j < pcnt; j++) {
part_name[5] = '0' + i;
part_name[7] = '0' + j;
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 0134ba32a057..39712560b4c1 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -148,11 +148,11 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
return err;
}
- if (bytes == 0) {
- err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
- if (err)
- return err;
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+ if (err)
+ return err;
+ if (bytes == 0) {
err = clear_update_marker(ubi, vol, 0);
if (err)
return err;
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index b374be7891a2..b905e5e840f7 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -109,6 +109,7 @@ static int c_can_pci_probe(struct pci_dev *pdev,
dev->irq = pdev->irq;
priv->base = addr;
+ priv->device = &pdev->dev;
if (!c_can_pci_data->freq) {
dev_err(&pdev->dev, "no clock frequency defined\n");
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index f21fc37ec578..2c19b4ffe823 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -962,7 +962,12 @@ static int ti_hecc_probe(struct platform_device *pdev)
netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
HECC_DEF_NAPI_WEIGHT);
- clk_enable(priv->clk);
+ err = clk_prepare_enable(priv->clk);
+ if (err) {
+ dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
+ goto probe_exit_clk;
+ }
+
err = register_candev(ndev);
if (err) {
dev_err(&pdev->dev, "register_candev() failed\n");
@@ -995,7 +1000,7 @@ static int ti_hecc_remove(struct platform_device *pdev)
struct ti_hecc_priv *priv = netdev_priv(ndev);
unregister_candev(ndev);
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
clk_put(priv->clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iounmap(priv->base);
@@ -1021,7 +1026,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
priv->can.state = CAN_STATE_SLEEPING;
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return 0;
}
@@ -1030,8 +1035,11 @@ static int ti_hecc_resume(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct ti_hecc_priv *priv = netdev_priv(dev);
+ int err;
- clk_enable(priv->clk);
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 3a220d2f2ee1..9a82890f64e5 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -817,23 +817,25 @@ lbl_free_candev:
static void peak_usb_disconnect(struct usb_interface *intf)
{
struct peak_usb_device *dev;
+ struct peak_usb_device *dev_prev_siblings;
/* unregister as many netdev devices as siblings */
- for (dev = usb_get_intfdata(intf); dev; dev = dev->prev_siblings) {
+ for (dev = usb_get_intfdata(intf); dev; dev = dev_prev_siblings) {
struct net_device *netdev = dev->netdev;
char name[IFNAMSIZ];
+ dev_prev_siblings = dev->prev_siblings;
dev->state &= ~PCAN_USB_STATE_CONNECTED;
strncpy(name, netdev->name, IFNAMSIZ);
unregister_netdev(netdev);
- free_candev(netdev);
kfree(dev->cmd_buf);
dev->next_siblings = NULL;
if (dev->adapter->dev_free)
dev->adapter->dev_free(dev);
+ free_candev(netdev);
dev_info(&intf->dev, "%s removed\n", name);
}
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index cbd388eea682..f8b84fed537b 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -956,8 +956,8 @@ static int usb_8dev_probe(struct usb_interface *intf,
for (i = 0; i < MAX_TX_URBS; i++)
priv->tx_contexts[i].echo_index = MAX_TX_URBS;
- priv->cmd_msg_buffer = kzalloc(sizeof(struct usb_8dev_cmd_msg),
- GFP_KERNEL);
+ priv->cmd_msg_buffer = devm_kzalloc(&intf->dev, sizeof(struct usb_8dev_cmd_msg),
+ GFP_KERNEL);
if (!priv->cmd_msg_buffer)
goto cleanup_candev;
@@ -971,7 +971,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
if (err) {
netdev_err(netdev,
"couldn't register CAN device: %d\n", err);
- goto cleanup_cmd_msg_buffer;
+ goto cleanup_candev;
}
err = usb_8dev_cmd_version(priv, &version);
@@ -992,9 +992,6 @@ static int usb_8dev_probe(struct usb_interface *intf,
cleanup_unregister_candev:
unregister_netdev(priv->netdev);
-cleanup_cmd_msg_buffer:
- kfree(priv->cmd_msg_buffer);
-
cleanup_candev:
free_candev(netdev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index ce1a91618677..9c19f49f0f54 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1792,8 +1792,16 @@ static void bnx2x_get_ringparam(struct net_device *dev,
ering->rx_max_pending = MAX_RX_AVAIL;
+ /* If the size isn't already set, we give an estimate of the number
+ * of buffers we'll have. We're neglecting some possible conditions
+ * [we can't know for certain at this point whether the number of
+ * queues might shrink], but the number would be correct for the
+ * likely scenario.
+ */
if (bp->rx_ring_size)
ering->rx_pending = bp->rx_ring_size;
+ else if (BNX2X_NUM_RX_QUEUES(bp))
+ ering->rx_pending = MAX_RX_AVAIL / BNX2X_NUM_RX_QUEUES(bp);
else
ering->rx_pending = MAX_RX_AVAIL;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 07f7ef05c3f2..d18ee75bdd54 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -193,6 +193,7 @@ bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
return 0;
hw_cons = *(tcb->hw_consumer_index);
+ rmb();
cons = tcb->consumer_index;
q_depth = tcb->q_depth;
@@ -2903,13 +2904,12 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
BNA_QE_INDX_INC(prod, q_depth);
tcb->producer_index = prod;
- smp_mb();
+ wmb();
if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
return NETDEV_TX_OK;
bna_txq_prod_indx_doorbell(tcb);
- smp_mb();
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 5dec66a96793..583ebff31160 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -87,6 +87,10 @@ s32 igb_get_phy_id(struct e1000_hw *hw)
s32 ret_val = 0;
u16 phy_id;
+ /* ensure PHY page selection to fix misconfigured i210 */
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+ phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0);
+
ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
if (ret_val)
goto out;
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 31bbbca341a7..922f7dd6028e 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -557,7 +557,8 @@ fatal_error:
static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- int queue, len;
+ int queue;
+ unsigned int len;
struct cpmac_desc *desc;
struct cpmac_priv *priv = netdev_priv(dev);
@@ -567,7 +568,7 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb_padto(skb, ETH_ZLEN)))
return NETDEV_TX_OK;
- len = max(skb->len, ETH_ZLEN);
+ len = max_t(unsigned int, skb->len, ETH_ZLEN);
queue = skb_get_queue_mapping(skb);
netif_stop_subqueue(dev, queue);
@@ -1241,7 +1242,7 @@ int cpmac_init(void)
goto fail_alloc;
}
-#warning FIXME: unhardcode gpio&reset bits
+ /* FIXME: unhardcode gpio&reset bits */
ar7_gpio_disable(26);
ar7_gpio_disable(27);
ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 59e9c56e5b8a..493460424a00 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -48,6 +48,9 @@ struct net_device_context {
struct work_struct work;
};
+/* Restrict GSO size to account for NVGRE */
+#define NETVSC_GSO_MAX_SIZE 62768
+
#define RING_SIZE_MIN 64
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
@@ -436,6 +439,7 @@ static int netvsc_probe(struct hv_device *dev,
SET_ETHTOOL_OPS(net, &ethtool_ops);
SET_NETDEV_DEV(net, &dev->device);
+ netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE);
ret = register_netdev(net);
if (ret != 0) {
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 8fc46fcaee54..1c51abbecedb 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -678,7 +678,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
size_t linear;
if (q->flags & IFF_VNET_HDR) {
- vnet_hdr_len = q->vnet_hdr_sz;
+ vnet_hdr_len = ACCESS_ONCE(q->vnet_hdr_sz);
err = -EINVAL;
if (len < vnet_hdr_len)
@@ -809,7 +809,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
if (q->flags & IFF_VNET_HDR) {
struct virtio_net_hdr vnet_hdr;
- vnet_hdr_len = q->vnet_hdr_sz;
+ vnet_hdr_len = ACCESS_ONCE(q->vnet_hdr_sz);
if ((len -= vnet_hdr_len) < 0)
return -EINVAL;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ea6ada39db15..7bbc43fbb720 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1087,9 +1087,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}
if (tun->flags & TUN_VNET_HDR) {
- if (len < tun->vnet_hdr_sz)
+ int vnet_hdr_sz = ACCESS_ONCE(tun->vnet_hdr_sz);
+
+ if (len < vnet_hdr_sz)
return -EINVAL;
- len -= tun->vnet_hdr_sz;
+ len -= vnet_hdr_sz;
if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
return -EFAULT;
@@ -1100,7 +1102,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (gso.hdr_len > len)
return -EINVAL;
- offset += tun->vnet_hdr_sz;
+ offset += vnet_hdr_sz;
}
if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
@@ -1275,7 +1277,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
int vnet_hdr_sz = 0;
if (tun->flags & TUN_VNET_HDR)
- vnet_hdr_sz = tun->vnet_hdr_sz;
+ vnet_hdr_sz = ACCESS_ONCE(tun->vnet_hdr_sz);
if (!(tun->flags & TUN_NO_PI)) {
if ((len -= sizeof(pi)) < 0)
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 8d5cac2d8e33..57da4c10c695 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -779,7 +779,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
struct net_device *netdev;
struct catc *catc;
u8 broadcast[6];
- int i, pktsz;
+ int pktsz, ret;
if (usb_set_interface(usbdev,
intf->altsetting->desc.bInterfaceNumber, 1)) {
@@ -814,12 +814,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
(!catc->rx_urb) || (!catc->irq_urb)) {
dev_err(&intf->dev, "No free urbs available.\n");
- usb_free_urb(catc->ctrl_urb);
- usb_free_urb(catc->tx_urb);
- usb_free_urb(catc->rx_urb);
- usb_free_urb(catc->irq_urb);
- free_netdev(netdev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail_free;
}
/* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
@@ -847,15 +843,24 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
catc->irq_buf, 2, catc_irq_done, catc, 1);
if (!catc->is_f5u011) {
+ u32 *buf;
+ int i;
+
dev_dbg(dev, "Checking memory size\n");
- i = 0x12345678;
- catc_write_mem(catc, 0x7a80, &i, 4);
- i = 0x87654321;
- catc_write_mem(catc, 0xfa80, &i, 4);
- catc_read_mem(catc, 0x7a80, &i, 4);
+ buf = kmalloc(4, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto fail_free;
+ }
+
+ *buf = 0x12345678;
+ catc_write_mem(catc, 0x7a80, buf, 4);
+ *buf = 0x87654321;
+ catc_write_mem(catc, 0xfa80, buf, 4);
+ catc_read_mem(catc, 0x7a80, buf, 4);
- switch (i) {
+ switch (*buf) {
case 0x12345678:
catc_set_reg(catc, TxBufCount, 8);
catc_set_reg(catc, RxBufCount, 32);
@@ -870,6 +875,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
dev_dbg(dev, "32k Memory\n");
break;
}
+
+ kfree(buf);
dev_dbg(dev, "Getting MAC from SEEROM.\n");
@@ -916,16 +923,21 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
usb_set_intfdata(intf, catc);
SET_NETDEV_DEV(netdev, &intf->dev);
- if (register_netdev(netdev) != 0) {
- usb_set_intfdata(intf, NULL);
- usb_free_urb(catc->ctrl_urb);
- usb_free_urb(catc->tx_urb);
- usb_free_urb(catc->rx_urb);
- usb_free_urb(catc->irq_urb);
- free_netdev(netdev);
- return -EIO;
- }
+ ret = register_netdev(netdev);
+ if (ret)
+ goto fail_clear_intfdata;
+
return 0;
+
+fail_clear_intfdata:
+ usb_set_intfdata(intf, NULL);
+fail_free:
+ usb_free_urb(catc->ctrl_urb);
+ usb_free_urb(catc->tx_urb);
+ usb_free_urb(catc->rx_urb);
+ usb_free_urb(catc->irq_urb);
+ free_netdev(netdev);
+ return ret;
}
static void catc_disconnect(struct usb_interface *intf)
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d0815855d877..e782dd7183db 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2862,7 +2862,6 @@ vmxnet3_tx_timeout(struct net_device *netdev)
netdev_err(adapter->netdev, "tx hang\n");
schedule_work(&adapter->work);
- netif_wake_queue(adapter->netdev);
}
@@ -2889,6 +2888,7 @@ vmxnet3_reset_work(struct work_struct *data)
}
rtnl_unlock();
+ netif_wake_queue(adapter->netdev);
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 06f86f435711..1b8422c4ef9b 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -511,8 +511,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
break;
return -EOPNOTSUPP;
default:
- WARN_ON(1);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
mutex_lock(&ah->lock);
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index 6307a4e36c85..f8639003da95 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -836,25 +836,30 @@ static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len,
spin_lock_bh(&local->baplock);
res = hfa384x_setup_bap(dev, BAP0, rid, 0);
- if (!res)
- res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
+ if (res)
+ goto unlock;
+
+ res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
+ if (res)
+ goto unlock;
if (le16_to_cpu(rec.len) == 0) {
/* RID not available */
res = -ENODATA;
+ goto unlock;
}
rlen = (le16_to_cpu(rec.len) - 1) * 2;
- if (!res && exact_len && rlen != len) {
+ if (exact_len && rlen != len) {
printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: "
"rid=0x%04x, len=%d (expected %d)\n",
dev->name, rid, rlen, len);
res = -ENODATA;
}
- if (!res)
- res = hfa384x_from_bap(dev, BAP0, buf, len);
+ res = hfa384x_from_bap(dev, BAP0, buf, len);
+unlock:
spin_unlock_bh(&local->baplock);
mutex_unlock(&local->rid_bap_mtx);
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 3ad79736b255..3fc7d0845480 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -823,6 +823,7 @@ static void rtl_usb_stop(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct urb *urb;
/* should after adapter start and interrupt enable. */
set_hal_stop(rtlhal);
@@ -830,6 +831,23 @@ static void rtl_usb_stop(struct ieee80211_hw *hw)
/* Enable software */
SET_USB_STOP(rtlusb);
rtl_usb_deinit(hw);
+
+ /* free pre-allocated URBs from rtl_usb_start() */
+ usb_kill_anchored_urbs(&rtlusb->rx_submitted);
+
+ tasklet_kill(&rtlusb->rx_work_tasklet);
+ cancel_work_sync(&rtlpriv->works.lps_change_work);
+
+ flush_workqueue(rtlpriv->works.rtl_wq);
+
+ skb_queue_purge(&rtlusb->rx_queue);
+
+ while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
+ usb_free_urb(urb);
+ }
+
rtlpriv->cfg->ops->hw_disable(hw);
}
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index 3492ec9a33b7..a7d64f94c3cd 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -274,7 +274,8 @@ static bool sh_pfc_pinconf_validate(struct sh_pfc *pfc, unsigned int _pin,
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
- return true;
+ return pin->configs &
+ (SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN);
case PIN_CONFIG_BIAS_PULL_UP:
return pin->configs & SH_PFC_PIN_CFG_PULL_UP;
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 59a8d325a697..e4d9a903ca3c 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1860,11 +1860,24 @@ static int acer_wmi_enable_lm(void)
return status;
}
+#define ACER_WMID_ACCEL_HID "BST0001"
+
static acpi_status __init acer_wmi_get_handle_cb(acpi_handle ah, u32 level,
void *ctx, void **retval)
{
+ struct acpi_device *dev;
+
+ if (!strcmp(ctx, "SENR")) {
+ if (acpi_bus_get_device(ah, &dev))
+ return AE_OK;
+ if (strcmp(ACER_WMID_ACCEL_HID, acpi_device_hid(dev)))
+ return AE_OK;
+ } else
+ return AE_OK;
+
*(acpi_handle *)retval = ah;
- return AE_OK;
+
+ return AE_CTRL_TERMINATE;
}
static int __init acer_wmi_get_handle(const char *name, const char *prop,
@@ -1878,8 +1891,7 @@ static int __init acer_wmi_get_handle(const char *name, const char *prop,
handle = NULL;
status = acpi_get_devices(prop, acer_wmi_get_handle_cb,
(void *)name, &handle);
-
- if (ACPI_SUCCESS(status)) {
+ if (ACPI_SUCCESS(status) && handle) {
*ah = handle;
return 0;
} else {
@@ -1891,7 +1903,7 @@ static int __init acer_wmi_accel_setup(void)
{
int err;
- err = acer_wmi_get_handle("SENR", "BST0001", &gsensor_handle);
+ err = acer_wmi_get_handle("SENR", ACER_WMID_ACCEL_HID, &gsensor_handle);
if (err)
return err;
@@ -2262,10 +2274,11 @@ static int __init acer_wmi_init(void)
err = acer_wmi_input_setup();
if (err)
return err;
+ err = acer_wmi_accel_setup();
+ if (err && err != -ENODEV)
+ pr_warn("Cannot enable accelerometer\n");
}
- acer_wmi_accel_setup();
-
err = platform_driver_register(&acer_platform_driver);
if (err) {
pr_err("Unable to register platform driver\n");
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 42bd57da239d..09198941ee22 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -763,9 +763,23 @@ EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
*/
static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
{
+ struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
+ struct rtc_time tm;
+ ktime_t now;
+
timer->enabled = 1;
+ __rtc_read_time(rtc, &tm);
+ now = rtc_tm_to_ktime(tm);
+
+ /* Skip over expired timers */
+ while (next) {
+ if (next->expires.tv64 >= now.tv64)
+ break;
+ next = timerqueue_iterate_next(next);
+ }
+
timerqueue_add(&rtc->timerqueue, &timer->node);
- if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
+ if (!next) {
struct rtc_wkalrm alarm;
int err;
alarm.time = rtc_ktime_to_tm(timer->node.expires);
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index f40afdd0e5f5..b6e220f5963d 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -15,6 +15,7 @@
#include <linux/bitrev.h>
#include <linux/bcd.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#define S35390A_CMD_STATUS1 0
#define S35390A_CMD_STATUS2 1
@@ -34,10 +35,14 @@
#define S35390A_ALRM_BYTE_HOURS 1
#define S35390A_ALRM_BYTE_MINS 2
+/* flags for STATUS1 */
#define S35390A_FLAG_POC 0x01
#define S35390A_FLAG_BLD 0x02
+#define S35390A_FLAG_INT2 0x04
#define S35390A_FLAG_24H 0x40
#define S35390A_FLAG_RESET 0x80
+
+/* flag for STATUS2 */
#define S35390A_FLAG_TEST 0x01
#define S35390A_INT2_MODE_MASK 0xF0
@@ -94,19 +99,63 @@ static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len)
return 0;
}
-static int s35390a_reset(struct s35390a *s35390a)
+/*
+ * Returns <0 on error, 0 if the rtc is set up fine and 1 if the chip was reset.
+ * To keep the information on whether an irq is pending, pass the value read from
+ * STATUS1 to the caller.
+ */
+static int s35390a_reset(struct s35390a *s35390a, char *status1)
{
- char buf[1];
-
- if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)) < 0)
- return -EIO;
-
- if (!(buf[0] & (S35390A_FLAG_POC | S35390A_FLAG_BLD)))
+ char buf;
+ int ret;
+ unsigned initcount = 0;
+
+ ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, status1, 1);
+ if (ret < 0)
+ return ret;
+
+ if (*status1 & S35390A_FLAG_POC)
+ /*
+ * Do not communicate for 0.5 seconds since the power-on
+ * detection circuit is in operation.
+ */
+ msleep(500);
+ else if (!(*status1 & S35390A_FLAG_BLD))
+ /*
+ * If both POC and BLD are unset everything is fine.
+ */
return 0;
- buf[0] |= (S35390A_FLAG_RESET | S35390A_FLAG_24H);
- buf[0] &= 0xf0;
- return s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf));
+ /*
+ * At least one of POC and BLD is set, so reinitialise the chip. Keeping
+ * this information in the hardware to know later that the time isn't
+ * valid is unfortunately not possible because POC and BLD are cleared
+ * on read. So the reset is best done now.
+ *
+ * The 24H bit is kept over reset, so set it already here.
+ */
+initialize:
+ *status1 = S35390A_FLAG_24H;
+ buf = S35390A_FLAG_RESET | S35390A_FLAG_24H;
+ ret = s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1);
+
+ if (ret < 0)
+ return ret;
+
+ ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1);
+ if (ret < 0)
+ return ret;
+
+ if (buf & (S35390A_FLAG_POC | S35390A_FLAG_BLD)) {
+ /* Try up to five times to reset the chip */
+ if (initcount < 5) {
+ ++initcount;
+ goto initialize;
+ } else
+ return -EIO;
+ }
+
+ return 1;
}
static int s35390a_disable_test_mode(struct s35390a *s35390a)
@@ -265,6 +314,20 @@ static int s35390a_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alm)
char buf[3], sts;
int i, err;
+ /*
+ * initialize all members to -1 to signal the core that they are not
+ * defined by the hardware.
+ */
+ alm->time.tm_sec = -1;
+ alm->time.tm_min = -1;
+ alm->time.tm_hour = -1;
+ alm->time.tm_mday = -1;
+ alm->time.tm_mon = -1;
+ alm->time.tm_year = -1;
+ alm->time.tm_wday = -1;
+ alm->time.tm_yday = -1;
+ alm->time.tm_isdst = -1;
+
err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS2, &sts, sizeof(sts));
if (err < 0)
return err;
@@ -327,11 +390,11 @@ static struct i2c_driver s35390a_driver;
static int s35390a_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- int err;
+ int err, err_reset;
unsigned int i;
struct s35390a *s35390a;
struct rtc_time tm;
- char buf[1];
+ char buf, status1;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
err = -ENODEV;
@@ -360,29 +423,35 @@ static int s35390a_probe(struct i2c_client *client,
}
}
- err = s35390a_reset(s35390a);
- if (err < 0) {
+ err_reset = s35390a_reset(s35390a, &status1);
+ if (err_reset < 0) {
+ err = err_reset;
dev_err(&client->dev, "error resetting chip\n");
goto exit_dummy;
}
- err = s35390a_disable_test_mode(s35390a);
- if (err < 0) {
- dev_err(&client->dev, "error disabling test mode\n");
- goto exit_dummy;
- }
-
- err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf));
- if (err < 0) {
- dev_err(&client->dev, "error checking 12/24 hour mode\n");
- goto exit_dummy;
- }
- if (buf[0] & S35390A_FLAG_24H)
+ if (status1 & S35390A_FLAG_24H)
s35390a->twentyfourhour = 1;
else
s35390a->twentyfourhour = 0;
- if (s35390a_get_datetime(client, &tm) < 0)
+ if (status1 & S35390A_FLAG_INT2) {
+ /* disable alarm (and maybe test mode) */
+ buf = 0;
+ err = s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, &buf, 1);
+ if (err < 0) {
+ dev_err(&client->dev, "error disabling alarm");
+ goto exit_dummy;
+ }
+ } else {
+ err = s35390a_disable_test_mode(s35390a);
+ if (err < 0) {
+ dev_err(&client->dev, "error disabling test mode\n");
+ goto exit_dummy;
+ }
+ }
+
+ if (err_reset > 0 || s35390a_get_datetime(client, &tm) < 0)
dev_warn(&client->dev, "clock needs to be set\n");
device_set_wakeup_capable(&client->dev, 1);
@@ -395,6 +464,10 @@ static int s35390a_probe(struct i2c_client *client,
err = PTR_ERR(s35390a->rtc);
goto exit_dummy;
}
+
+ if (status1 & S35390A_FLAG_INT2)
+ rtc_update_irq(s35390a->rtc, 1, RTC_AF);
+
return 0;
exit_dummy:
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 9b3a24e8d3a0..5e41e8453acd 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -873,7 +873,7 @@ static int __init vmlogrdr_init(void)
goto cleanup;
for (i=0; i < MAXMINOR; ++i ) {
- sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
+ sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sys_ser[i].buffer) {
rc = -ENOMEM;
break;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e6e0679ec882..b08b1e1a45e5 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -10909,6 +10909,7 @@ static struct pci_driver lpfc_driver = {
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
.remove = lpfc_pci_remove_one,
+ .shutdown = lpfc_pci_remove_one,
.suspend = lpfc_pci_suspend_one,
.resume = lpfc_pci_resume_one,
.err_handler = &lpfc_err_handler,
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 2da1959ff2f6..03c8783180a8 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -736,8 +736,8 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
mv_dprintk("device %016llx not ready.\n",
SAS_ADDR(dev->sas_addr));
- rc = SAS_PHY_DOWN;
- return rc;
+ rc = SAS_PHY_DOWN;
+ return rc;
}
tei.port = dev->port->lldd_port;
if (tei.port && !tei.port->port_attached && !tmf) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 66c495d21016..40fe8a77236a 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3301,7 +3301,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
sizeof(struct ct6_dsd), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!ctx_cachep)
- goto fail_free_gid_list;
+ goto fail_free_srb_mempool;
}
ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
ctx_cachep);
@@ -3454,7 +3454,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
GFP_KERNEL);
if (!ha->loop_id_map)
- goto fail_async_pd;
+ goto fail_loop_id_map;
else {
qla2x00_set_reserved_loop_ids(ha);
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
@@ -3463,6 +3463,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
return 0;
+fail_loop_id_map:
+ dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
fail_async_pd:
dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
fail_ex_init_cb:
@@ -3490,6 +3492,10 @@ fail_free_ms_iocb:
dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
ha->ms_iocb = NULL;
ha->ms_iocb_dma = 0;
+
+ if (ha->sns_cmd)
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
+ ha->sns_cmd, ha->sns_cmd_dma);
fail_dma_pool:
if (IS_QLA82XX(ha) || ql2xenabledif) {
dma_pool_destroy(ha->fcp_cmnd_dma_pool);
@@ -3507,10 +3513,12 @@ fail_free_nvram:
kfree(ha->nvram);
ha->nvram = NULL;
fail_free_ctx_mempool:
- mempool_destroy(ha->ctx_mempool);
+ if (ha->ctx_mempool)
+ mempool_destroy(ha->ctx_mempool);
ha->ctx_mempool = NULL;
fail_free_srb_mempool:
- mempool_destroy(ha->srb_mempool);
+ if (ha->srb_mempool)
+ mempool_destroy(ha->srb_mempool);
ha->srb_mempool = NULL;
fail_free_gid_list:
dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 60031e15d562..dc1c2f4520f2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1009,8 +1009,12 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
struct request *rq = cmd->request;
+ int error;
- int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
+ if (WARN_ON_ONCE(!rq->nr_phys_segments))
+ return -EINVAL;
+
+ error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
if (error)
goto err_exit;
@@ -1102,11 +1106,7 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
* submit a request without an attached bio.
*/
if (req->bio) {
- int ret;
-
- BUG_ON(!req->nr_phys_segments);
-
- ret = scsi_init_io(cmd, GFP_ATOMIC);
+ int ret = scsi_init_io(cmd, GFP_ATOMIC);
if (unlikely(ret))
return ret;
} else {
@@ -1150,11 +1150,6 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
return ret;
}
- /*
- * Filesystem requests must transfer data.
- */
- BUG_ON(!req->nr_phys_segments);
-
cmd = scsi_get_cmd_from_req(sdev, req);
if (unlikely(!cmd))
return BLKPREP_DEFER;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 135d7b56fbe6..53da653988ec 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -865,10 +865,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
struct request_queue *rq = sdev->request_queue;
struct scsi_target *starget = sdev->sdev_target;
- error = scsi_device_set_state(sdev, SDEV_RUNNING);
- if (error)
- return error;
-
error = scsi_target_add(starget);
if (error)
return error;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 4afce0e838a2..880a300baf7a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1354,11 +1354,15 @@ static int media_not_present(struct scsi_disk *sdkp,
**/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
- struct scsi_disk *sdkp = scsi_disk(disk);
- struct scsi_device *sdp = sdkp->device;
+ struct scsi_disk *sdkp = scsi_disk_get(disk);
+ struct scsi_device *sdp;
struct scsi_sense_hdr *sshdr = NULL;
int retval;
+ if (!sdkp)
+ return 0;
+
+ sdp = sdkp->device;
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
/*
@@ -1415,6 +1419,7 @@ out:
kfree(sshdr);
retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
sdp->changed = 0;
+ scsi_disk_put(sdkp);
return retval;
}
@@ -1919,6 +1924,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
#define READ_CAPACITY_RETRIES_ON_RESET 10
+/*
+ * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
+ * and the reported logical block size is bigger than 512 bytes. Note
+ * that last_sector is a u64 and therefore logical_to_sectors() is not
+ * applicable.
+ */
+static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
+{
+ u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
+
+ if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
+ return false;
+
+ return true;
+}
+
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
{
@@ -1984,7 +2005,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
return -ENODEV;
}
- if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
@@ -2070,7 +2091,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
return sector_size;
}
- if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 1f65e32db285..0b27d293dd83 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -568,6 +568,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
sg_io_hdr_t *hp;
unsigned char cmnd[MAX_COMMAND_SIZE];
+ if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
+ return -EINVAL;
+
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
@@ -766,8 +769,14 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
return k; /* probably out of space --> ENOMEM */
}
if (sdp->detached) {
- if (srp->bio)
+ if (srp->bio) {
+ if (srp->rq->cmd != srp->rq->__cmd)
+ kfree(srp->rq->cmd);
+
blk_end_request_all(srp->rq, -EIO);
+ srp->rq = NULL;
+ }
+
sg_finish_rem_req(srp);
return -ENODEV;
}
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 1ac9943cbb93..c1f23abd754a 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -855,6 +855,7 @@ static void get_capabilities(struct scsi_cd *cd)
unsigned char *buffer;
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
+ unsigned int ms_len = 128;
int rc, n;
static const char *loadmech[] =
@@ -881,10 +882,11 @@ static void get_capabilities(struct scsi_cd *cd)
scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
/* ask for mode page 0x2a */
- rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
+ rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
SR_TIMEOUT, 3, &data, NULL);
- if (!scsi_status_is_good(rc)) {
+ if (!scsi_status_is_good(rc) || data.length > ms_len ||
+ data.header_length + data.block_descriptor_length > data.length) {
/* failed, drive doesn't have capabilities mode page */
cd->cdi.speed = 1;
cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
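
The reworked mode-sense handling above only trusts the page when the device-reported data length fits in the buffer that was requested and when the header plus block-descriptor lengths stay inside that data length. The sketch below applies the same two sanity checks; struct mode_data and mode_sense_sane() are illustrative stand-ins for struct scsi_mode_data and the in-driver test.

#include <stdbool.h>
#include <stdio.h>

struct mode_data {		/* stand-in for struct scsi_mode_data */
	unsigned int length;
	unsigned int header_length;
	unsigned int block_descriptor_length;
};

static bool mode_sense_sane(const struct mode_data *d, unsigned int buf_len)
{
	if (d->length > buf_len)
		return false;	/* device claims more than we asked for */
	if (d->header_length + d->block_descriptor_length > d->length)
		return false;	/* internal lengths are inconsistent */
	return true;
}

int main(void)
{
	struct mode_data ok  = { .length = 52,  .header_length = 4,
				 .block_descriptor_length = 8 };
	struct mode_data bad = { .length = 200, .header_length = 4,
				 .block_descriptor_length = 8 };

	printf("%d %d\n", mode_sense_sane(&ok, 128), mode_sense_sane(&bad, 128));
	return 0;
}
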
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 913b91c78a22..58d898cdff0a 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -204,6 +204,7 @@ enum storvsc_request_type {
#define SRB_STATUS_SUCCESS 0x01
#define SRB_STATUS_ABORTED 0x02
#define SRB_STATUS_ERROR 0x04
+#define SRB_STATUS_DATA_OVERRUN 0x12
/*
* This is the end of Protocol specific defines.
@@ -795,6 +796,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
switch (vm_srb->srb_status) {
case SRB_STATUS_ERROR:
/*
+ * Let upper layer deal with error when
+ * sense message is present.
+ */
+
+ if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)
+ break;
+ /*
* If there is an error; offline the device since all
* error recovery strategies would have already been
* deployed on the host side. However, if the command
@@ -859,6 +867,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
struct scsi_sense_hdr sense_hdr;
struct vmscsi_request *vm_srb;
struct stor_mem_pools *memp = scmnd->device->hostdata;
+ u32 data_transfer_length;
struct Scsi_Host *host;
struct storvsc_device *stor_dev;
struct hv_device *dev = host_dev->dev;
@@ -867,6 +876,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
host = stor_dev->host;
vm_srb = &cmd_request->vstor_packet.vm_srb;
+ data_transfer_length = vm_srb->data_transfer_length;
if (cmd_request->bounce_sgl_count) {
if (vm_srb->data_in == READ_TYPE)
copy_from_bounce_buffer(scsi_sglist(scmnd),
@@ -885,13 +895,20 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
scsi_print_sense_hdr("storvsc", &sense_hdr);
}
- if (vm_srb->srb_status != SRB_STATUS_SUCCESS)
+ if (vm_srb->srb_status != SRB_STATUS_SUCCESS) {
storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
sense_hdr.ascq);
+ /*
+ * The Windows driver sets data_transfer_length on
+ * SRB_STATUS_DATA_OVERRUN. On other errors, this value
+ * is untouched. In these cases we set it to 0.
+ */
+ if (vm_srb->srb_status != SRB_STATUS_DATA_OVERRUN)
+ data_transfer_length = 0;
+ }
scsi_set_resid(scmnd,
- cmd_request->data_buffer.len -
- vm_srb->data_transfer_length);
+ cmd_request->data_buffer.len - data_transfer_length);
scsi_done_fn = scmnd->scsi_done;
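
After this change the residual passed to scsi_set_resid() uses the device-reported transfer length only on success or SRB_STATUS_DATA_OVERRUN; on any other error the length is treated as 0, so the residual covers the whole buffer. A short arithmetic sketch of that rule, reusing the SRB_STATUS_* values defined earlier in this file (residual() itself is illustrative):

#include <stdint.h>
#include <stdio.h>

#define SRB_STATUS_SUCCESS	0x01
#define SRB_STATUS_ERROR	0x04
#define SRB_STATUS_DATA_OVERRUN	0x12

static uint32_t residual(uint8_t srb_status, uint32_t buf_len, uint32_t xfer_len)
{
	if (srb_status != SRB_STATUS_SUCCESS &&
	    srb_status != SRB_STATUS_DATA_OVERRUN)
		xfer_len = 0;	/* value is stale on other errors */
	return buf_len - xfer_len;
}

int main(void)
{
	printf("%u\n", residual(SRB_STATUS_SUCCESS, 4096, 4096));	/* 0 */
	printf("%u\n", residual(SRB_STATUS_DATA_OVERRUN, 4096, 512));	/* 3584 */
	printf("%u\n", residual(SRB_STATUS_ERROR, 4096, 512));		/* 4096 */
	return 0;
}
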
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index a8dc95ebf2d6..7700cef5e177 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -846,6 +846,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
if (err) {
ssb_warn("WARNING: Using fallback SPROM failed (err %d)\n",
err);
+ goto out_free;
} else {
ssb_dbg("Using SPROM revision %d provided by platform\n",
sprom->revision);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 30be6c9bdbc6..ff3ca598e539 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -806,22 +806,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
SET_PSTATE_REPLY_OPTIONAL(param);
/*
- * The GlobalSAN iSCSI Initiator for MacOSX does
- * not respond to MaxBurstLength, FirstBurstLength,
- * DefaultTime2Wait or DefaultTime2Retain parameter keys.
- * So, we set them to 'reply optional' here, and assume the
- * the defaults from iscsi_parameters.h if the initiator
- * is not RFC compliant and the keys are not negotiated.
- */
- if (!strcmp(param->name, MAXBURSTLENGTH))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, FIRSTBURSTLENGTH))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, DEFAULTTIME2WAIT))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, DEFAULTTIME2RETAIN))
- SET_PSTATE_REPLY_OPTIONAL(param);
- /*
* Required for gPXE iSCSI boot client
*/
if (!strcmp(param->name, MAXCONNECTIONS))
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 016e882356d6..eeeea38d4b2e 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -722,21 +722,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
{
struct se_cmd *se_cmd = NULL;
int rc;
+ bool op_scsi = false;
/*
* Determine if a struct se_cmd is associated with
* this struct iscsi_cmd.
*/
switch (cmd->iscsi_opcode) {
case ISCSI_OP_SCSI_CMD:
- se_cmd = &cmd->se_cmd;
- __iscsit_free_cmd(cmd, true, shutdown);
+ op_scsi = true;
/*
* Fallthrough
*/
case ISCSI_OP_SCSI_TMFUNC:
- rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
- if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
- __iscsit_free_cmd(cmd, true, shutdown);
+ se_cmd = &cmd->se_cmd;
+ __iscsit_free_cmd(cmd, op_scsi, shutdown);
+ rc = transport_generic_free_cmd(se_cmd, shutdown);
+ if (!rc && shutdown && se_cmd->se_sess) {
+ __iscsit_free_cmd(cmd, op_scsi, shutdown);
target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}
break;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 244776bec1c7..79fed114a2e0 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -157,7 +157,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
buf = kzalloc(12, GFP_KERNEL);
if (!buf)
- return;
+ goto out_free;
memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = MODE_SENSE;
@@ -172,9 +172,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
* If MODE_SENSE still returns zero, set the default value to 1024.
*/
sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+out_free:
if (!sdev->sector_size)
sdev->sector_size = 1024;
-out_free:
+
kfree(buf);
}
@@ -317,9 +318,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
sd->lun, sd->queue_depth);
}
- dev->dev_attrib.hw_block_size = sd->sector_size;
+ dev->dev_attrib.hw_block_size =
+ min_not_zero((int)sd->sector_size, 512);
dev->dev_attrib.hw_max_sectors =
- min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+ min_not_zero((unsigned)sd->host->max_sectors, queue_max_hw_sectors(q));
dev->dev_attrib.hw_queue_depth = sd->queue_depth;
/*
@@ -342,8 +344,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
/*
* For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
*/
- if (sd->type == TYPE_TAPE)
+ if (sd->type == TYPE_TAPE) {
pscsi_tape_read_blocksize(dev, sd);
+ dev->dev_attrib.hw_block_size = sd->sector_size;
+ }
return 0;
}
@@ -409,7 +413,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
/*
* Called with struct Scsi_Host->host_lock called.
*/
-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
__releases(sh->host_lock)
{
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
@@ -436,28 +440,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
return 0;
}
-/*
- * Called with struct Scsi_Host->host_lock called.
- */
-static int pscsi_create_type_other(struct se_device *dev,
- struct scsi_device *sd)
- __releases(sh->host_lock)
-{
- struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
- struct Scsi_Host *sh = sd->host;
- int ret;
-
- spin_unlock_irq(sh->host_lock);
- ret = pscsi_add_device_to_list(dev, sd);
- if (ret)
- return ret;
-
- pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
- phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
- sd->channel, sd->id, sd->lun);
- return 0;
-}
-
static int pscsi_configure_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
@@ -545,11 +527,8 @@ static int pscsi_configure_device(struct se_device *dev)
case TYPE_DISK:
ret = pscsi_create_type_disk(dev, sd);
break;
- case TYPE_ROM:
- ret = pscsi_create_type_rom(dev, sd);
- break;
default:
- ret = pscsi_create_type_other(dev, sd);
+ ret = pscsi_create_type_nondisk(dev, sd);
break;
}
@@ -606,8 +585,7 @@ static void pscsi_free_device(struct se_device *dev)
else if (pdv->pdv_lld_host)
scsi_host_put(pdv->pdv_lld_host);
- if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
- scsi_device_put(sd);
+ scsi_device_put(sd);
pdv->pdv_sd = NULL;
}
@@ -1125,7 +1103,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
return pdv->pdv_bd->bd_part->nr_sects;
- dump_stack();
return 0;
}
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index d6080c3831ef..ce2e5d508fe7 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -823,7 +823,7 @@ static int receive_data(enum port_type index, struct nozomi *dc)
struct tty_struct *tty = tty_port_tty_get(&port->port);
int i, ret;
- read_mem32((u32 *) &size, addr, 4);
+ size = __le32_to_cpu(readl(addr));
/* DBG1( "%d bytes port: %d", size, index); */
if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 98b8423793fd..9243dd729dd4 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -55,6 +55,7 @@ struct serial_private {
unsigned int nr;
void __iomem *remapped_bar[PCI_NUM_BAR_RESOURCES];
struct pci_serial_quirk *quirk;
+ const struct pciserial_board *board;
int line[0];
};
@@ -3374,6 +3375,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
}
}
priv->nr = i;
+ priv->board = board;
return priv;
err_deinit:
@@ -3384,7 +3386,7 @@ err_out:
}
EXPORT_SYMBOL_GPL(pciserial_init_ports);
-void pciserial_remove_ports(struct serial_private *priv)
+void pciserial_detach_ports(struct serial_private *priv)
{
struct pci_serial_quirk *quirk;
int i;
@@ -3404,7 +3406,11 @@ void pciserial_remove_ports(struct serial_private *priv)
quirk = find_quirk(priv->dev);
if (quirk->exit)
quirk->exit(priv->dev);
+}
+void pciserial_remove_ports(struct serial_private *priv)
+{
+ pciserial_detach_ports(priv);
kfree(priv);
}
EXPORT_SYMBOL_GPL(pciserial_remove_ports);
@@ -4943,7 +4949,7 @@ static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev,
return PCI_ERS_RESULT_DISCONNECT;
if (priv)
- pciserial_suspend_ports(priv);
+ pciserial_detach_ports(priv);
pci_disable_device(dev);
@@ -4968,9 +4974,18 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev)
static void serial8250_io_resume(struct pci_dev *dev)
{
struct serial_private *priv = pci_get_drvdata(dev);
+ const struct pciserial_board *board;
- if (priv)
- pciserial_resume_ports(priv);
+ if (!priv)
+ return;
+
+ board = priv->board;
+ kfree(priv);
+ priv = pciserial_init_ports(dev, board);
+
+ if (!IS_ERR(priv)) {
+ pci_set_drvdata(dev, priv);
+ }
}
static const struct pci_error_handlers serial8250_err_handler = {
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index b11e99797fd8..876ce8823edc 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -930,6 +930,7 @@ static struct of_device_id msm_match_table[] = {
{ .compatible = "qcom,msm-uart" },
{}
};
+MODULE_DEVICE_TABLE(of, msm_match_table);
static struct platform_driver msm_platform_driver = {
.remove = msm_serial_remove,
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index b51c15408ff3..602c9a74bec6 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -881,8 +881,8 @@ static const struct input_device_id sysrq_ids[] = {
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_KEYBIT,
- .evbit = { BIT_MASK(EV_KEY) },
- .keybit = { BIT_MASK(KEY_LEFTALT) },
+ .evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) },
+ .keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) },
},
{ },
};
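
The sysrq fix above stores each mask at the array index BIT_WORD() selects instead of always at index 0, which matters once the bit number exceeds one long's worth of bits (KEY_LEFTALT is 56, so on 32-bit builds the old initializer put the mask in the wrong word). A small standalone sketch of the designated-initializer pattern, with local definitions standing in for the kernel's BIT_WORD()/BIT_MASK():

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))

#define KEY_LEFTALT	56
#define KEY_MAX		0x2ff

int main(void)
{
	/* put the mask in the word that actually holds bit 56 */
	unsigned long keybit[KEY_MAX / BITS_PER_LONG + 1] = {
		[BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT),
	};

	printf("word %zu = 0x%lx\n",
	       (size_t)BIT_WORD(KEY_LEFTALT), keybit[BIT_WORD(KEY_LEFTALT)]);
	return 0;
}
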
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index b364845de5ad..802df033e24c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -543,19 +543,18 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
acm->control->needs_remote_wakeup = 1;
acm->ctrlurb->dev = acm->dev;
- if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
+ retval = usb_submit_urb(acm->ctrlurb, GFP_KERNEL);
+ if (retval) {
dev_err(&acm->control->dev,
"%s - usb_submit_urb(ctrl irq) failed\n", __func__);
goto error_submit_urb;
}
acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS;
- if (acm_set_control(acm, acm->ctrlout) < 0 &&
- (acm->ctrl_caps & USB_CDC_CAP_LINE))
+ retval = acm_set_control(acm, acm->ctrlout);
+ if (retval < 0 && (acm->ctrl_caps & USB_CDC_CAP_LINE))
goto error_set_control;
- usb_autopm_put_interface(acm->control);
-
/*
* Unthrottle device in case the TTY was closed while throttled.
*/
@@ -564,9 +563,12 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
acm->throttle_req = 0;
spin_unlock_irq(&acm->read_lock);
- if (acm_submit_read_urbs(acm, GFP_KERNEL))
+ retval = acm_submit_read_urbs(acm, GFP_KERNEL);
+ if (retval)
goto error_submit_read_urbs;
+ usb_autopm_put_interface(acm->control);
+
mutex_unlock(&acm->mutex);
return 0;
@@ -583,7 +585,8 @@ error_submit_urb:
error_get_interface:
disconnected:
mutex_unlock(&acm->mutex);
- return retval;
+
+ return usb_translate_errors(retval);
}
static void acm_port_destruct(struct tty_port *port)
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 4e5156d212dd..55a8e84469ec 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2579,8 +2579,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
if (ret < 0)
return ret;
- /* The port state is unknown until the reset completes. */
- if (!(portstatus & USB_PORT_STAT_RESET))
+ /*
+ * The port state is unknown until the reset completes.
+ *
+ * On top of that, some chips may require additional time
+ * to re-establish a connection after the reset is complete,
+ * so also wait for the connection to be re-established.
+ */
+ if (!(portstatus & USB_PORT_STAT_RESET) &&
+ (portstatus & USB_PORT_STAT_CONNECTION))
break;
/* switch to the long delay after two short delay failures */
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 5a2eaf401b00..8f96e7d1d4da 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -241,6 +241,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
int status)
{
struct dwc3 *dwc = dep->dwc;
+ unsigned int unmap_after_complete = false;
int i;
if (req->queued) {
@@ -265,11 +266,19 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
if (req->request.status == -EINPROGRESS)
req->request.status = status;
- if (dwc->ep0_bounced && dep->number <= 1)
+ /*
+ * NOTICE we don't want to unmap before calling ->complete() if we're
+ * dealing with a bounced ep0 request. If we unmap it here, we would end
+ * up overwriting the contents of req->buf and this could confuse the
+ * gadget driver.
+ */
+ if (dwc->ep0_bounced && dep->number <= 1) {
dwc->ep0_bounced = false;
-
- usb_gadget_unmap_request(&dwc->gadget, &req->request,
- req->direction);
+ unmap_after_complete = true;
+ } else {
+ usb_gadget_unmap_request(&dwc->gadget,
+ &req->request, req->direction);
+ }
dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
req, dep->name, req->request.actual,
@@ -278,6 +287,10 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
spin_unlock(&dwc->lock);
req->request.complete(&dep->endpoint, &req->request);
spin_lock(&dwc->lock);
+
+ if (unmap_after_complete)
+ usb_gadget_unmap_request(&dwc->gadget,
+ &req->request, req->direction);
}
static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index b3f25c302e35..40ac1abe3ad4 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -48,23 +48,23 @@ struct dwc3;
#define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget))
/* DEPCFG parameter 1 */
-#define DWC3_DEPCFG_INT_NUM(n) ((n) << 0)
+#define DWC3_DEPCFG_INT_NUM(n) (((n) & 0x1f) << 0)
#define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8)
#define DWC3_DEPCFG_XFER_IN_PROGRESS_EN (1 << 9)
#define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10)
#define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11)
#define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13)
-#define DWC3_DEPCFG_BINTERVAL_M1(n) ((n) << 16)
+#define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16)
#define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24)
-#define DWC3_DEPCFG_EP_NUMBER(n) ((n) << 25)
+#define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25)
#define DWC3_DEPCFG_BULK_BASED (1 << 30)
#define DWC3_DEPCFG_FIFO_BASED (1 << 31)
/* DEPCFG parameter 0 */
-#define DWC3_DEPCFG_EP_TYPE(n) ((n) << 1)
-#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) ((n) << 3)
-#define DWC3_DEPCFG_FIFO_NUMBER(n) ((n) << 17)
-#define DWC3_DEPCFG_BURST_SIZE(n) ((n) << 22)
+#define DWC3_DEPCFG_EP_TYPE(n) (((n) & 0x3) << 1)
+#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) (((n) & 0x7ff) << 3)
+#define DWC3_DEPCFG_FIFO_NUMBER(n) (((n) & 0x1f) << 17)
+#define DWC3_DEPCFG_BURST_SIZE(n) (((n) & 0xf) << 22)
#define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26)
/* This applies for core versions earlier than 1.94a */
#define DWC3_DEPCFG_IGN_SEQ_NUM (1 << 31)
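
Each DEPCFG macro now masks its argument to the register field's width before shifting, so an oversized value can no longer spill into neighbouring bits. A minimal sketch of the difference, using hypothetical EP_NUMBER macros rather than the dwc3 register layout:

#include <stdint.h>
#include <stdio.h>

/* shift-only encoding: an out-of-range value corrupts neighbouring fields */
#define EP_NUMBER_RAW(n)	((uint32_t)(n) << 25)
/* masked encoding, as in the patched DWC3_DEPCFG_EP_NUMBER(): the value is
 * clipped to the 5-bit field before it is shifted into place */
#define EP_NUMBER_SAFE(n)	(((uint32_t)(n) & 0x1f) << 25)

int main(void)
{
	uint32_t bogus = 0x45;	/* wider than 5 bits */

	printf("raw:  0x%08x\n", EP_NUMBER_RAW(bogus));		/* bit 31 set too */
	printf("safe: 0x%08x\n", EP_NUMBER_SAFE(bogus));	/* only bits 29..25 */
	return 0;
}
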
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 584e43c9748f..a9142a46ae82 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -125,11 +125,16 @@ int config_ep_by_speed(struct usb_gadget *g,
ep_found:
/* commit results */
- _ep->maxpacket = usb_endpoint_maxp(chosen_desc);
+ _ep->maxpacket = usb_endpoint_maxp(chosen_desc) & 0x7ff;
_ep->desc = chosen_desc;
_ep->comp_desc = NULL;
_ep->maxburst = 0;
- _ep->mult = 0;
+ _ep->mult = 1;
+
+ if (g->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(_ep->desc) ||
+ usb_endpoint_xfer_int(_ep->desc)))
+ _ep->mult = ((usb_endpoint_maxp(_ep->desc) & 0x1800) >> 11) + 1;
+
if (!want_comp_desc)
return 0;
@@ -146,7 +151,7 @@ ep_found:
switch (usb_endpoint_type(_ep->desc)) {
case USB_ENDPOINT_XFER_ISOC:
/* mult: bits 1:0 of bmAttributes */
- _ep->mult = comp_desc->bmAttributes & 0x3;
+ _ep->mult = (comp_desc->bmAttributes & 0x3) + 1;
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
_ep->maxburst = comp_desc->bMaxBurst + 1;
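
For high-speed isochronous and interrupt endpoints the additional-transactions count sits in bits 12:11 of wMaxPacketSize, so the hunk above derives ep->mult as that field plus one and stores the multiplier itself (1..3). A worked decode of a hand-built maxp value (no real descriptor is parsed here):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* wMaxPacketSize: bits 12:11 = 2 extra transactions, bits 10:0 = 1024 */
	uint16_t maxp = 0x1400;

	unsigned int packet = maxp & 0x7ff;			/* 1024 bytes */
	unsigned int mult   = ((maxp & 0x1800) >> 11) + 1;	/* 3 transactions */

	printf("maxpacket=%u mult=%u\n", packet, mult);
	return 0;
}
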
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index 3384486c2884..ff30171b6926 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -535,13 +535,15 @@ static int acm_notify_serial_state(struct f_acm *acm)
{
struct usb_composite_dev *cdev = acm->port.func.config->cdev;
int status;
+ __le16 serial_state;
spin_lock(&acm->lock);
if (acm->notify_req) {
DBG(cdev, "acm ttyGS%d serial state %04x\n",
acm->port_num, acm->serial_state);
+ serial_state = cpu_to_le16(acm->serial_state);
status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
- 0, &acm->serial_state, sizeof(acm->serial_state));
+ 0, &serial_state, sizeof(acm->serial_state));
} else {
acm->pending = true;
status = 0;
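
The acm_notify_serial_state() change sends a cpu_to_le16() copy of serial_state instead of the raw host-endian field, which only differs on big-endian machines. A tiny byte-order sketch, with put_le16() as a local stand-in for cpu_to_le16():

#include <stdint.h>
#include <stdio.h>

/* emit the low byte first regardless of host byte order */
static void put_le16(uint8_t out[2], uint16_t v)
{
	out[0] = v & 0xff;
	out[1] = v >> 8;
}

int main(void)
{
	uint16_t serial_state = 0x0003;	/* e.g. carrier + DSR bits */
	uint8_t wire[2];

	put_le16(wire, serial_state);
	printf("on the wire: %02x %02x\n", wire[0], wire[1]);	/* 03 00 */
	return 0;
}
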
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 42a30903d4fd..c9e552bdf051 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1200,7 +1200,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
/* data and/or status stage for control request */
} else if (dev->state == STATE_DEV_SETUP) {
- /* IN DATA+STATUS caller makes len <= wLength */
+ len = min_t(size_t, len, dev->setup_wLength);
if (dev->setup_in) {
retval = setup_req (dev->gadget->ep0, dev->req, len);
if (retval == 0) {
@@ -1834,10 +1834,12 @@ static struct usb_gadget_driver probe_driver = {
* such as configuration notifications.
*/
-static int is_valid_config (struct usb_config_descriptor *config)
+static int is_valid_config(struct usb_config_descriptor *config,
+ unsigned int total)
{
return config->bDescriptorType == USB_DT_CONFIG
&& config->bLength == USB_DT_CONFIG_SIZE
+ && total >= USB_DT_CONFIG_SIZE
&& config->bConfigurationValue != 0
&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
@@ -1854,7 +1856,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
u32 tag;
char *kbuf;
- if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
+ if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
+ (len > PAGE_SIZE * 4))
return -EINVAL;
/* we might need to change message format someday */
@@ -1878,7 +1881,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
/* full or low speed config */
dev->config = (void *) kbuf;
total = le16_to_cpu(dev->config->wTotalLength);
- if (!is_valid_config (dev->config) || total >= length)
+ if (!is_valid_config(dev->config, total) ||
+ total > length - USB_DT_DEVICE_SIZE)
goto fail;
kbuf += total;
length -= total;
@@ -1887,10 +1891,13 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
if (kbuf [1] == USB_DT_CONFIG) {
dev->hs_config = (void *) kbuf;
total = le16_to_cpu(dev->hs_config->wTotalLength);
- if (!is_valid_config (dev->hs_config) || total >= length)
+ if (!is_valid_config(dev->hs_config, total) ||
+ total > length - USB_DT_DEVICE_SIZE)
goto fail;
kbuf += total;
length -= total;
+ } else {
+ dev->hs_config = NULL;
}
/* could support multiple configs, using another encoding! */
diff --git a/drivers/usb/gadget/uvc_video.c b/drivers/usb/gadget/uvc_video.c
index 71e896d4c5ae..43e8c65fd9ed 100644
--- a/drivers/usb/gadget/uvc_video.c
+++ b/drivers/usb/gadget/uvc_video.c
@@ -240,7 +240,7 @@ uvc_video_alloc_requests(struct uvc_video *video)
req_size = video->ep->maxpacket
* max_t(unsigned int, video->ep->maxburst, 1)
- * (video->ep->mult + 1);
+ * (video->ep->mult);
for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
video->req_buffer[i] = kmalloc(req_size, GFP_KERNEL);
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index 0f228c46eeda..ad458ef4b7e9 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -129,6 +129,10 @@ static int uhci_pci_init(struct usb_hcd *hcd)
if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP)
uhci->wait_for_hp = 1;
+ /* Intel controllers use non-PME wakeup signalling */
+ if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL)
+ device_set_run_wake(uhci_dev(uhci), 1);
+
/* Set up pointers to PCI-specific functions */
uhci->reset_hc = uhci_pci_reset_hc;
uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index b07e0754d784..f4348a9ff385 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -925,6 +925,40 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
xhci->devs[slot_id] = NULL;
}
+/*
+ * Free a virt_device structure.
+ * If the virt_device added a tt_info (a hub) and has children pointing to
+ * that tt_info, then free the child first. Recursive.
+ * We can't rely on udev at this point to find child-parent relationships.
+ */
+void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
+{
+ struct xhci_virt_device *vdev;
+ struct list_head *tt_list_head;
+ struct xhci_tt_bw_info *tt_info, *next;
+ int i;
+
+ vdev = xhci->devs[slot_id];
+ if (!vdev)
+ return;
+
+ tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
+ list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
+ /* is this a hub device that added a tt_info to the tts list */
+ if (tt_info->slot_id == slot_id) {
+ /* are any devices using this tt_info? */
+ for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
+ vdev = xhci->devs[i];
+ if (vdev && (vdev->tt_info == tt_info))
+ xhci_free_virt_devices_depth_first(
+ xhci, i);
+ }
+ }
+ }
+ /* we are now at a leaf device */
+ xhci_free_virt_device(xhci, slot_id);
+}
+
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
struct usb_device *udev, gfp_t flags)
{
@@ -1804,8 +1838,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
}
}
- for (i = 1; i < MAX_HC_SLOTS; ++i)
- xhci_free_virt_device(xhci, i);
+ for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
+ xhci_free_virt_devices_depth_first(xhci, i);
if (xhci->segment_pool)
dma_pool_destroy(xhci->segment_pool);
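
xhci_free_virt_devices_depth_first() frees every device whose tt_info points at the slot being freed before freeing the hub itself, and xhci_mem_cleanup() now walks slots from the highest index down. A toy recursion over a parent-indexed array shows the same children-before-parents ordering; the arrays are illustrative, not the xHCI structures:

#include <stdio.h>

#define NSLOTS 6

/* parent[i] == p means slot i hangs off hub p; slot 0 is unused */
static int parent[NSLOTS]  = { -1, -1, 1, 1, 3, -1 };
static int present[NSLOTS] = {  0,  1, 1, 1, 1,  0 };

static void free_depth_first(int slot)
{
	if (!present[slot])
		return;
	/* free every child hanging off this slot before the slot itself */
	for (int i = 0; i < NSLOTS; i++)
		if (present[i] && parent[i] == slot)
			free_depth_first(i);
	printf("freeing slot %d\n", slot);
	present[slot] = 0;
}

int main(void)
{
	for (int i = NSLOTS - 1; i > 0; i--)	/* highest slot first */
		free_depth_first(i);
	return 0;
}
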
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 2320e20d5be7..cae9881145f6 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -224,6 +224,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
struct xhci_hcd *xhci;
xhci = hcd_to_xhci(pci_get_drvdata(dev));
+ xhci->xhc_state |= XHCI_STATE_REMOVING;
if (xhci->shared_hcd) {
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 6e70ce976769..411db91b8305 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -174,6 +174,8 @@ static int xhci_plat_remove(struct platform_device *dev)
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ xhci->xhc_state |= XHCI_STATE_REMOVING;
+
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 507677b9bdc7..0e7dccc271db 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -139,7 +139,8 @@ static int xhci_start(struct xhci_hcd *xhci)
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
if (!ret)
- xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+ /* clear state flags, including dying, halted or removing */
+ xhci->xhc_state = 0;
return ret;
}
@@ -2693,7 +2694,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
- if (xhci->xhc_state & XHCI_STATE_DYING)
+ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_REMOVING))
return -ENODEV;
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index deb2537ae75c..15e796faa0a8 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1493,6 +1493,7 @@ struct xhci_hcd {
*/
#define XHCI_STATE_DYING (1 << 0)
#define XHCI_STATE_HALTED (1 << 1)
+#define XHCI_STATE_REMOVING (1 << 2)
/* Statistics */
int error_bitmask;
unsigned int quirks;
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 20814d528c15..2dd6830fdf7b 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1393,8 +1393,7 @@ static int download_fw(struct edgeport_serial *serial)
dev_dbg(dev, "%s - Download successful -- Device rebooting...\n", __func__);
- /* return an error on purpose */
- return -ENODEV;
+ return 1;
}
stayinbootmode:
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index dc55bc254c5c..a1d0fc476146 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -344,8 +344,9 @@ static long vfio_pci_ioctl(void *device_data,
} else if (cmd == VFIO_DEVICE_SET_IRQS) {
struct vfio_irq_set hdr;
+ size_t size;
u8 *data = NULL;
- int ret = 0;
+ int max, ret = 0;
minsz = offsetofend(struct vfio_irq_set, count);
@@ -353,23 +354,31 @@ static long vfio_pci_ioctl(void *device_data,
return -EFAULT;
if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
+ hdr.count >= (U32_MAX - hdr.start) ||
hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
VFIO_IRQ_SET_ACTION_TYPE_MASK))
return -EINVAL;
- if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
- size_t size;
- int max = vfio_pci_get_irq_count(vdev, hdr.index);
+ max = vfio_pci_get_irq_count(vdev, hdr.index);
+ if (hdr.start >= max || hdr.start + hdr.count > max)
+ return -EINVAL;
- if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
- size = sizeof(uint8_t);
- else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
- size = sizeof(int32_t);
- else
- return -EINVAL;
+ switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
+ case VFIO_IRQ_SET_DATA_NONE:
+ size = 0;
+ break;
+ case VFIO_IRQ_SET_DATA_BOOL:
+ size = sizeof(uint8_t);
+ break;
+ case VFIO_IRQ_SET_DATA_EVENTFD:
+ size = sizeof(int32_t);
+ break;
+ default:
+ return -EINVAL;
+ }
- if (hdr.argsz - minsz < hdr.count * size ||
- hdr.start >= max || hdr.start + hdr.count > max)
+ if (size) {
+ if (hdr.argsz - minsz < hdr.count * size)
return -EINVAL;
data = memdup_user((void __user *)(arg + minsz),
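
The rewritten VFIO_DEVICE_SET_IRQS validation first rejects counts that would wrap hdr.start + hdr.count, then bounds the range against the number of IRQs before the data type is even considered. A compact userspace sketch of the same overflow-aware range check (irq_range_ok() is illustrative, not part of the vfio API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* reject ranges that wrap around or run past 'max', mirroring the
 * hdr.count >= U32_MAX - hdr.start and start/count checks in the patch */
static bool irq_range_ok(uint32_t start, uint32_t count, uint32_t max)
{
	if (count >= UINT32_MAX - start)
		return false;
	if (start >= max || start + count > max)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", irq_range_ok(0, 4, 8));			/* 1 */
	printf("%d\n", irq_range_ok(6, 4, 8));			/* 0: past max */
	printf("%d\n", irq_range_ok(1, UINT32_MAX - 1, 8));	/* 0: would wrap */
	return 0;
}
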
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 4bc704e1b7c7..bfe72a991fa6 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -468,7 +468,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
if (!is_irq_none(vdev))
return -EINVAL;
- vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
+ vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
if (!vdev->ctx)
return -ENOMEM;
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index a92783e480e6..ca55f93b0f62 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1196,6 +1196,8 @@ static void fbcon_free_font(struct display *p, bool freefont)
p->userfont = 0;
}
+static void set_vc_hi_font(struct vc_data *vc, bool set);
+
static void fbcon_deinit(struct vc_data *vc)
{
struct display *p = &fb_display[vc->vc_num];
@@ -1231,6 +1233,9 @@ finished:
if (free_font)
vc->vc_font.data = NULL;
+ if (vc->vc_hi_font_mask)
+ set_vc_hi_font(vc, false);
+
if (!con_is_bound(&fb_con))
fbcon_exit();
@@ -2466,32 +2471,10 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
return 0;
}
-static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
- const u8 * data, int userfont)
+/* set/clear vc_hi_font_mask and update vc attrs accordingly */
+static void set_vc_hi_font(struct vc_data *vc, bool set)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct fbcon_ops *ops = info->fbcon_par;
- struct display *p = &fb_display[vc->vc_num];
- int resize;
- int cnt;
- char *old_data = NULL;
-
- if (CON_IS_VISIBLE(vc) && softback_lines)
- fbcon_set_origin(vc);
-
- resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
- if (p->userfont)
- old_data = vc->vc_font.data;
- if (userfont)
- cnt = FNTCHARCNT(data);
- else
- cnt = 256;
- vc->vc_font.data = (void *)(p->fontdata = data);
- if ((p->userfont = userfont))
- REFCOUNT(data)++;
- vc->vc_font.width = w;
- vc->vc_font.height = h;
- if (vc->vc_hi_font_mask && cnt == 256) {
+ if (!set) {
vc->vc_hi_font_mask = 0;
if (vc->vc_can_do_color) {
vc->vc_complement_mask >>= 1;
@@ -2514,7 +2497,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
((c & 0xfe00) >> 1) | (c & 0xff);
vc->vc_attr >>= 1;
}
- } else if (!vc->vc_hi_font_mask && cnt == 512) {
+ } else {
vc->vc_hi_font_mask = 0x100;
if (vc->vc_can_do_color) {
vc->vc_complement_mask <<= 1;
@@ -2546,8 +2529,38 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
} else
vc->vc_video_erase_char = c & ~0x100;
}
-
}
+}
+
+static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+ const u8 * data, int userfont)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct display *p = &fb_display[vc->vc_num];
+ int resize;
+ int cnt;
+ char *old_data = NULL;
+
+ if (CON_IS_VISIBLE(vc) && softback_lines)
+ fbcon_set_origin(vc);
+
+ resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
+ if (p->userfont)
+ old_data = vc->vc_font.data;
+ if (userfont)
+ cnt = FNTCHARCNT(data);
+ else
+ cnt = 256;
+ vc->vc_font.data = (void *)(p->fontdata = data);
+ if ((p->userfont = userfont))
+ REFCOUNT(data)++;
+ vc->vc_font.width = w;
+ vc->vc_font.height = h;
+ if (vc->vc_hi_font_mask && cnt == 256)
+ set_vc_hi_font(vc, false);
+ else if (!vc->vc_hi_font_mask && cnt == 512)
+ set_vc_hi_font(vc, true);
if (resize) {
int cols, rows;
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index cd005c227a23..d026bbb4e501 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -643,7 +643,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
break;
case XenbusStateInitWait:
-InitWait:
xenbus_switch_state(dev, XenbusStateConnected);
break;
@@ -654,7 +653,8 @@ InitWait:
* get Connected twice here.
*/
if (dev->state != XenbusStateConnected)
- goto InitWait; /* no InitWait seen yet, fudge it */
+ /* no InitWait seen yet, fudge it */
+ xenbus_switch_state(dev, XenbusStateConnected);
if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
"request-update", "%d", &val) < 0)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 148e8ea1bc96..3d42cde02864 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -349,6 +349,8 @@ static int init_vqs(struct virtio_balloon *vb)
* Prime this virtqueue with one buffer so the hypervisor can
* use it to signal us later.
*/
+ update_balloon_stats(vb);
+
sg_init_one(&sg, vb->stats, sizeof vb->stats);
if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
< 0)
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index 8ca1030675a6..131501b3b11b 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -464,7 +464,7 @@ static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
- *pci_base = (dma_addr_t)vme_base + pci_offset;
+ *pci_base = (dma_addr_t)*vme_base + pci_offset;
*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
*enabled = 0;
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 019fc5a68a14..f26f38ccd194 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1843,14 +1843,6 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_delayed_node *delayed_node;
int ret = 0;
- /*
- * we don't do delayed inode updates during log recovery because it
- * leads to enospc problems. This means we also can't do
- * delayed inode refs
- */
- if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
- return -EAGAIN;
-
delayed_node = btrfs_get_or_create_delayed_node(inode);
if (IS_ERR(delayed_node))
return PTR_ERR(delayed_node);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index be7e31a933e5..7f0d9be6e9bc 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4661,11 +4661,20 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
lock_page(page);
}
locked_pages++;
+ }
+ /*
+ * We need to firstly lock all pages to make sure that
+ * the uptodate bit of our pages won't be affected by
+ * clear_extent_buffer_uptodate().
+ */
+ for (i = start_i; i < num_pages; i++) {
+ page = eb->pages[i];
if (!PageUptodate(page)) {
num_reads++;
all_uptodate = 0;
}
}
+
if (all_uptodate) {
if (start_i == 0)
set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 37e4a72a7d1c..ae4e35bdc2cd 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -45,6 +45,9 @@
#define CIFS_MOUNT_POSIXACL 0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */
#define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */
#define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */
+#define CIFS_MOUNT_USE_PREFIX_PATH 0x1000000 /* make subpath with unaccessible
+ * root mountable
+ */
struct cifs_sb_info {
struct rb_root tlink_tree;
@@ -65,5 +68,6 @@ struct cifs_sb_info {
char *mountdata; /* options received at mount time or via DFS refs */
struct backing_dev_info bdi;
struct delayed_work prune_tlinks;
+ char *prepath;
};
#endif /* _CIFS_FS_SB_H */
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 3752b9f6d9e4..e4e2152b7888 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -565,6 +565,9 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
char *s, *p;
char sep;
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+ return dget(sb->s_root);
+
full_path = cifs_build_path_to_root(vol, cifs_sb,
cifs_sb_master_tcon(cifs_sb));
if (full_path == NULL)
@@ -644,10 +647,14 @@ cifs_do_mount(struct file_system_type *fs_type,
cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
if (cifs_sb->mountdata == NULL) {
root = ERR_PTR(-ENOMEM);
- goto out_cifs_sb;
+ goto out_free;
}
- cifs_setup_cifs_sb(volume_info, cifs_sb);
+ rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
+ if (rc) {
+ root = ERR_PTR(rc);
+ goto out_free;
+ }
rc = cifs_mount(cifs_sb, volume_info);
if (rc) {
@@ -655,7 +662,7 @@ cifs_do_mount(struct file_system_type *fs_type,
cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
rc);
root = ERR_PTR(rc);
- goto out_mountdata;
+ goto out_free;
}
mnt_data.vol = volume_info;
@@ -698,9 +705,9 @@ out:
cifs_cleanup_volume_info(volume_info);
return root;
-out_mountdata:
+out_free:
+ kfree(cifs_sb->prepath);
kfree(cifs_sb->mountdata);
-out_cifs_sb:
kfree(cifs_sb);
out_nls:
unload_nls(volume_info->local_nls);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index f74dfa89c4c4..b7f589918571 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -576,6 +576,8 @@ struct TCP_Server_Info {
#ifdef CONFIG_CIFS_SMB2
unsigned int max_read;
unsigned int max_write;
+ struct delayed_work reconnect; /* reconnect workqueue job */
+ struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
#endif /* CONFIG_CIFS_SMB2 */
};
@@ -750,6 +752,7 @@ cap_unix(struct cifs_ses *ses)
struct cifs_tcon {
struct list_head tcon_list;
int tc_count;
+ struct list_head rlist; /* reconnect list */
struct list_head openFileList;
struct cifs_ses *ses; /* pointer to session associated with */
char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
@@ -823,7 +826,6 @@ struct cifs_tcon {
bool need_reconnect:1; /* connection reset, tid now invalid */
#ifdef CONFIG_CIFS_SMB2
bool print:1; /* set if connection to printer share */
- bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */
__u32 capabilities;
__u32 share_flags;
__u32 maximal_access;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index dda188a94332..871a30966736 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -174,7 +174,7 @@ extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
extern int cifs_readv_from_socket(struct TCP_Server_Info *server,
struct kvec *iov_orig, unsigned int nr_segs,
unsigned int to_read);
-extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+extern int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
struct cifs_sb_info *cifs_sb);
extern int cifs_match_super(struct super_block *, void *);
extern void cifs_cleanup_volume_info(struct smb_vol *pvolume_info);
@@ -194,6 +194,9 @@ extern void cifs_add_pending_open_locked(struct cifs_fid *fid,
struct tcon_link *tlink,
struct cifs_pending_open *open);
extern void cifs_del_pending_open(struct cifs_pending_open *open);
+extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
+ int from_reconnect);
+extern void cifs_put_tcon(struct cifs_tcon *tcon);
#if IS_ENABLED(CONFIG_CIFS_DFS_UPCALL)
extern void cifs_dfs_release_automount_timer(void);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 7c33afd7d5d3..417ce0a497f4 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -52,6 +52,9 @@
#include "nterr.h"
#include "rfc1002pdu.h"
#include "fscache.h"
+#ifdef CONFIG_CIFS_SMB2
+#include "smb2proto.h"
+#endif
#define CIFS_PORT 445
#define RFC1001_PORT 139
@@ -2070,8 +2073,8 @@ cifs_find_tcp_session(struct smb_vol *vol)
return NULL;
}
-static void
-cifs_put_tcp_session(struct TCP_Server_Info *server)
+void
+cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
{
struct task_struct *task;
@@ -2088,6 +2091,19 @@ cifs_put_tcp_session(struct TCP_Server_Info *server)
cancel_delayed_work_sync(&server->echo);
+#ifdef CONFIG_CIFS_SMB2
+ if (from_reconnect)
+ /*
+ * Avoid deadlock here: reconnect work calls
+ * cifs_put_tcp_session() at its end. Need to be sure
+ * that reconnect work does nothing with server pointer after
+ * that step.
+ */
+ cancel_delayed_work(&server->reconnect);
+ else
+ cancel_delayed_work_sync(&server->reconnect);
+#endif
+
spin_lock(&GlobalMid_Lock);
server->tcpStatus = CifsExiting;
spin_unlock(&GlobalMid_Lock);
@@ -2158,6 +2174,10 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
+#ifdef CONFIG_CIFS_SMB2
+ INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
+ mutex_init(&tcp_ses->reconnect_mutex);
+#endif
memcpy(&tcp_ses->srcaddr, &volume_info->srcaddr,
sizeof(tcp_ses->srcaddr));
memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
@@ -2288,7 +2308,7 @@ cifs_put_smb_ses(struct cifs_ses *ses)
_free_xid(xid);
}
sesInfoFree(ses);
- cifs_put_tcp_session(server);
+ cifs_put_tcp_session(server, 0);
}
#ifdef CONFIG_KEYS
@@ -2461,7 +2481,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
mutex_unlock(&ses->session_mutex);
/* existing SMB ses has a server reference already */
- cifs_put_tcp_session(server);
+ cifs_put_tcp_session(server, 0);
free_xid(xid);
return ses;
}
@@ -2550,7 +2570,7 @@ cifs_find_tcon(struct cifs_ses *ses, const char *unc)
return NULL;
}
-static void
+void
cifs_put_tcon(struct cifs_tcon *tcon)
{
unsigned int xid;
@@ -2715,6 +2735,24 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
return 1;
}
+static int
+match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
+{
+ struct cifs_sb_info *old = CIFS_SB(sb);
+ struct cifs_sb_info *new = mnt_data->cifs_sb;
+
+ if (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) {
+ if (!(new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH))
+ return 0;
+ /* The prepath should be null terminated strings */
+ if (strcmp(new->prepath, old->prepath))
+ return 0;
+
+ return 1;
+ }
+ return 0;
+}
+
int
cifs_match_super(struct super_block *sb, void *data)
{
@@ -2742,7 +2780,8 @@ cifs_match_super(struct super_block *sb, void *data)
if (!match_server(tcp_srv, volume_info) ||
!match_session(ses, volume_info) ||
- !match_tcon(tcon, volume_info->UNC)) {
+ !match_tcon(tcon, volume_info->UNC) ||
+ !match_prepath(sb, mnt_data)) {
rc = 0;
goto out;
}
@@ -3158,7 +3197,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
}
}
-void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
struct cifs_sb_info *cifs_sb)
{
INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
@@ -3240,6 +3279,15 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm))
cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n");
+
+
+ if (pvolume_info->prepath) {
+ cifs_sb->prepath = kstrdup(pvolume_info->prepath, GFP_KERNEL);
+ if (cifs_sb->prepath == NULL)
+ return -ENOMEM;
+ }
+
+ return 0;
}
static void
@@ -3410,6 +3458,44 @@ cifs_get_volume_info(char *mount_data, const char *devname)
return volume_info;
}
+static int
+cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
+ unsigned int xid,
+ struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb,
+ char *full_path)
+{
+ int rc;
+ char *s;
+ char sep, tmp;
+
+ sep = CIFS_DIR_SEP(cifs_sb);
+ s = full_path;
+
+ rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, "");
+ while (rc == 0) {
+ /* skip separators */
+ while (*s == sep)
+ s++;
+ if (!*s)
+ break;
+ /* next separator */
+ while (*s && *s != sep)
+ s++;
+
+ /*
+ * temporarily null-terminate the path at the end of
+ * the current component
+ */
+ tmp = *s;
+ *s = 0;
+ rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
+ full_path);
+ *s = tmp;
+ }
+ return rc;
+}
+
int
cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
{
@@ -3536,6 +3622,17 @@ remote_path_check:
kfree(full_path);
goto mount_fail_check;
}
+ if (rc != -EREMOTE) {
+ rc = cifs_are_all_path_components_accessible(server,
+ xid, tcon, cifs_sb,
+ full_path);
+ if (rc != 0) {
+ cifs_dbg(VFS, "cannot query dirs between root and final path, "
+ "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+ rc = 0;
+ }
+ }
kfree(full_path);
}
@@ -3599,7 +3696,7 @@ mount_fail_check:
else if (ses)
cifs_put_smb_ses(ses);
else
- cifs_put_tcp_session(server);
+ cifs_put_tcp_session(server, 0);
bdi_destroy(&cifs_sb->bdi);
}
@@ -3793,6 +3890,7 @@ cifs_umount(struct cifs_sb_info *cifs_sb)
bdi_destroy(&cifs_sb->bdi);
kfree(cifs_sb->mountdata);
+ kfree(cifs_sb->prepath);
unload_nls(cifs_sb->local_nls);
kfree(cifs_sb);
}
@@ -3932,7 +4030,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info);
if (IS_ERR(ses)) {
tcon = (struct cifs_tcon *)ses;
- cifs_put_tcp_session(master_tcon->ses->server);
+ cifs_put_tcp_session(master_tcon->ses->server, 0);
goto out;
}
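
cifs_are_all_path_components_accessible() probes the share root first and then each intermediate directory by temporarily writing a NUL over the next separator, querying that prefix, and restoring the byte. The sketch below walks a path the same way and only prints each prefix it would query; it is a plain string walk, not the SMB round trip:

#include <stdio.h>

static void walk_prefixes(char *path, char sep)
{
	char *s = path;

	printf("query: <>\n");			/* the share root, "" */
	while (1) {
		while (*s == sep)		/* skip separators */
			s++;
		if (!*s)
			break;
		while (*s && *s != sep)		/* end of this component */
			s++;
		char tmp = *s;			/* NUL-terminate the prefix, */
		*s = '\0';			/* query it, then restore */
		printf("query: <%s>\n", path);
		*s = tmp;
	}
}

int main(void)
{
	char path[] = "/dir1/dir2/file";
	walk_prefixes(path, '/');
	return 0;
}
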
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index a998c929286f..543124703e05 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -83,6 +83,7 @@ build_path_from_dentry(struct dentry *direntry)
struct dentry *temp;
int namelen;
int dfsplen;
+ int pplen = 0;
char *full_path;
char dirsep;
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
@@ -94,8 +95,12 @@ build_path_from_dentry(struct dentry *direntry)
dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
else
dfsplen = 0;
+
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+ pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0;
+
cifs_bp_rename_retry:
- namelen = dfsplen;
+ namelen = dfsplen + pplen;
seq = read_seqbegin(&rename_lock);
rcu_read_lock();
for (temp = direntry; !IS_ROOT(temp);) {
@@ -136,7 +141,7 @@ cifs_bp_rename_retry:
}
}
rcu_read_unlock();
- if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) {
+ if (namelen != dfsplen + pplen || read_seqretry(&rename_lock, seq)) {
cifs_dbg(FYI, "did not end path lookup where expected. namelen=%ddfsplen=%d\n",
namelen, dfsplen);
/* presumably this is only possible if racing with a rename
@@ -152,6 +157,17 @@ cifs_bp_rename_retry:
those safely to '/' if any are found in the middle of the prepath */
/* BB test paths to Windows with '/' in the midst of prepath */
+ if (pplen) {
+ int i;
+
+ cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath);
+ memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1);
+ full_path[dfsplen] = '\\';
+ for (i = 0; i < pplen-1; i++)
+ if (full_path[dfsplen+1+i] == '/')
+ full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb);
+ }
+
if (dfsplen) {
strncpy(full_path, tcon->treeName, dfsplen);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 54304ccae7e7..971e7bea5d80 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -895,12 +895,29 @@ struct inode *cifs_root_iget(struct super_block *sb)
struct inode *inode = NULL;
long rc;
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+ char *path = NULL;
+ int len;
+
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+ && cifs_sb->prepath) {
+ len = strlen(cifs_sb->prepath);
+ path = kzalloc(len + 2 /* leading sep + null */, GFP_KERNEL);
+ if (path == NULL)
+ return ERR_PTR(-ENOMEM);
+ path[0] = '/';
+ memcpy(path+1, cifs_sb->prepath, len);
+ } else {
+ path = kstrdup("", GFP_KERNEL);
+ if (path == NULL)
+ return ERR_PTR(-ENOMEM);
+ }
xid = get_xid();
+ convert_delimiter(path, CIFS_DIR_SEP(cifs_sb));
if (tcon->unix_ext)
- rc = cifs_get_inode_info_unix(&inode, "", sb, xid);
+ rc = cifs_get_inode_info_unix(&inode, path, sb, xid);
else
- rc = cifs_get_inode_info(&inode, "", NULL, sb, xid, NULL);
+ rc = cifs_get_inode_info(&inode, path, NULL, sb, xid, NULL);
if (!inode) {
inode = ERR_PTR(rc);
@@ -928,6 +945,7 @@ struct inode *cifs_root_iget(struct super_block *sb)
}
out:
+ kfree(path);
/* can not call macro free_xid here since in a void func
* TODO: This is no longer true
*/
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 610c6c24d41d..d97841e124ba 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -891,6 +891,15 @@ cifs_dir_needs_close(struct cifsFileInfo *cfile)
return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
}
+static bool
+cifs_can_echo(struct TCP_Server_Info *server)
+{
+ if (server->tcpStatus == CifsGood)
+ return true;
+
+ return false;
+}
+
struct smb_version_operations smb1_operations = {
.send_cancel = send_nt_cancel,
.compare_fids = cifs_compare_fids,
@@ -923,6 +932,7 @@ struct smb_version_operations smb1_operations = {
.get_dfs_refer = CIFSGetDFSRefer,
.qfs_tcon = cifs_qfs_tcon,
.is_path_accessible = cifs_is_path_accessible,
+ .can_echo = cifs_can_echo,
.query_path_info = cifs_query_path_info,
.query_file_info = cifs_query_file_info,
.get_srv_inum = cifs_get_srv_inum,
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index d801f63cddd0..866caf1d2bea 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -266,7 +266,7 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
* and check it for zero before using.
*/
max_buf = tlink_tcon(cfile->tlink)->ses->server->maxBuf;
- if (!max_buf) {
+ if (max_buf < sizeof(struct smb2_lock_element)) {
free_xid(xid);
return -EINVAL;
}
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 9dd8c968d94e..04fd3946213f 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -254,7 +254,7 @@ out:
case SMB2_CHANGE_NOTIFY:
case SMB2_QUERY_INFO:
case SMB2_SET_INFO:
- return -EAGAIN;
+ rc = -EAGAIN;
}
unload_nls(nls_codepage);
return rc;
@@ -720,9 +720,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
else
return -EIO;
- if (tcon && tcon->bad_network_name)
- return -ENOENT;
-
unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
if (unc_path == NULL)
return -ENOMEM;
@@ -734,6 +731,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
return -EINVAL;
}
+ /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
+ if (tcon)
+ tcon->tid = 0;
+
rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
if (rc) {
kfree(unc_path);
@@ -809,8 +810,6 @@ tcon_exit:
tcon_error_exit:
if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
- if (tcon)
- tcon->bad_network_name = true;
}
goto tcon_exit;
}
@@ -1239,6 +1238,54 @@ smb2_echo_callback(struct mid_q_entry *mid)
add_credits(server, credits_received, CIFS_ECHO_OP);
}
+void smb2_reconnect_server(struct work_struct *work)
+{
+ struct TCP_Server_Info *server = container_of(work,
+ struct TCP_Server_Info, reconnect.work);
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon, *tcon2;
+ struct list_head tmp_list;
+ int tcon_exist = false;
+
+ /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
+ mutex_lock(&server->reconnect_mutex);
+
+ INIT_LIST_HEAD(&tmp_list);
+ cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
+
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ if (tcon->need_reconnect) {
+ tcon->tc_count++;
+ list_add_tail(&tcon->rlist, &tmp_list);
+ tcon_exist = true;
+ }
+ }
+ }
+ /*
+ * Get the reference to server struct to be sure that the last call of
+ * cifs_put_tcon() in the loop below won't release the server pointer.
+ */
+ if (tcon_exist)
+ server->srv_count++;
+
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
+ smb2_reconnect(SMB2_ECHO, tcon);
+ list_del_init(&tcon->rlist);
+ cifs_put_tcon(tcon);
+ }
+
+ cifs_dbg(FYI, "Reconnecting tcons finished\n");
+ mutex_unlock(&server->reconnect_mutex);
+
+ /* now we can safely release srv struct */
+ if (tcon_exist)
+ cifs_put_tcp_session(server, 1);
+}
+
int
SMB2_echo(struct TCP_Server_Info *server)
{
@@ -1251,32 +1298,11 @@ SMB2_echo(struct TCP_Server_Info *server)
cifs_dbg(FYI, "In echo request\n");
if (server->tcpStatus == CifsNeedNegotiate) {
- struct list_head *tmp, *tmp2;
- struct cifs_ses *ses;
- struct cifs_tcon *tcon;
-
- cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
- spin_lock(&cifs_tcp_ses_lock);
- list_for_each(tmp, &server->smb_ses_list) {
- ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
- list_for_each(tmp2, &ses->tcon_list) {
- tcon = list_entry(tmp2, struct cifs_tcon,
- tcon_list);
- /* add check for persistent handle reconnect */
- if (tcon && tcon->need_reconnect) {
- spin_unlock(&cifs_tcp_ses_lock);
- rc = smb2_reconnect(SMB2_ECHO, tcon);
- spin_lock(&cifs_tcp_ses_lock);
- }
- }
- }
- spin_unlock(&cifs_tcp_ses_lock);
+ /* No need to send echo on newly established connections */
+ queue_delayed_work(cifsiod_wq, &server->reconnect, 0);
+ return rc;
}
- /* if no session, renegotiate failed above */
- if (server->tcpStatus == CifsNeedNegotiate)
- return -EIO;
-
rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
if (rc)
return rc;
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 2aa3535e38ce..d0cd166ac887 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -93,6 +93,7 @@ extern void smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock);
extern int smb2_unlock_range(struct cifsFileInfo *cfile,
struct file_lock *flock, const unsigned int xid);
extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile);
+extern void smb2_reconnect_server(struct work_struct *work);
/*
* SMB2 Worker functions - most of protocol specific implementation details
diff --git a/fs/dcache.c b/fs/dcache.c
index 2d0b9d2f3c43..f4fd9651421c 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2405,6 +2405,12 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
dentry->d_parent = dentry;
list_del_init(&dentry->d_child);
anon->d_parent = dparent;
+ if (likely(!d_unhashed(anon))) {
+ hlist_bl_lock(&anon->d_sb->s_anon);
+ __hlist_bl_del(&anon->d_hash);
+ anon->d_hash.pprev = NULL;
+ hlist_bl_unlock(&anon->d_sb->s_anon);
+ }
list_move(&anon->d_child, &dparent->d_subdirs);
write_seqcount_end(&dentry->d_seq);
@@ -2459,7 +2465,6 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
* could splice into our tree? */
__d_materialise_dentry(dentry, alias);
write_sequnlock(&rename_lock);
- __d_drop(alias);
goto found;
} else {
/* Nope, but we must(!) avoid directory
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 5fb975495c2d..1095d77c2a9d 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -73,10 +73,9 @@ static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
csum_size);
offset += csum_size;
- csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
- EXT4_INODE_SIZE(inode->i_sb) -
- offset);
}
+ csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+ EXT4_INODE_SIZE(inode->i_sb) - offset);
}
return csum;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 83ed61a6cfcb..cba1fc678eec 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3063,6 +3063,13 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
if (ar->pright && start + size - 1 >= ar->lright)
size -= start + size - ar->lright;
+ /*
+ * Trim allocation request for filesystems with artificially small
+ * groups.
+ */
+ if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
+ size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
+
end = start + size;
/* check we don't cross already preallocated blocks */
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index faa192087033..1fe383f22ab1 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -753,6 +753,7 @@ static void ext4_put_super(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
+ int aborted = 0;
int i, err;
ext4_unregister_li_request(sb);
@@ -762,9 +763,10 @@ static void ext4_put_super(struct super_block *sb)
destroy_workqueue(sbi->dio_unwritten_wq);
if (sbi->s_journal) {
+ aborted = is_journal_aborted(sbi->s_journal);
err = jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
- if (err < 0)
+ if ((err < 0) && !aborted)
ext4_abort(sb, "Couldn't clean up the journal");
}
@@ -775,7 +777,7 @@ static void ext4_put_super(struct super_block *sb)
ext4_ext_release(sb);
ext4_xattr_put_super(sb);
- if (!(sb->s_flags & MS_RDONLY)) {
+ if (!(sb->s_flags & MS_RDONLY) && !aborted) {
EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
es->s_state = cpu_to_le16(sbi->s_mount_state);
}
@@ -3185,10 +3187,15 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
ext4_set_bit(s++, buf);
count++;
}
- for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
- ext4_set_bit(EXT4_B2C(sbi, s++), buf);
- count++;
+ j = ext4_bg_num_gdb(sb, grp);
+ if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
+ ext4_error(sb, "Invalid number of block group "
+ "descriptor blocks: %d", j);
+ j = EXT4_BLOCKS_PER_GROUP(sb) - s;
}
+ count += j;
+ for (; j > 0; j--)
+ ext4_set_bit(EXT4_B2C(sbi, s++), buf);
}
if (!count)
return 0;
@@ -3291,7 +3298,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
char *orig_data = kstrdup(data, GFP_KERNEL);
struct buffer_head *bh;
struct ext4_super_block *es = NULL;
- struct ext4_sb_info *sbi;
+ struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
ext4_fsblk_t block;
ext4_fsblk_t sb_block = get_sb_block(&data);
ext4_fsblk_t logical_sb_block;
@@ -3311,16 +3318,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
ext4_group_t first_not_zeroed;
- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
- if (!sbi)
- goto out_free_orig;
+ if ((data && !orig_data) || !sbi)
+ goto out_free_base;
sbi->s_blockgroup_lock =
kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
- if (!sbi->s_blockgroup_lock) {
- kfree(sbi);
- goto out_free_orig;
- }
+ if (!sbi->s_blockgroup_lock)
+ goto out_free_base;
+
sb->s_fs_info = sbi;
sbi->s_sb = sb;
sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
@@ -3463,11 +3468,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
*/
sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
- if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
- &journal_devnum, &journal_ioprio, 0)) {
- ext4_msg(sb, KERN_WARNING,
- "failed to parse options in superblock: %s",
- sbi->s_es->s_mount_opts);
+ if (sbi->s_es->s_mount_opts[0]) {
+ char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
+ sizeof(sbi->s_es->s_mount_opts),
+ GFP_KERNEL);
+ if (!s_mount_opts)
+ goto failed_mount;
+ if (!parse_options(s_mount_opts, sb, &journal_devnum,
+ &journal_ioprio, 0)) {
+ ext4_msg(sb, KERN_WARNING,
+ "failed to parse options in superblock: %s",
+ s_mount_opts);
+ }
+ kfree(s_mount_opts);
}
sbi->s_def_mount_opt = sbi->s_mount_opt;
if (!parse_options((char *) data, sb, &journal_devnum,
@@ -3614,12 +3627,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
- if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
- goto cantfind_ext4;
sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
if (sbi->s_inodes_per_block == 0)
goto cantfind_ext4;
+ if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
+ sbi->s_inodes_per_group > blocksize * 8) {
+ ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
+ sbi->s_blocks_per_group);
+ goto failed_mount;
+ }
sbi->s_itb_per_group = sbi->s_inodes_per_group /
sbi->s_inodes_per_block;
sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
@@ -3703,13 +3720,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
sbi->s_cluster_ratio = clustersize / blocksize;
- if (sbi->s_inodes_per_group > blocksize * 8) {
- ext4_msg(sb, KERN_ERR,
- "#inodes per group too big: %lu",
- sbi->s_inodes_per_group);
- goto failed_mount;
- }
-
/* Do we have standard group size of clustersize * 8 blocks ? */
if (sbi->s_blocks_per_group == clustersize << 3)
set_opt2(sb, STD_GROUP_SIZE);
@@ -3769,6 +3779,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
EXT4_DESC_PER_BLOCK(sb);
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG)) {
+ if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
+ ext4_msg(sb, KERN_WARNING,
+ "first meta block group too large: %u "
+ "(group descriptor block count %u)",
+ le32_to_cpu(es->s_first_meta_bg), db_count);
+ goto failed_mount;
+ }
+ }
sbi->s_group_desc = ext4_kvmalloc(db_count *
sizeof(struct buffer_head *),
GFP_KERNEL);
@@ -3884,7 +3903,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
*/
if (!test_opt(sb, NOLOAD) &&
EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
- if (ext4_load_journal(sb, es, journal_devnum))
+ err = ext4_load_journal(sb, es, journal_devnum);
+ if (err)
goto failed_mount3;
} else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
@@ -4099,7 +4119,9 @@ no_journal:
}
ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
- "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
+ "Opts: %.*s%s%s", descr,
+ (int) sizeof(sbi->s_es->s_mount_opts),
+ sbi->s_es->s_mount_opts,
*sbi->s_es->s_mount_opts ? "; " : "", orig_data);
if (es->s_error_count)
@@ -4167,8 +4189,8 @@ failed_mount:
out_fail:
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
+out_free_base:
kfree(sbi);
-out_free_orig:
kfree(orig_data);
return err ? err : ret;
}
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 5d4513cb1b3c..04708fab99aa 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -1193,6 +1193,16 @@ out:
return 0;
}
+static void fat_dummy_inode_init(struct inode *inode)
+{
+ /* Initialize this dummy inode to work as no-op. */
+ MSDOS_I(inode)->mmu_private = 0;
+ MSDOS_I(inode)->i_start = 0;
+ MSDOS_I(inode)->i_logstart = 0;
+ MSDOS_I(inode)->i_attrs = 0;
+ MSDOS_I(inode)->i_pos = 0;
+}
+
static int fat_read_root(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
@@ -1503,12 +1513,13 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
fat_inode = new_inode(sb);
if (!fat_inode)
goto out_fail;
- MSDOS_I(fat_inode)->i_pos = 0;
+ fat_dummy_inode_init(fat_inode);
sbi->fat_inode = fat_inode;
fsinfo_inode = new_inode(sb);
if (!fsinfo_inode)
goto out_fail;
+ fat_dummy_inode_init(fsinfo_inode);
fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
sbi->fsinfo_inode = fsinfo_inode;
insert_inode_hash(fsinfo_inode);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index b631c9043460..9aaa6db3e4ba 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -763,7 +763,7 @@ static int get_first_leaf(struct gfs2_inode *dip, u32 index,
int error;
error = get_leaf_nr(dip, index, &leaf_no);
- if (!error)
+ if (!IS_ERR_VALUE(error))
error = get_leaf(dip, leaf_no, bh_out);
return error;
@@ -974,7 +974,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
index = name->hash >> (32 - dip->i_depth);
error = get_leaf_nr(dip, index, &leaf_no);
- if (error)
+ if (IS_ERR_VALUE(error))
return error;
/* Get the old leaf block */
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 4e5f332f15d9..db7d89cea2ce 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -169,7 +169,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 31666c92b46a..563435684c3c 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -149,8 +149,10 @@ static int get_task_ioprio(struct task_struct *p)
if (ret)
goto out;
ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
+ task_lock(p);
if (p->io_context)
ret = p->io_context->ioprio;
+ task_unlock(p);
out:
return ret;
}
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 21b828c713cc..54e958125c1f 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1655,7 +1655,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
__blist_del_buffer(list, jh);
jh->b_jlist = BJ_None;
- if (test_clear_buffer_jbddirty(bh))
+ if (transaction && is_journal_aborted(transaction->t_journal))
+ clear_buffer_jbddirty(bh);
+ else if (test_clear_buffer_jbddirty(bh))
mark_buffer_dirty(bh); /* Expose it to the VM */
}
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index e093e73178b7..48038e7adb1e 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1435,6 +1435,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
switch (err) {
case -ENOENT:
d_add(dentry, NULL);
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
break;
case -EISDIR:
case -ENOTDIR:
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index a87a44f84113..f8bd4ea2a891 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -419,7 +419,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
*/
if (!PageUptodate(page)) {
unsigned pglen = nfs_page_length(page);
- unsigned end = offset + len;
+ unsigned end = offset + copied;
if (pglen == 0) {
zero_user_segments(page, 0, offset,
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index c2b89a1a403b..c1148e87d533 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -4047,7 +4047,7 @@ out:
*/
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
- struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
+ struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, };
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
.acl_pages = pages,
@@ -4061,13 +4061,9 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
.rpc_argp = &args,
.rpc_resp = &res,
};
- unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
+ unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
int ret = -ENOMEM, i;
- /* As long as we're doing a round trip to the server anyway,
- * let's be prepared for a page of acl data. */
- if (npages == 0)
- npages = 1;
if (npages > ARRAY_SIZE(pages))
return -ERANGE;
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 8016892f3f05..879b56d2f722 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -627,6 +627,37 @@ static __be32 map_new_errors(u32 vers, __be32 nfserr)
return nfserr;
}
+/*
+ * A write procedure can have a large argument, and a read procedure can
+ * have a large reply, but no NFSv2 or NFSv3 procedure has argument and
+ * reply that can both be larger than a page. The xdr code has taken
+ * advantage of this assumption to be a little sloppy about bounds checking in
+ * some cases. Pending a rewrite of the NFSv2/v3 xdr code to fix that
+ * problem, we enforce these assumptions here:
+ */
+static bool nfs_request_too_big(struct svc_rqst *rqstp,
+ struct svc_procedure *proc)
+{
+ /*
+ * The ACL code has more careful bounds-checking and is not
+ * susceptible to this problem:
+ */
+ if (rqstp->rq_prog != NFS_PROGRAM)
+ return false;
+ /*
+ * Ditto NFSv4 (which can in theory have argument and reply both
+ * more than a page):
+ */
+ if (rqstp->rq_vers >= 4)
+ return false;
+ /* The reply will be small, we're OK: */
+ if (proc->pc_xdrressize > 0 &&
+ proc->pc_xdrressize < XDR_QUADLEN(PAGE_SIZE))
+ return false;
+
+ return rqstp->rq_arg.len > PAGE_SIZE;
+}
+
int
nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
@@ -639,6 +670,11 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
rqstp->rq_vers, rqstp->rq_proc);
proc = rqstp->rq_procinfo;
+ if (nfs_request_too_big(rqstp, proc)) {
+ dprintk("nfsd: NFSv%d argument too large\n", rqstp->rq_vers);
+ *statp = rpc_garbage_args;
+ return 1;
+ }
/*
* Give the xdr decoder a chance to change this if it wants
* (necessary in the NFSv4.0 compound case)
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index b294deb27d17..cf88dd4f8f36 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3264,6 +3264,16 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
lockres->l_level, new_level);
+ /*
+ * On DLM_LKF_VALBLK, fsdlm behaves differently with o2cb. It always
+ * expects DLM_LKF_VALBLK being set if the LKB has LVB, so that
+ * we can recover correctly from node failure. Otherwise, we may get
+ * invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set.
+ */
+ if (!ocfs2_is_o2cb_active() &&
+ lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
+ lvb = 1;
+
if (lvb)
dlm_flags |= DLM_LKF_VALBLK;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 496af7fd87d5..86ed0f4aefc4 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1104,6 +1104,7 @@ out:
int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
{
int status = 0, size_change;
+ int inode_locked = 0;
struct inode *inode = dentry->d_inode;
struct super_block *sb = inode->i_sb;
struct ocfs2_super *osb = OCFS2_SB(sb);
@@ -1149,6 +1150,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
mlog_errno(status);
goto bail_unlock_rw;
}
+ inode_locked = 1;
if (size_change && attr->ia_size != i_size_read(inode)) {
status = inode_newsize_ok(inode, attr->ia_size);
@@ -1229,7 +1231,10 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
bail_commit:
ocfs2_commit_trans(osb, handle);
bail_unlock:
- ocfs2_inode_unlock(inode, 1);
+ if (status) {
+ ocfs2_inode_unlock(inode, 1);
+ inode_locked = 0;
+ }
bail_unlock_rw:
if (size_change)
ocfs2_rw_unlock(inode, 1);
@@ -1245,6 +1250,8 @@ bail:
if (status < 0)
mlog_errno(status);
}
+ if (inode_locked)
+ ocfs2_inode_unlock(inode, 1);
return status;
}
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 39abf89697ed..88610b3cbc04 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -48,6 +48,12 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
*/
static struct ocfs2_stack_plugin *active_stack;
+inline int ocfs2_is_o2cb_active(void)
+{
+ return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB);
+}
+EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active);
+
static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
{
struct ocfs2_stack_plugin *p;
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
index 1ec56fdb8d0d..fa49d8a1dc7b 100644
--- a/fs/ocfs2/stackglue.h
+++ b/fs/ocfs2/stackglue.h
@@ -289,4 +289,7 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p
int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
+/* In ocfs2_downconvert_lock(), we need to know which stack we are using */
+int ocfs2_is_o2cb_active(void);
+
#endif /* STACKGLUE_H */
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index b86db1236c7c..972cdc282b1a 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -279,11 +279,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
/* We don't show the stack guard page in /proc/maps */
start = vma->vm_start;
- if (stack_guard_page_start(vma, start))
- start += PAGE_SIZE;
end = vma->vm_end;
- if (stack_guard_page_end(vma, end))
- end -= PAGE_SIZE;
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
start,
diff --git a/fs/splice.c b/fs/splice.c
index 2ffa7b0c62fd..ce6ffe94ba26 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -215,6 +215,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
buf->len = spd->partial[page_nr].len;
buf->private = spd->partial[page_nr].private;
buf->ops = spd->ops;
+ buf->flags = 0;
if (spd->flags & SPLICE_F_GIFT)
buf->flags |= PIPE_BUF_FLAG_GIFT;
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index cfbb4c1b2f17..d738a7b842da 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -158,6 +158,12 @@ xfs_setfilesize(
rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
0, 1, _THIS_IP_);
+ /* we abort the update if there was an IO error */
+ if (ioend->io_error) {
+ xfs_trans_cancel(tp, 0);
+ return ioend->io_error;
+ }
+
xfs_ilock(ip, XFS_ILOCK_EXCL);
isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
if (!isize) {
@@ -213,14 +219,17 @@ xfs_end_io(
ioend->io_error = -EIO;
goto done;
}
- if (ioend->io_error)
- goto done;
/*
* For unwritten extents we need to issue transactions to convert a
* range to normal written extens after the data I/O has finished.
+ * Detecting and handling completion IO errors is done individually
+ * for each case as different cleanup operations need to be performed
+ * on error.
*/
if (ioend->io_type == XFS_IO_UNWRITTEN) {
+ if (ioend->io_error)
+ goto done;
error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
ioend->io_size);
} else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) {
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 1b2472a46e46..8ff89db9e663 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -428,6 +428,7 @@ retry:
out_free_pages:
for (i = 0; i < bp->b_page_count; i++)
__free_page(bp->b_pages[i]);
+ bp->b_flags &= ~_XBF_PAGES;
return error;
}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 7cf5e4eafe28..8325cb234d96 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3382,6 +3382,7 @@ xlog_recover_clear_agi_bucket(
agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
offset = offsetof(xfs_agi_t, agi_unlinked) +
(sizeof(xfs_agino_t) * bucket);
+ xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
xfs_trans_log_buf(tp, agibp, offset,
(offset + sizeof(xfs_agino_t) - 1));
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 9b4378af414c..eeb43c4816e5 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -40,8 +40,6 @@ struct inode;
struct dentry;
struct user_namespace;
-struct user_namespace *current_user_ns(void);
-
extern const kernel_cap_t __cap_empty_set;
extern const kernel_cap_t __cap_init_eff_set;
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 6c58dd7cb9ac..cd3fb73dc421 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -345,7 +345,10 @@ extern struct user_namespace init_user_ns;
#ifdef CONFIG_USER_NS
#define current_user_ns() (current_cred_xxx(user_ns))
#else
-#define current_user_ns() (&init_user_ns)
+static inline struct user_namespace *current_user_ns(void)
+{
+ return &init_user_ns;
+}
#endif
diff --git a/include/linux/log2.h b/include/linux/log2.h
index fd7ff3d91e6a..f38fae23bdac 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -16,12 +16,6 @@
#include <linux/bitops.h>
/*
- * deal with unrepresentable constant logarithms
- */
-extern __attribute__((const, noreturn))
-int ____ilog2_NaN(void);
-
-/*
* non-constant log of base 2 calculators
* - the arch may override these in asm/bitops.h if they can be implemented
* more efficiently than using fls() and fls64()
@@ -85,7 +79,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
#define ilog2(n) \
( \
__builtin_constant_p(n) ? ( \
- (n) < 1 ? ____ilog2_NaN() : \
+ (n) < 2 ? 0 : \
(n) & (1ULL << 63) ? 63 : \
(n) & (1ULL << 62) ? 62 : \
(n) & (1ULL << 61) ? 61 : \
@@ -148,10 +142,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
(n) & (1ULL << 4) ? 4 : \
(n) & (1ULL << 3) ? 3 : \
(n) & (1ULL << 2) ? 2 : \
- (n) & (1ULL << 1) ? 1 : \
- (n) & (1ULL << 0) ? 0 : \
- ____ilog2_NaN() \
- ) : \
+ 1 ) : \
(sizeof(n) <= 4) ? \
__ilog2_u32(n) : \
__ilog2_u64(n) \
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 55590f4fe110..d16f5243f952 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1069,34 +1069,6 @@ int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
-{
- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
-static inline int stack_guard_page_start(struct vm_area_struct *vma,
- unsigned long addr)
-{
- return (vma->vm_flags & VM_GROWSDOWN) &&
- (vma->vm_start == addr) &&
- !vma_growsdown(vma->vm_prev, addr);
-}
-
-/* Is the vma a continuation of the stack vma below it? */
-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
-{
- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
-}
-
-static inline int stack_guard_page_end(struct vm_area_struct *vma,
- unsigned long addr)
-{
- return (vma->vm_flags & VM_GROWSUP) &&
- (vma->vm_end == addr) &&
- !vma_growsup(vma->vm_next, addr);
-}
-
extern pid_t
vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
@@ -1622,6 +1594,7 @@ unsigned long ra_submit(struct file_ra_state *ra,
struct address_space *mapping,
struct file *filp);
+extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
@@ -1650,6 +1623,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
return vma;
}
+static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+{
+ unsigned long vm_start = vma->vm_start;
+
+ if (vma->vm_flags & VM_GROWSDOWN) {
+ vm_start -= stack_guard_gap;
+ if (vm_start > vma->vm_start)
+ vm_start = 0;
+ }
+ return vm_start;
+}
+
+static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+{
+ unsigned long vm_end = vma->vm_end;
+
+ if (vma->vm_flags & VM_GROWSUP) {
+ vm_end += stack_guard_gap;
+ if (vm_end < vma->vm_end)
+ vm_end = -PAGE_SIZE;
+ }
+ return vm_end;
+}
+
static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 7b8fc73810ad..f2c785958165 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -223,7 +223,7 @@ enum nfsstat4 {
static inline bool seqid_mutating_err(u32 err)
{
- /* rfc 3530 section 8.1.5: */
+ /* See RFC 7530, section 9.1.7 */
switch (err) {
case NFS4ERR_STALE_CLIENTID:
case NFS4ERR_STALE_STATEID:
@@ -232,6 +232,7 @@ static inline bool seqid_mutating_err(u32 err)
case NFS4ERR_BADXDR:
case NFS4ERR_RESOURCE:
case NFS4ERR_NOFILEHANDLE:
+ case NFS4ERR_MOVED:
return false;
};
return true;
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index e52958d7c2d1..3018528bd1bf 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -158,5 +158,6 @@ struct can_filter {
};
#define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */
+#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */
#endif /* CAN_H */
diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h
index b2cc0cd9c4d9..1a9de73e845d 100644
--- a/include/uapi/linux/packet_diag.h
+++ b/include/uapi/linux/packet_diag.h
@@ -63,7 +63,7 @@ struct packet_diag_mclist {
__u32 pdmc_count;
__u16 pdmc_type;
__u16 pdmc_alen;
- __u8 pdmc_addr[MAX_ADDR_LEN];
+ __u8 pdmc_addr[32]; /* MAX_ADDR_LEN */
};
struct packet_diag_ring {
diff --git a/ipc/shm.c b/ipc/shm.c
index 08b14f69d6cf..ddfad445242c 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1041,8 +1041,8 @@ out_unlock1:
* "raddr" thing points to kernel space, and there has to be a wrapper around
* this.
*/
-long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
- unsigned long shmlba)
+long do_shmat(int shmid, char __user *shmaddr, int shmflg,
+ ulong *raddr, unsigned long shmlba)
{
struct shmid_kernel *shp;
unsigned long addr;
@@ -1063,8 +1063,13 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
goto out;
else if ((addr = (ulong)shmaddr)) {
if (addr & (shmlba - 1)) {
- if (shmflg & SHM_RND)
- addr &= ~(shmlba - 1); /* round down */
+ /*
+ * Round down to the nearest multiple of shmlba.
+ * For sane do_mmap_pgoff() parameters, avoid
+ * round downs that trigger nil-page and MAP_FIXED.
+ */
+ if ((shmflg & SHM_RND) && addr >= shmlba)
+ addr &= ~(shmlba - 1);
else
#ifndef __ARCH_FORCE_SHMLBA
if (addr & ~PAGE_MASK)
diff --git a/kernel/futex.c b/kernel/futex.c
index edc4beae4df1..3f7dd29f4d4d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2413,7 +2413,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
{
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
- struct rt_mutex *pi_mutex = NULL;
struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
@@ -2494,6 +2493,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
+ if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
+ rt_mutex_unlock(&q.pi_state->pi_mutex);
/*
* Drop the reference to the pi state which
* the requeue_pi() code acquired for us.
@@ -2502,6 +2503,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
spin_unlock(q.lock_ptr);
}
} else {
+ struct rt_mutex *pi_mutex;
+
/*
* We have been woken up by futex_unlock_pi(), a timeout, or a
* signal. futex_unlock_pi() will not destroy the lock_ptr nor
@@ -2525,18 +2528,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
if (res)
ret = (res < 0) ? res : 0;
+ /*
+ * If fixup_pi_state_owner() faulted and was unable to handle
+ * the fault, unlock the rt_mutex and return the fault to
+ * userspace.
+ */
+ if (ret && rt_mutex_owner(pi_mutex) == current)
+ rt_mutex_unlock(pi_mutex);
+
/* Unqueue and drop the lock. */
unqueue_me_pi(&q);
}
- /*
- * If fixup_pi_state_owner() faulted and was unable to handle the
- * fault, unlock the rt_mutex and return the fault to userspace.
- */
- if (ret == -EFAULT) {
- if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
- rt_mutex_unlock(pi_mutex);
- } else if (ret == -EINTR) {
+ if (ret == -EINTR) {
/*
* We've already been requeued, but cannot restart by calling
* futex_lock_pi() directly. We could restart this syscall, but
@@ -2902,4 +2906,4 @@ static int __init futex_init(void)
return 0;
}
-__initcall(futex_init);
+core_initcall(futex_init);
diff --git a/kernel/padata.c b/kernel/padata.c
index 072f4ee4eb89..0925ccf92c7f 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -190,19 +190,20 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
reorder = &next_queue->reorder;
+ spin_lock(&reorder->lock);
if (!list_empty(&reorder->list)) {
padata = list_entry(reorder->list.next,
struct padata_priv, list);
- spin_lock(&reorder->lock);
list_del_init(&padata->list);
atomic_dec(&pd->reorder_objects);
- spin_unlock(&reorder->lock);
pd->processed++;
+ spin_unlock(&reorder->lock);
goto out;
}
+ spin_unlock(&reorder->lock);
if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
padata = ERR_PTR(-ENODATA);
diff --git a/kernel/printk.c b/kernel/printk.c
index ee8f6be7d8a9..8acc98aafa6e 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1270,7 +1270,7 @@ static void call_console_drivers(int level, const char *text, size_t len)
{
struct console *con;
- trace_console(text, len);
+ trace_console_rcuidle(text, len);
if (level >= console_loglevel && !ignore_loglevel)
return;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 506e56ec56a9..0892cfa0fe34 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4745,7 +4745,8 @@ void show_state_filter(unsigned long state_filter)
touch_all_softlockup_watchdogs();
#ifdef CONFIG_SCHED_DEBUG
- sysrq_sched_debug_show();
+ if (!state_filter)
+ sysrq_sched_debug_show();
#endif
rcu_read_unlock();
/*
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4fd49fe1046d..430725da89d0 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2224,6 +2224,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
break;
if (neg)
continue;
+ val = convmul * val / convdiv;
if ((min && val < *min) || (max && val > *max))
continue;
*i = val;
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 19ee339a1d0d..6f27814e1323 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -775,6 +775,9 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
int cpu = smp_processor_id();
+ if (!bc)
+ return;
+
/* Set it up only once ! */
if (bc->event_handler != tick_handle_oneshot_broadcast) {
int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
diff --git a/mm/filemap.c b/mm/filemap.c
index 725a10043244..72130787db0a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1123,6 +1123,11 @@ static void do_generic_file_read(struct file *filp, loff_t *ppos,
cond_resched();
find_page:
+ if (fatal_signal_pending(current)) {
+ error = -EINTR;
+ goto out;
+ }
+
page = find_get_page(mapping, index);
if (!page) {
page_cache_sync_readahead(mapping,
diff --git a/mm/memory.c b/mm/memory.c
index 2ca2ee113ea2..8b4975d1f167 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1654,12 +1654,6 @@ no_page_table:
return page;
}
-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
-{
- return stack_guard_page_start(vma, addr) ||
- stack_guard_page_end(vma, addr+PAGE_SIZE);
-}
-
/**
* __get_user_pages() - pin user pages in memory
* @tsk: task_struct of target task
@@ -1827,11 +1821,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
int ret;
unsigned int fault_flags = 0;
- /* For mlock, just skip the stack guard page. */
- if (foll_flags & FOLL_MLOCK) {
- if (stack_guard_page(vma, start))
- goto next_page;
- }
if (foll_flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (nonblocking)
@@ -3192,40 +3181,6 @@ out_release:
}
/*
- * This is like a special single-page "expand_{down|up}wards()",
- * except we must first make sure that 'address{-|+}PAGE_SIZE'
- * doesn't hit another vma.
- */
-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
-{
- address &= PAGE_MASK;
- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
- struct vm_area_struct *prev = vma->vm_prev;
-
- /*
- * Is there a mapping abutting this one below?
- *
- * That's only ok if it's the same stack mapping
- * that has gotten split..
- */
- if (prev && prev->vm_end == address)
- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
-
- return expand_downwards(vma, address - PAGE_SIZE);
- }
- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
- struct vm_area_struct *next = vma->vm_next;
-
- /* As VM_GROWSDOWN but s/below/above/ */
- if (next && next->vm_start == address + PAGE_SIZE)
- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-
- return expand_upwards(vma, address + PAGE_SIZE);
- }
- return 0;
-}
-
-/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -3244,10 +3199,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (vma->vm_flags & VM_SHARED)
return VM_FAULT_SIGBUS;
- /* Check if we need to add a guard page to the stack */
- if (check_stack_guard_page(vma, address) < 0)
- return VM_FAULT_SIGSEGV;
-
/* Use the zero-page for reads */
if (!(flags & FAULT_FLAG_WRITE)) {
entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 7f1bf93fa87f..9deb93decc9a 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1205,7 +1205,7 @@ int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
}
/*
- * Confirm all pages in a range [start, end) is belongs to the same zone.
+ * Confirm all pages in a range [start, end) belong to the same zone.
*/
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
@@ -1213,9 +1213,9 @@ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
struct zone *zone = NULL;
struct page *page;
int i;
- for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
+ for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
pfn < end_pfn;
- pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
+ pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
/* Make sure the memory section is present first */
if (!present_section_nr(pfn_to_section_nr(pfn)))
continue;
@@ -1234,7 +1234,11 @@ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
zone = page_zone(page);
}
}
- return 1;
+
+ if (zone)
+ return 1;
+ else
+ return 0;
}
/*
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b2061bb5af73..e57c967a1af0 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1537,7 +1537,6 @@ asmlinkage long compat_sys_get_mempolicy(int __user *policy,
asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
compat_ulong_t maxnode)
{
- long err = 0;
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
DECLARE_BITMAP(bm, MAX_NUMNODES);
@@ -1546,14 +1545,13 @@ asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
- err = compat_get_bitmap(bm, nmask, nr_bits);
+ if (compat_get_bitmap(bm, nmask, nr_bits))
+ return -EFAULT;
nm = compat_alloc_user_space(alloc_size);
- err |= copy_to_user(nm, bm, alloc_size);
+ if (copy_to_user(nm, bm, alloc_size))
+ return -EFAULT;
}
- if (err)
- return -EFAULT;
-
return sys_set_mempolicy(mode, nm, nr_bits+1);
}
@@ -1561,7 +1559,6 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
compat_ulong_t mode, compat_ulong_t __user *nmask,
compat_ulong_t maxnode, compat_ulong_t flags)
{
- long err = 0;
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
nodemask_t bm;
@@ -1570,14 +1567,13 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
- err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
+ if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
+ return -EFAULT;
nm = compat_alloc_user_space(alloc_size);
- err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
+ if (copy_to_user(nm, nodes_addr(bm), alloc_size))
+ return -EFAULT;
}
- if (err)
- return -EFAULT;
-
return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}
diff --git a/mm/mmap.c b/mm/mmap.c
index 70ff9b41c970..3c4e4d7ae54e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -263,6 +263,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
unsigned long rlim, retval;
unsigned long newbrk, oldbrk;
struct mm_struct *mm = current->mm;
+ struct vm_area_struct *next;
unsigned long min_brk;
bool populate;
@@ -308,7 +309,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
}
/* Check against existing mmap mappings. */
- if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+ next = find_vma(mm, oldbrk);
+ if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
goto out;
/* Ok, looks good - let it rip. */
@@ -331,10 +333,22 @@ out:
static long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
- unsigned long max, subtree_gap;
- max = vma->vm_start;
- if (vma->vm_prev)
- max -= vma->vm_prev->vm_end;
+ unsigned long max, prev_end, subtree_gap;
+
+ /*
+ * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
+ * allow two stack_guard_gaps between them here, and when choosing
+ * an unmapped area; whereas when expanding we only require one.
+ * That's a little inconsistent, but keeps the code here simpler.
+ */
+ max = vm_start_gap(vma);
+ if (vma->vm_prev) {
+ prev_end = vm_end_gap(vma->vm_prev);
+ if (max > prev_end)
+ max -= prev_end;
+ else
+ max = 0;
+ }
if (vma->vm_rb.rb_left) {
subtree_gap = rb_entry(vma->vm_rb.rb_left,
struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@ -418,7 +432,7 @@ void validate_mm(struct mm_struct *mm)
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
anon_vma_interval_tree_verify(avc);
vma_unlock_anon_vma(vma);
- highest_address = vma->vm_end;
+ highest_address = vm_end_gap(vma);
vma = vma->vm_next;
i++;
}
@@ -586,7 +600,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
- mm->highest_vm_end = vma->vm_end;
+ mm->highest_vm_end = vm_end_gap(vma);
/*
* vma->vm_prev wasn't known when we followed the rbtree to find the
@@ -835,7 +849,7 @@ again: remove_next = 1 + (end > next->vm_end);
vma_gap_update(vma);
if (end_changed) {
if (!next)
- mm->highest_vm_end = end;
+ mm->highest_vm_end = vm_end_gap(vma);
else if (!adjust_next)
vma_gap_update(next);
}
@@ -878,7 +892,7 @@ again: remove_next = 1 + (end > next->vm_end);
else if (next)
vma_gap_update(next);
else
- mm->highest_vm_end = end;
+ WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
}
if (insert && file)
uprobe_mmap(insert);
@@ -1670,7 +1684,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
while (true) {
/* Visit left subtree if it looks promising */
- gap_end = vma->vm_start;
+ gap_end = vm_start_gap(vma);
if (gap_end >= low_limit && vma->vm_rb.rb_left) {
struct vm_area_struct *left =
rb_entry(vma->vm_rb.rb_left,
@@ -1681,12 +1695,13 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
}
}
- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
check_current:
/* Check if current node has a suitable gap */
if (gap_start > high_limit)
return -ENOMEM;
- if (gap_end >= low_limit && gap_end - gap_start >= length)
+ if (gap_end >= low_limit &&
+ gap_end > gap_start && gap_end - gap_start >= length)
goto found;
/* Visit right subtree if it looks promising */
@@ -1708,8 +1723,8 @@ check_current:
vma = rb_entry(rb_parent(prev),
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_left) {
- gap_start = vma->vm_prev->vm_end;
- gap_end = vma->vm_start;
+ gap_start = vm_end_gap(vma->vm_prev);
+ gap_end = vm_start_gap(vma);
goto check_current;
}
}
@@ -1773,7 +1788,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
while (true) {
/* Visit right subtree if it looks promising */
- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
if (gap_start <= high_limit && vma->vm_rb.rb_right) {
struct vm_area_struct *right =
rb_entry(vma->vm_rb.rb_right,
@@ -1786,10 +1801,11 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
check_current:
/* Check if current node has a suitable gap */
- gap_end = vma->vm_start;
+ gap_end = vm_start_gap(vma);
if (gap_end < low_limit)
return -ENOMEM;
- if (gap_start <= high_limit && gap_end - gap_start >= length)
+ if (gap_start <= high_limit &&
+ gap_end > gap_start && gap_end - gap_start >= length)
goto found;
/* Visit left subtree if it looks promising */
@@ -1812,7 +1828,7 @@ check_current:
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_right) {
gap_start = vma->vm_prev ?
- vma->vm_prev->vm_end : 0;
+ vm_end_gap(vma->vm_prev) : 0;
goto check_current;
}
}
@@ -1850,7 +1866,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct vm_unmapped_area_info info;
if (len > TASK_SIZE - mmap_min_addr)
@@ -1861,9 +1877,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
return addr;
}
@@ -1895,7 +1912,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
@@ -1910,9 +1927,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
return addr;
}
@@ -2052,21 +2070,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
* update accounting. This is shared with both the
* grow-up and grow-down cases.
*/
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma,
+ unsigned long size, unsigned long grow)
{
struct mm_struct *mm = vma->vm_mm;
struct rlimit *rlim = current->signal->rlim;
- unsigned long new_start, actual_size;
+ unsigned long new_start;
/* address space limit tests */
if (!may_expand_vm(mm, grow))
return -ENOMEM;
/* Stack limit test */
- actual_size = size;
- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
- actual_size -= PAGE_SIZE;
- if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+ if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
/* mlock limit tests */
@@ -2107,32 +2123,43 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
*/
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
- int error;
+ struct vm_area_struct *next;
+ unsigned long gap_addr;
+ int error = 0;
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
- /*
- * We must make sure the anon_vma is allocated
- * so that the anon_vma locking is not a noop.
- */
+ /* Guard against exceeding limits of the address space. */
+ address &= PAGE_MASK;
+ if (address >= TASK_SIZE)
+ return -ENOMEM;
+ address += PAGE_SIZE;
+
+ /* Enforce stack_guard_gap */
+ gap_addr = address + stack_guard_gap;
+
+ /* Guard against overflow */
+ if (gap_addr < address || gap_addr > TASK_SIZE)
+ gap_addr = TASK_SIZE;
+
+ next = vma->vm_next;
+ if (next && next->vm_start < gap_addr) {
+ if (!(next->vm_flags & VM_GROWSUP))
+ return -ENOMEM;
+ /* Check that both stack segments have the same anon_vma? */
+ }
+
+ /* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
- vma_lock_anon_vma(vma);
/*
* vma->vm_start/vm_end cannot change under us because the caller
* is required to hold the mmap_sem in read mode. We need the
* anon_vma lock to serialize against concurrent expand_stacks.
- * Also guard against wrapping around to address 0.
*/
- if (address < PAGE_ALIGN(address+4))
- address = PAGE_ALIGN(address+4);
- else {
- vma_unlock_anon_vma(vma);
- return -ENOMEM;
- }
- error = 0;
+ vma_lock_anon_vma(vma);
/* Somebody else might have raced and expanded it already */
if (address > vma->vm_end) {
@@ -2163,7 +2190,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
- vma->vm_mm->highest_vm_end = address;
+ vma->vm_mm->highest_vm_end = vm_end_gap(vma);
spin_unlock(&vma->vm_mm->page_table_lock);
perf_event_mmap(vma);
@@ -2183,27 +2210,36 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
+ struct vm_area_struct *prev;
+ unsigned long gap_addr;
int error;
- /*
- * We must make sure the anon_vma is allocated
- * so that the anon_vma locking is not a noop.
- */
- if (unlikely(anon_vma_prepare(vma)))
- return -ENOMEM;
-
address &= PAGE_MASK;
error = security_mmap_addr(address);
if (error)
return error;
- vma_lock_anon_vma(vma);
+ /* Enforce stack_guard_gap */
+ gap_addr = address - stack_guard_gap;
+ if (gap_addr > address)
+ return -ENOMEM;
+ prev = vma->vm_prev;
+ if (prev && prev->vm_end > gap_addr) {
+ if (!(prev->vm_flags & VM_GROWSDOWN))
+ return -ENOMEM;
+ /* Check that both stack segments have the same anon_vma? */
+ }
+
+ /* We must make sure the anon_vma is allocated. */
+ if (unlikely(anon_vma_prepare(vma)))
+ return -ENOMEM;
/*
* vma->vm_start/vm_end cannot change under us because the caller
* is required to hold the mmap_sem in read mode. We need the
* anon_vma lock to serialize against concurrent expand_stacks.
*/
+ vma_lock_anon_vma(vma);
/* Somebody else might have raced and expanded it already */
if (address < vma->vm_start) {
@@ -2245,28 +2281,25 @@ int expand_downwards(struct vm_area_struct *vma,
return error;
}
-/*
- * Note how expand_stack() refuses to expand the stack all the way to
- * abut the next virtual mapping, *unless* that mapping itself is also
- * a stack mapping. We want to leave room for a guard page, after all
- * (the guard page itself is not added here, that is done by the
- * actual page faulting logic)
- *
- * This matches the behavior of the guard page logic (see mm/memory.c:
- * check_stack_guard_page()), which only allows the guard page to be
- * removed under these circumstances.
- */
+/* enforced gap between the expanding stack and other mappings. */
+unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
+
+static int __init cmdline_parse_stack_guard_gap(char *p)
+{
+ unsigned long val;
+ char *endptr;
+
+ val = simple_strtoul(p, &endptr, 10);
+ if (!*endptr)
+ stack_guard_gap = val << PAGE_SHIFT;
+
+ return 0;
+}
+__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
+
#ifdef CONFIG_STACK_GROWSUP
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
- struct vm_area_struct *next;
-
- address &= PAGE_MASK;
- next = vma->vm_next;
- if (next && next->vm_start == address + PAGE_SIZE) {
- if (!(next->vm_flags & VM_GROWSUP))
- return -ENOMEM;
- }
return expand_upwards(vma, address);
}
@@ -2288,14 +2321,6 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
#else
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
- struct vm_area_struct *prev;
-
- address &= PAGE_MASK;
- prev = vma->vm_prev;
- if (prev && prev->vm_end == address) {
- if (!(prev->vm_flags & VM_GROWSDOWN))
- return -ENOMEM;
- }
return expand_downwards(vma, address);
}
@@ -2392,7 +2417,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
vma->vm_prev = prev;
vma_gap_update(vma);
} else
- mm->highest_vm_end = prev ? prev->vm_end : 0;
+ mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
tail_vma->vm_next = NULL;
if (mm->unmap_area == arch_unmap_area)
addr = prev ? prev->vm_end : mm->mmap_base;
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 736a6011c2c8..e8955f96df5a 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -116,9 +116,16 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
unsigned long reclaimed)
{
unsigned long scale = scanned + reclaimed;
- unsigned long pressure;
+ unsigned long pressure = 0;
/*
+ * reclaimed can be greater than scanned in cases
+ * like THP, where the scanned is 1 and reclaimed
+ * could be 512
+ */
+ if (reclaimed >= scanned)
+ goto out;
+ /*
* We calculate the ratio (in percents) of how many pages were
* scanned vs. reclaimed in a given time frame (window). Note that
* time is in VM reclaimer's "ticks", i.e. number of pages
@@ -128,6 +135,7 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
pressure = scale - (reclaimed * scale / scanned);
pressure = pressure * 100 / scale;
+out:
pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure,
scanned, reclaimed);
diff --git a/net/9p/client.c b/net/9p/client.c
index 853d62327a58..e191aab9e586 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -2080,6 +2080,10 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
+ if (rsize < count) {
+ pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize);
+ count = rsize;
+ }
p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 725ce812cfbc..6e0a88d9c554 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -706,14 +706,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
static void bcm_remove_op(struct bcm_op *op)
{
- hrtimer_cancel(&op->timer);
- hrtimer_cancel(&op->thrtimer);
-
- if (op->tsklet.func)
- tasklet_kill(&op->tsklet);
+ if (op->tsklet.func) {
+ while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
+ test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
+ hrtimer_active(&op->timer)) {
+ hrtimer_cancel(&op->timer);
+ tasklet_kill(&op->tsklet);
+ }
+ }
- if (op->thrtsklet.func)
- tasklet_kill(&op->thrtsklet);
+ if (op->thrtsklet.func) {
+ while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
+ test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
+ hrtimer_active(&op->thrtimer)) {
+ hrtimer_cancel(&op->thrtimer);
+ tasklet_kill(&op->thrtsklet);
+ }
+ }
if ((op->frames) && (op->frames != &op->sframe))
kfree(op->frames);
diff --git a/net/can/raw.c b/net/can/raw.c
index f4d86485571f..602be0e07a02 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -470,6 +470,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
if (optlen % sizeof(struct can_filter) != 0)
return -EINVAL;
+ if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
+ return -EINVAL;
+
count = optlen / sizeof(struct can_filter);
if (count > 1) {
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 025ced8fbb57..c99cfde87bd6 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -6,6 +6,7 @@
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
@@ -472,11 +473,16 @@ static int ceph_tcp_connect(struct ceph_connection *con)
{
struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
struct socket *sock;
+ unsigned int noio_flag;
int ret;
BUG_ON(con->sock);
+
+ /* sock_create_kern() allocates with GFP_KERNEL */
+ noio_flag = memalloc_noio_save();
ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
IPPROTO_TCP, &sock);
+ memalloc_noio_restore(noio_flag);
if (ret)
return ret;
sock->sk->sk_allocation = GFP_NOFS;
@@ -1969,6 +1975,19 @@ static int process_connect(struct ceph_connection *con)
dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
+ if (con->auth_reply_buf) {
+ /*
+ * Any connection that defines ->get_authorizer()
+ * should also define ->verify_authorizer_reply().
+ * See get_connect_authorizer().
+ */
+ ret = con->ops->verify_authorizer_reply(con, 0);
+ if (ret < 0) {
+ con->error_msg = "bad authorize reply";
+ return ret;
+ }
+ }
+
switch (con->in_reply.tag) {
case CEPH_MSGR_TAG_FEATURES:
pr_err("%s%lld %s feature set mismatch,"
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index a974dfec4bf1..55bb6909edc8 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -80,6 +80,7 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
struct nlattr *nla;
struct sk_buff *skb;
unsigned long flags;
+ void *msg_header;
al = sizeof(struct net_dm_alert_msg);
al += dm_hit_limit * sizeof(struct net_dm_drop_point);
@@ -87,21 +88,41 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
skb = genlmsg_new(al, GFP_KERNEL);
- if (skb) {
- genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
- 0, NET_DM_CMD_ALERT);
- nla = nla_reserve(skb, NLA_UNSPEC,
- sizeof(struct net_dm_alert_msg));
- msg = nla_data(nla);
- memset(msg, 0, al);
- } else {
- mod_timer(&data->send_timer, jiffies + HZ / 10);
+ if (!skb)
+ goto err;
+
+ msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
+ 0, NET_DM_CMD_ALERT);
+ if (!msg_header) {
+ nlmsg_free(skb);
+ skb = NULL;
+ goto err;
+ }
+ nla = nla_reserve(skb, NLA_UNSPEC,
+ sizeof(struct net_dm_alert_msg));
+ if (!nla) {
+ nlmsg_free(skb);
+ skb = NULL;
+ goto err;
}
+ msg = nla_data(nla);
+ memset(msg, 0, al);
+ goto out;
+err:
+ mod_timer(&data->send_timer, jiffies + HZ / 10);
+out:
spin_lock_irqsave(&data->lock, flags);
swap(data->skb, skb);
spin_unlock_irqrestore(&data->lock, flags);
+ if (skb) {
+ struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+ struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh);
+
+ genlmsg_end(skb, genlmsg_data(gnlh));
+ }
+
return skb;
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index b49e8bafab17..fb82e8a5b006 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -872,7 +872,8 @@ static void neigh_probe(struct neighbour *neigh)
if (skb)
skb = skb_copy(skb, GFP_ATOMIC);
write_unlock(&neigh->lock);
- neigh->ops->solicit(neigh, skb);
+ if (neigh->ops->solicit)
+ neigh->ops->solicit(neigh, skb);
atomic_inc(&neigh->probes);
kfree_skb(skb);
}
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index f053198e730c..5e3a7302f774 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -749,6 +749,7 @@ static void ccid2_hc_tx_exit(struct sock *sk)
for (i = 0; i < hc->tx_seqbufc; i++)
kfree(hc->tx_seqbuf[i]);
hc->tx_seqbufc = 0;
+ dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}
static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 662071b249cc..e47b15dd9b39 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -140,6 +140,7 @@ struct sock *dccp_create_openreq_child(struct sock *sk,
/* It is still raw copy of parent, so invalidate
* destructor and make plain sk_free() */
newsk->sk_destruct = NULL;
+ bh_unlock_sock(newsk);
sk_free(newsk);
return NULL;
}
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 4556cd25acde..017b4792cd44 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -957,7 +957,8 @@ static void nl_fib_input(struct sk_buff *skb)
net = sock_net(skb->sk);
nlh = nlmsg_hdr(skb);
- if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
+ if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
+ skb->len < nlh->nlmsg_len ||
nlmsg_len(nlh) < sizeof(*frn))
return;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index b0178b04bd81..4572ee7c71f4 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -196,9 +196,14 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
static void igmp_gq_start_timer(struct in_device *in_dev)
{
int tv = net_random() % in_dev->mr_maxdelay;
+ unsigned long exp = jiffies + tv + 2;
+
+ if (in_dev->mr_gq_running &&
+ time_after_eq(exp, (in_dev->mr_gq_timer).expires))
+ return;
in_dev->mr_gq_running = 1;
- if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2))
+ if (!mod_timer(&in_dev->mr_gq_timer, exp))
in_dev_hold(in_dev);
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f3b15bb7fbec..0680058fe693 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5335,6 +5335,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_set_state(sk, TCP_ESTABLISHED);
+ icsk->icsk_ack.lrcvtime = tcp_time_stamp;
if (skb != NULL) {
icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
@@ -5535,7 +5536,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
* to stand against the temptation 8) --ANK
*/
inet_csk_schedule_ack(sk);
- icsk->icsk_ack.lrcvtime = tcp_time_stamp;
tcp_enter_quickack_mode(sk);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
TCP_DELACK_MAX, TCP_RTO_MAX);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 195c618aba6c..270840f5ee01 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -270,10 +270,13 @@ EXPORT_SYMBOL(tcp_v4_connect);
*/
void tcp_v4_mtu_reduced(struct sock *sk)
{
- struct dst_entry *dst;
struct inet_sock *inet = inet_sk(sk);
- u32 mtu = tcp_sk(sk)->mtu_info;
+ struct dst_entry *dst;
+ u32 mtu;
+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+ return;
+ mtu = tcp_sk(sk)->mtu_info;
dst = inet_csk_update_pmtu(sk, mtu);
if (!dst)
return;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 0f0178827259..914a55db8031 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -405,6 +405,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->srtt = 0;
newtp->mdev = TCP_TIMEOUT_INIT;
newicsk->icsk_rto = TCP_TIMEOUT_INIT;
+ newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
newtp->packets_out = 0;
newtp->retrans_out = 0;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1f2f6b5406ee..8729a934124f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2154,9 +2154,11 @@ u32 __tcp_select_window(struct sock *sk)
int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
int window;
- if (mss > full_space)
+ if (unlikely(mss > full_space)) {
mss = full_space;
-
+ if (mss <= 0)
+ return 0;
+ }
if (free_space < (full_space >> 1)) {
icsk->icsk_ack.quick = 0;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 4b85e6f636c9..722367a6d817 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -201,7 +201,8 @@ void tcp_delack_timer_handler(struct sock *sk)
sk_mem_reclaim_partial(sk);
- if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
+ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+ !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
goto out;
if (time_after(icsk->icsk_ack.timeout, jiffies)) {
@@ -480,7 +481,8 @@ void tcp_write_timer_handler(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
int event;
- if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
+ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+ !icsk->icsk_pending)
goto out;
if (time_after(icsk->icsk_timeout, jiffies)) {
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a3e2c34d5b7a..9c4aa2e22448 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4768,8 +4768,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
struct net_device *dev;
struct inet6_dev *idev;
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
+ for_each_netdev(net, dev) {
idev = __in6_dev_get(dev);
if (idev) {
int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
@@ -4778,7 +4777,6 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
dev_disable_change(idev);
}
}
- rcu_read_unlock();
}
static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 1ce7ea1f40b7..17a88ebcc845 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -740,7 +740,6 @@ slow_path:
* Fragment the datagram.
*/
- *prevhdr = NEXTHDR_FRAGMENT;
hroom = LL_RESERVED_SPACE(rt->dst.dev);
troom = rt->dst.dev->needed_tailroom;
@@ -748,6 +747,8 @@ slow_path:
* Keep copying data until we run out.
*/
while(left > 0) {
+ u8 *fragnexthdr_offset;
+
len = left;
/* IF: it doesn't fit, use 'mtu' - the data space left */
if (len > mtu)
@@ -794,6 +795,10 @@ slow_path:
*/
skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
+ fragnexthdr_offset = skb_network_header(frag);
+ fragnexthdr_offset += prevhdr - skb_network_header(skb);
+ *fragnexthdr_offset = NEXTHDR_FRAGMENT;
+
/*
* Build fragment header.
*/
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 8344f686335d..2026c5b4342d 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -777,7 +777,8 @@ failure:
* Delete a VIF entry
*/
-static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
+static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
+ struct list_head *head)
{
struct mif_device *v;
struct net_device *dev;
@@ -823,7 +824,7 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
dev->ifindex, &in6_dev->cnf);
}
- if (v->flags & MIFF_REGISTER)
+ if ((v->flags & MIFF_REGISTER) && !notify)
unregister_netdevice_queue(dev, head);
dev_put(dev);
@@ -1333,7 +1334,6 @@ static int ip6mr_device_event(struct notifier_block *this,
struct mr6_table *mrt;
struct mif_device *v;
int ct;
- LIST_HEAD(list);
if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
@@ -1342,10 +1342,9 @@ static int ip6mr_device_event(struct notifier_block *this,
v = &mrt->vif6_table[0];
for (ct = 0; ct < mrt->maxvif; ct++, v++) {
if (v->dev == dev)
- mif6_delete(mrt, ct, &list);
+ mif6_delete(mrt, ct, 1, NULL);
}
}
- unregister_netdevice_many(&list);
return NOTIFY_DONE;
}
@@ -1550,7 +1549,7 @@ static void mroute_clean_tables(struct mr6_table *mrt, bool all)
for (i = 0; i < mrt->maxvif; i++) {
if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
continue;
- mif6_delete(mrt, i, &list);
+ mif6_delete(mrt, i, 0, &list);
}
unregister_netdevice_many(&list);
@@ -1703,7 +1702,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
return -EFAULT;
rtnl_lock();
- ret = mif6_delete(mrt, mifi, NULL);
+ ret = mif6_delete(mrt, mifi, 0, NULL);
rtnl_unlock();
return ret;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 989bd7987985..c7ce2be09d90 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1133,7 +1133,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb != NULL)
- amount = skb->tail - skb->transport_header;
+ amount = skb->len;
spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int __user *)arg);
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index fb5010c27a22..244892ce560e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1676,6 +1676,8 @@ static int ip6_route_del(struct fib6_config *cfg)
continue;
if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
continue;
+ if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
+ continue;
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index 7152624ed5f1..26ccd65cdcab 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -385,9 +385,6 @@ EXPORT_SYMBOL(hashbin_new);
* for deallocating this structure if it's complex. If not the user can
* just supply kfree, which should take care of the job.
*/
-#ifdef CONFIG_LOCKDEP
-static int hashbin_lock_depth = 0;
-#endif
int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
{
irda_queue_t* queue;
@@ -398,22 +395,27 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);
/* Synchronize */
- if ( hashbin->hb_type & HB_LOCK ) {
- spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags,
- hashbin_lock_depth++);
- }
+ if (hashbin->hb_type & HB_LOCK)
+ spin_lock_irqsave(&hashbin->hb_spinlock, flags);
/*
* Free the entries in the hashbin, TODO: use hashbin_clear when
* it has been shown to work
*/
for (i = 0; i < HASHBIN_SIZE; i ++ ) {
- queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
- while (queue ) {
- if (free_func)
- (*free_func)(queue);
- queue = dequeue_first(
- (irda_queue_t**) &hashbin->hb_queue[i]);
+ while (1) {
+ queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
+
+ if (!queue)
+ break;
+
+ if (free_func) {
+ if (hashbin->hb_type & HB_LOCK)
+ spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
+ free_func(queue);
+ if (hashbin->hb_type & HB_LOCK)
+ spin_lock_irqsave(&hashbin->hb_spinlock, flags);
+ }
}
}
@@ -422,12 +424,8 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
hashbin->magic = ~HB_MAGIC;
/* Release lock */
- if ( hashbin->hb_type & HB_LOCK) {
+ if (hashbin->hb_type & HB_LOCK)
spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
-#ifdef CONFIG_LOCKDEP
- hashbin_lock_depth--;
-#endif
- }
/*
* Free the hashbin structure
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index f4d30b509cdf..1f65095c3217 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -382,7 +382,7 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
drop:
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
kfree_skb(skb);
- return -1;
+ return 0;
}
/* Userspace will call sendmsg() on the tunnel socket to send L2TP
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index efb510e6f206..a1f47b8d8013 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -114,6 +114,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
break;
}
+ flush_delayed_work(&sdata->dec_tailroom_needed_wk);
drv_remove_interface(local, sdata);
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4b1734a14ff9..0bbb3470fa78 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2242,7 +2242,7 @@ static int packet_snd(struct socket *sock,
int vnet_hdr_len;
struct packet_sock *po = pkt_sk(sk);
unsigned short gso_type = 0;
- int hlen, tlen;
+ int hlen, tlen, linear;
int extra_len = 0;
/*
@@ -2336,7 +2336,9 @@ static int packet_snd(struct socket *sock,
err = -ENOBUFS;
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
- skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
+ linear = vnet_hdr.hdr_len;
+ linear = max(linear, min_t(int, len, dev->hard_header_len));
+ skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
msg->msg_flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out_unlock;
@@ -2556,7 +2558,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
int addr_len)
{
struct sock *sk = sock->sk;
- char name[15];
+ char name[sizeof(uaddr->sa_data) + 1];
struct net_device *dev;
int err = -ENODEV;
@@ -2566,7 +2568,11 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
if (addr_len != sizeof(struct sockaddr))
return -EINVAL;
- strlcpy(name, uaddr->sa_data, sizeof(name));
+ /* uaddr->sa_data comes from the userspace, it's not guaranteed to be
+ * zero-terminated.
+ */
+ memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
+ name[sizeof(uaddr->sa_data)] = 0;
dev = dev_get_by_name(sock_net(sk), name);
if (dev)
@@ -3678,6 +3686,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
if (unlikely(rb->frames_per_block <= 0))
goto out;
+ if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
+ goto out;
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
req->tp_frame_nr))
goto out;
@@ -3694,7 +3704,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
*/
if (!tx_ring)
init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
- break;
+ break;
default:
break;
}
diff --git a/net/rds/cong.c b/net/rds/cong.c
index e5b65acd650b..cec4c4e6d905 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -285,7 +285,7 @@ void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
- __set_bit_le(off, (void *)map->m_page_addrs[i]);
+ set_bit_le(off, (void *)map->m_page_addrs[i]);
}
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
@@ -299,7 +299,7 @@ void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
- __clear_bit_le(off, (void *)map->m_page_addrs[i]);
+ clear_bit_le(off, (void *)map->m_page_addrs[i]);
}
static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 4178cf387d21..4358ae85cdad 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -6181,6 +6181,9 @@ int sctp_inet_listen(struct socket *sock, int backlog)
if (sock->state != SS_UNCONNECTED)
goto out;
+ if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
+ goto out;
+
/* If backlog is zero, disable listening. */
if (!backlog) {
if (sctp_sstate(sk, CLOSED))
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 8f118c7c19e1..4b18115c0543 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -977,6 +977,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
unsigned int hash;
struct unix_address *addr;
struct hlist_head *list;
+ struct path path = { NULL, NULL };
err = -EINVAL;
if (sunaddr->sun_family != AF_UNIX)
@@ -992,9 +993,20 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
addr_len = err;
+ if (sun_path[0]) {
+ umode_t mode = S_IFSOCK |
+ (SOCK_INODE(sock)->i_mode & ~current_umask());
+ err = unix_mknod(sun_path, mode, &path);
+ if (err) {
+ if (err == -EEXIST)
+ err = -EADDRINUSE;
+ goto out;
+ }
+ }
+
err = mutex_lock_interruptible(&u->readlock);
if (err)
- goto out;
+ goto out_put;
err = -EINVAL;
if (u->addr)
@@ -1011,16 +1023,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
atomic_set(&addr->refcnt, 1);
if (sun_path[0]) {
- struct path path;
- umode_t mode = S_IFSOCK |
- (SOCK_INODE(sock)->i_mode & ~current_umask());
- err = unix_mknod(sun_path, mode, &path);
- if (err) {
- if (err == -EEXIST)
- err = -EADDRINUSE;
- unix_release_addr(addr);
- goto out_up;
- }
addr->hash = UNIX_HASH_SIZE;
hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1);
spin_lock(&unix_table_lock);
@@ -1047,6 +1049,9 @@ out_unlock:
spin_unlock(&unix_table_lock);
out_up:
mutex_unlock(&u->readlock);
+out_put:
+ if (err)
+ path_put(&path);
out:
return err;
}
diff --git a/samples/seccomp/bpf-helper.h b/samples/seccomp/bpf-helper.h
index 38ee70f3cd5b..1d8de9edd858 100644
--- a/samples/seccomp/bpf-helper.h
+++ b/samples/seccomp/bpf-helper.h
@@ -138,7 +138,7 @@ union arg64 {
#define ARG_32(idx) \
BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx))
-/* Loads hi into A and lo in X */
+/* Loads lo into M[0] and hi into M[1] and A */
#define ARG_64(idx) \
BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \
BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \
@@ -153,88 +153,107 @@ union arg64 {
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 1, 0), \
jt
-/* Checks the lo, then swaps to check the hi. A=lo,X=hi */
+#define JA32(value, jt) \
+ BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
+ jt
+
+#define JGE32(value, jt) \
+ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
+ jt
+
+#define JGT32(value, jt) \
+ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
+ jt
+
+#define JLE32(value, jt) \
+ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
+ jt
+
+#define JLT32(value, jt) \
+ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
+ jt
+
+/*
+ * All the JXX64 checks assume lo is saved in M[0] and hi is saved in both
+ * A and M[1]. This invariant is kept by restoring A if necessary.
+ */
#define JEQ64(lo, hi, jt) \
+ /* if (hi != arg.hi) goto NOMATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+ /* if (lo != arg.lo) goto NOMATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 0, 2), \
- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
jt, \
- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
#define JNE64(lo, hi, jt) \
- BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 5, 0), \
- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+ /* if (hi != arg.hi) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
+ BPF_STMT(BPF_LD+BPF_MEM, 0), \
+ /* if (lo != arg.lo) goto MATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 2, 0), \
- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
jt, \
- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
-
-#define JA32(value, jt) \
- BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
- jt
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
#define JA64(lo, hi, jt) \
+ /* if (hi & arg.hi) goto MATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (hi), 3, 0), \
- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+ BPF_STMT(BPF_LD+BPF_MEM, 0), \
+ /* if (lo & arg.lo) goto MATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (lo), 0, 2), \
- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
jt, \
- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
-#define JGE32(value, jt) \
- BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
- jt
-
-#define JLT32(value, jt) \
- BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
- jt
-
-/* Shortcut checking if hi > arg.hi. */
#define JGE64(lo, hi, jt) \
+ /* if (hi > arg.hi) goto MATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
+ /* if (hi != arg.hi) goto NOMATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+ BPF_STMT(BPF_LD+BPF_MEM, 0), \
+ /* if (lo >= arg.lo) goto MATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 0, 2), \
- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
- jt, \
- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
-
-#define JLT64(lo, hi, jt) \
- BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
- BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
jt, \
- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
-#define JGT32(value, jt) \
- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
- jt
-
-#define JLE32(value, jt) \
- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
- jt
-
-/* Check hi > args.hi first, then do the GE checking */
#define JGT64(lo, hi, jt) \
+ /* if (hi > arg.hi) goto MATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
+ /* if (hi != arg.hi) goto NOMATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+ BPF_STMT(BPF_LD+BPF_MEM, 0), \
+ /* if (lo > arg.lo) goto MATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 0, 2), \
- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
jt, \
- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
#define JLE64(lo, hi, jt) \
- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 6, 0), \
- BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+ /* if (hi < arg.hi) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
+ /* if (hi != arg.hi) goto NOMATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
+ BPF_STMT(BPF_LD+BPF_MEM, 0), \
+ /* if (lo <= arg.lo) goto MATCH; */ \
BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
+ jt, \
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
+
+#define JLT64(lo, hi, jt) \
+ /* if (hi < arg.hi) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
+ /* if (hi != arg.hi) goto NOMATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
+ BPF_STMT(BPF_LD+BPF_MEM, 0), \
+ /* if (lo < arg.lo) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 2, 0), \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
jt, \
- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
#define LOAD_SYSCALL_NR \
BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \
diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h
index 775843e7f984..b5029c77c3e3 100644
--- a/security/apparmor/include/match.h
+++ b/security/apparmor/include/match.h
@@ -57,6 +57,7 @@ struct table_set_header {
#define YYTD_ID_ACCEPT2 6
#define YYTD_ID_NXT 7
#define YYTD_ID_TSIZE 8
+#define YYTD_ID_MAX 8
#define YYTD_DATA8 1
#define YYTD_DATA16 2
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
index bda4569fdd83..0c9d121f15d0 100644
--- a/security/apparmor/include/policy.h
+++ b/security/apparmor/include/policy.h
@@ -313,6 +313,8 @@ static inline int AUDIT_MODE(struct aa_profile *profile)
return profile->audit;
}
+bool policy_view_capable(void);
+bool policy_admin_capable(void);
bool aa_may_manage_policy(int op);
#endif /* __AA_POLICY_H */
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index b21830eced41..6eeaab80865d 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -759,51 +759,49 @@ __setup("apparmor=", apparmor_enabled_setup);
/* set global flag turning off the ability to load policy */
static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp)
{
- if (!capable(CAP_MAC_ADMIN))
+ if (!policy_admin_capable())
return -EPERM;
- if (aa_g_lock_policy)
- return -EACCES;
return param_set_bool(val, kp);
}
static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp)
{
- if (!capable(CAP_MAC_ADMIN))
+ if (!policy_view_capable())
return -EPERM;
return param_get_bool(buffer, kp);
}
static int param_set_aabool(const char *val, const struct kernel_param *kp)
{
- if (!capable(CAP_MAC_ADMIN))
+ if (!policy_admin_capable())
return -EPERM;
return param_set_bool(val, kp);
}
static int param_get_aabool(char *buffer, const struct kernel_param *kp)
{
- if (!capable(CAP_MAC_ADMIN))
+ if (!policy_view_capable())
return -EPERM;
return param_get_bool(buffer, kp);
}
static int param_set_aauint(const char *val, const struct kernel_param *kp)
{
- if (!capable(CAP_MAC_ADMIN))
+ if (!policy_admin_capable())
return -EPERM;
return param_set_uint(val, kp);
}
static int param_get_aauint(char *buffer, const struct kernel_param *kp)
{
- if (!capable(CAP_MAC_ADMIN))
+ if (!policy_view_capable())
return -EPERM;
return param_get_uint(buffer, kp);
}
static int param_get_audit(char *buffer, struct kernel_param *kp)
{
- if (!capable(CAP_MAC_ADMIN))
+ if (!policy_view_capable())
return -EPERM;
if (!apparmor_enabled)
@@ -815,7 +813,7 @@ static int param_get_audit(char *buffer, struct kernel_param *kp)
static int param_set_audit(const char *val, struct kernel_param *kp)
{
int i;
- if (!capable(CAP_MAC_ADMIN))
+ if (!policy_admin_capable())
return -EPERM;
if (!apparmor_enabled)
@@ -836,7 +834,7 @@ static int param_set_audit(const char *val, struct kernel_param *kp)
static int param_get_mode(char *buffer, struct kernel_param *kp)
{
- if (!capable(CAP_MAC_ADMIN))
+ if (!policy_admin_capable())
return -EPERM;
if (!apparmor_enabled)
@@ -848,7 +846,7 @@ static int param_get_mode(char *buffer, struct kernel_param *kp)
static int param_set_mode(const char *val, struct kernel_param *kp)
{
int i;
- if (!capable(CAP_MAC_ADMIN))
+ if (!policy_admin_capable())
return -EPERM;
if (!apparmor_enabled)
diff --git a/security/apparmor/match.c b/security/apparmor/match.c
index 90971a8c3789..704b0eb25801 100644
--- a/security/apparmor/match.c
+++ b/security/apparmor/match.c
@@ -45,6 +45,8 @@ static struct table_header *unpack_table(char *blob, size_t bsize)
* it every time we use td_id as an index
*/
th.td_id = be16_to_cpu(*(u16 *) (blob)) - 1;
+ if (th.td_id > YYTD_ID_MAX)
+ goto out;
th.td_flags = be16_to_cpu(*(u16 *) (blob + 2));
th.td_lolen = be32_to_cpu(*(u32 *) (blob + 8));
blob += sizeof(struct table_header);
@@ -59,7 +61,9 @@ static struct table_header *unpack_table(char *blob, size_t bsize)
table = kvmalloc(tsize);
if (table) {
- *table = th;
+ table->td_id = th.td_id;
+ table->td_flags = th.td_flags;
+ table->td_lolen = th.td_lolen;
if (th.td_flags == YYTD_DATA8)
UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
u8, byte_to_byte);
@@ -71,14 +75,14 @@ static struct table_header *unpack_table(char *blob, size_t bsize)
u32, be32_to_cpu);
else
goto fail;
+ /* if table was vmalloced make sure the page tables are synced
+ * before it is used, as it goes live to all cpus.
+ */
+ if (is_vmalloc_addr(table))
+ vm_unmap_aliases();
}
out:
- /* if table was vmalloced make sure the page tables are synced
- * before it is used, as it goes live to all cpus.
- */
- if (is_vmalloc_addr(table))
- vm_unmap_aliases();
return table;
fail:
kvfree(table);
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 813200384d97..c4780e108d7d 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -1002,6 +1002,22 @@ static int audit_policy(int op, gfp_t gfp, const char *name, const char *info,
&sa, NULL);
}
+bool policy_view_capable(void)
+{
+ struct user_namespace *user_ns = current_user_ns();
+ bool response = false;
+
+ if (ns_capable(user_ns, CAP_MAC_ADMIN))
+ response = true;
+
+ return response;
+}
+
+bool policy_admin_capable(void)
+{
+ return policy_view_capable() && !aa_g_lock_policy;
+}
+
/**
* aa_may_manage_policy - can the current task manage policy
* @op: the policy manipulation operation being done
@@ -1025,8 +1041,8 @@ bool aa_may_manage_policy(int op)
return 0;
}
- if (!capable(CAP_MAC_ADMIN)) {
- audit_policy(op, NULL, "not policy admin", -EACCES);
+ if (!policy_admin_capable()) {
+ audit_policy(op, NULL, "not policy admin", -EACCES);
return 0;
}
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 329b1fd30749..55ff3eecd368 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -571,6 +571,9 @@ static struct aa_profile *unpack_profile(struct aa_ext *e)
error = PTR_ERR(profile->policy.dfa);
profile->policy.dfa = NULL;
goto fail;
+ } else if (!profile->policy.dfa) {
+ error = -EPROTO;
+ goto fail;
}
if (!unpack_u32(e, &profile->policy.start[0], "start"))
/* default start state */
@@ -652,7 +655,7 @@ static bool verify_xindex(int xindex, int table_size)
int index, xtype;
xtype = xindex & AA_X_TYPE_MASK;
index = xindex & AA_X_INDEX_MASK;
- if (xtype == AA_X_TABLE && index > table_size)
+ if (xtype == AA_X_TABLE && index >= table_size)
return 0;
return 1;
}
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 08865dcbf5f1..d449dde1bf50 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1909,6 +1909,7 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
info.output_pool != client->pool->size)) {
if (snd_seq_write_pool_allocated(client)) {
/* remove all existing cells */
+ snd_seq_pool_mark_closing(client->pool);
snd_seq_queue_client_leave_cells(client->number);
snd_seq_pool_done(client->pool);
}
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 0d75afa786bc..490b697e83ff 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -72,6 +72,9 @@ void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
return;
*fifo = NULL;
+ if (f->pool)
+ snd_seq_pool_mark_closing(f->pool);
+
snd_seq_fifo_clear(f);
/* wake up clients if any */
@@ -137,6 +140,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
f->tail = cell;
if (f->head == NULL)
f->head = cell;
+ cell->next = NULL;
f->cells++;
spin_unlock_irqrestore(&f->lock, flags);
@@ -216,6 +220,8 @@ void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f,
spin_lock_irqsave(&f->lock, flags);
cell->next = f->head;
f->head = cell;
+ if (!f->tail)
+ f->tail = cell;
f->cells++;
spin_unlock_irqrestore(&f->lock, flags);
}
@@ -261,6 +267,10 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
/* NOTE: overflow flag is not cleared */
spin_unlock_irqrestore(&f->lock, flags);
+ /* close the old pool and wait until all users are gone */
+ snd_seq_pool_mark_closing(oldpool);
+ snd_use_lock_sync(&f->use_lock);
+
/* release cells in old pool */
for (cell = oldhead; cell; cell = next) {
next = cell->next;
diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
index 2cfe50c71a9d..8a6b7baafa35 100644
--- a/sound/core/seq/seq_lock.c
+++ b/sound/core/seq/seq_lock.c
@@ -28,19 +28,16 @@
/* wait until all locks are released */
void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
{
- int max_count = 5 * HZ;
+ int warn_count = 5 * HZ;
if (atomic_read(lockp) < 0) {
printk(KERN_WARNING "seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line);
return;
}
while (atomic_read(lockp) > 0) {
- if (max_count == 0) {
- snd_printk(KERN_WARNING "seq_lock: timeout [%d left] in %s:%d\n", atomic_read(lockp), file, line);
- break;
- }
+ if (warn_count-- == 0)
+ pr_warn("ALSA: seq_lock: waiting [%d left] in %s:%d\n", atomic_read(lockp), file, line);
schedule_timeout_uninterruptible(1);
- max_count--;
}
}
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index f478f770bf52..8c510781558f 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -411,32 +411,33 @@ int snd_seq_pool_init(struct snd_seq_pool *pool)
return 0;
}
+/* refuse the further insertion to the pool */
+void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
+{
+ unsigned long flags;
+
+ if (snd_BUG_ON(!pool))
+ return;
+ spin_lock_irqsave(&pool->lock, flags);
+ pool->closing = 1;
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
/* remove events */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
unsigned long flags;
struct snd_seq_event_cell *ptr;
- int max_count = 5 * HZ;
if (snd_BUG_ON(!pool))
return -EINVAL;
/* wait for closing all threads */
- spin_lock_irqsave(&pool->lock, flags);
- pool->closing = 1;
- spin_unlock_irqrestore(&pool->lock, flags);
-
if (waitqueue_active(&pool->output_sleep))
wake_up(&pool->output_sleep);
- while (atomic_read(&pool->counter) > 0) {
- if (max_count == 0) {
- snd_printk(KERN_WARNING "snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
- break;
- }
+ while (atomic_read(&pool->counter) > 0)
schedule_timeout_uninterruptible(1);
- max_count--;
- }
/* release all resources */
spin_lock_irqsave(&pool->lock, flags);
@@ -490,6 +491,7 @@ int snd_seq_pool_delete(struct snd_seq_pool **ppool)
*ppool = NULL;
if (pool == NULL)
return 0;
+ snd_seq_pool_mark_closing(pool);
snd_seq_pool_done(pool);
kfree(pool);
return 0;
diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
index 4a2ec779b8a7..32f959c17786 100644
--- a/sound/core/seq/seq_memory.h
+++ b/sound/core/seq/seq_memory.h
@@ -84,6 +84,7 @@ static inline int snd_seq_total_cells(struct snd_seq_pool *pool)
int snd_seq_pool_init(struct snd_seq_pool *pool);
/* done pool - free events */
+void snd_seq_pool_mark_closing(struct snd_seq_pool *pool);
int snd_seq_pool_done(struct snd_seq_pool *pool);
/* create pool */
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index 4c9aa462de9b..17fe04d892f9 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -183,6 +183,8 @@ void __exit snd_seq_queues_delete(void)
}
}
+static void queue_use(struct snd_seq_queue *queue, int client, int use);
+
/* allocate a new queue -
* return queue index value or negative value for error
*/
@@ -194,11 +196,11 @@ int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
if (q == NULL)
return -ENOMEM;
q->info_flags = info_flags;
+ queue_use(q, client, 1);
if (queue_list_add(q) < 0) {
queue_delete(q);
return -ENOMEM;
}
- snd_seq_queue_use(q->queue, client, 1); /* use this queue */
return q->queue;
}
@@ -504,19 +506,9 @@ int snd_seq_queue_timer_set_tempo(int queueid, int client,
return result;
}
-
-/* use or unuse this queue -
- * if it is the first client, starts the timer.
- * if it is not longer used by any clients, stop the timer.
- */
-int snd_seq_queue_use(int queueid, int client, int use)
+/* use or unuse this queue */
+static void queue_use(struct snd_seq_queue *queue, int client, int use)
{
- struct snd_seq_queue *queue;
-
- queue = queueptr(queueid);
- if (queue == NULL)
- return -EINVAL;
- mutex_lock(&queue->timer_mutex);
if (use) {
if (!test_and_set_bit(client, queue->clients_bitmap))
queue->clients++;
@@ -531,6 +523,21 @@ int snd_seq_queue_use(int queueid, int client, int use)
} else {
snd_seq_timer_close(queue);
}
+}
+
+/* use or unuse this queue -
+ * if it is the first client, starts the timer.
+ * if it is not longer used by any clients, stop the timer.
+ */
+int snd_seq_queue_use(int queueid, int client, int use)
+{
+ struct snd_seq_queue *queue;
+
+ queue = queueptr(queueid);
+ if (queue == NULL)
+ return -EINVAL;
+ mutex_lock(&queue->timer_mutex);
+ queue_use(queue, client, use);
mutex_unlock(&queue->timer_mutex);
queuefree(queue);
return 0;
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 749857a889e6..98904d8e51db 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1659,9 +1659,21 @@ static int snd_timer_user_params(struct file *file,
return -EBADFD;
if (copy_from_user(&params, _params, sizeof(params)))
return -EFAULT;
- if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) {
- err = -EINVAL;
- goto _end;
+ if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE)) {
+ u64 resolution;
+
+ if (params.ticks < 1) {
+ err = -EINVAL;
+ goto _end;
+ }
+
+ /* Don't allow resolution less than 1ms */
+ resolution = snd_timer_resolution(tu->timeri);
+ resolution *= params.ticks;
+ if (resolution < 1000000) {
+ err = -EINVAL;
+ goto _end;
+ }
}
if (params.queue_size > 0 &&
(params.queue_size < 32 || params.queue_size > 1024)) {
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index babbf238a648..af27d67efa82 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2185,6 +2185,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
+ SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 5ea5a18f3f58..77047e3517a5 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -893,9 +893,10 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
case USB_ID(0x046d, 0x0991):
+ case USB_ID(0x046d, 0x09a2): /* QuickCam Communicate Deluxe/S7500 */
/* Most audio usb devices lie about volume resolution.
* Most Logitech webcams have res = 384.
- * Proboly there is some logitech magic behind this number --fishor
+ * Probably there is some logitech magic behind this number --fishor
*/
if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
snd_printk(KERN_INFO