diff --git a/config/kernel/linux-odroidc1-default.config b/config/kernel/linux-odroidc1-default.config index a7bca4c39..e45bea5cf 100644 --- a/config/kernel/linux-odroidc1-default.config +++ b/config/kernel/linux-odroidc1-default.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm 3.10.104 Kernel Configuration +# Linux/arm 3.10.107 Kernel Configuration # CONFIG_ARM=y CONFIG_SYS_SUPPORTS_APM_EMULATION=y @@ -1483,41 +1483,7 @@ CONFIG_POST_PROCESS_MANAGER_PPSCALER=y # # Amlogic Camera Support # -CONFIG_VIDEO_AMLOGIC_CAPTURE=y -CONFIG_AMLOGIC_CAPTURE_FRAME_ROTATE=y -CONFIG_AMLOGIC_VM_DISABLE_VIDEOLAYER=y -# CONFIG_VIDEO_AMLOGIC_CAPTURE_PROBE is not set -# CONFIG_AMLCAP_LOG_TIME_USEFORFRAMES is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_GT2005 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_GC0307 is not set -CONFIG_VIDEO_AMLOGIC_CAPTURE_GC0308=y -CONFIG_VIDEO_AMLOGIC_CAPTURE_GC0328=y -# CONFIG_VIDEO_AMLOGIC_CAPTURE_GC0329 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_GC2015 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_GC2035 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_GC2155 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_OV5640 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_OV5642 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_OV7675 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_OV2655 is not set -CONFIG_VIDEO_AMLOGIC_CAPTURE_SP0838=y -CONFIG_VIDEO_AMLOGIC_CAPTURE_SP2518=y -# CONFIG_VIDEO_AMLOGIC_CAPTURE_SP0A19 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_SP1628 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_HI253 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_HI704 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_HM2057 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_HM5065 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_OV3660 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_OV5647 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_HI2056 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_NT99250 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_NT99252 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_NT99340 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_AR0543 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_AR0833 is not set -# CONFIG_VIDEO_AMLOGIC_CAPTURE_BF3720 is not set -CONFIG_AMLOGIC_VIDEOIN_MANAGER=y +# CONFIG_VIDEO_AMLOGIC_CAPTURE is not set # # V4L2 Video Support @@ -1881,10 +1847,12 @@ CONFIG_MII=y # CONFIG_NET_TEAM is not set CONFIG_MACVLAN=m CONFIG_MACVTAP=m -CONFIG_VXLAN=y -# CONFIG_NETCONSOLE is not set -# CONFIG_NETPOLL is not set -# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_VXLAN=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NETPOLL=y +# CONFIG_NETPOLL_TRAP is not set +CONFIG_NET_POLL_CONTROLLER=y CONFIG_TUN=y CONFIG_VETH=m @@ -2739,7 +2707,7 @@ CONFIG_VIDEO_V4L2=y # CONFIG_VIDEO_FIXED_MINOR_RANGES is not set CONFIG_VIDEO_TUNER=m CONFIG_VIDEOBUF_GEN=y -CONFIG_VIDEOBUF_VMALLOC=y +CONFIG_VIDEOBUF_VMALLOC=m CONFIG_VIDEOBUF_RESOURCE=y CONFIG_VIDEOBUF_DVB=m CONFIG_VIDEOBUF2_CORE=y diff --git a/patch/kernel/odroidc1-default/patch-3.10.104-105.patch b/patch/kernel/odroidc1-default/patch-3.10.104-105.patch new file mode 100644 index 000000000..048a0d420 --- /dev/null +++ b/patch/kernel/odroidc1-default/patch-3.10.104-105.patch @@ -0,0 +1,10747 @@ +diff --git a/Makefile b/Makefile +index f6a2cbd438a1..80e180e1e4a2 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 10 +-SUBLEVEL = 104 ++SUBLEVEL = 105 + EXTRAVERSION = + NAME = TOSSUG Baby Fish + +diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h +index 766fdfde2b7a..6e9d27ad5103 100644 +--- 
a/arch/alpha/include/asm/uaccess.h ++++ b/arch/alpha/include/asm/uaccess.h +@@ -371,14 +371,6 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len) + return __cu_len; + } + +-extern inline long +-__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate) +-{ +- if (__access_ok((unsigned long)validate, len, get_fs())) +- len = __copy_tofrom_user_nocheck(to, from, len); +- return len; +-} +- + #define __copy_to_user(to,from,n) \ + ({ \ + __chk_user_ptr(to); \ +@@ -393,17 +385,22 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali + #define __copy_to_user_inatomic __copy_to_user + #define __copy_from_user_inatomic __copy_from_user + +- + extern inline long + copy_to_user(void __user *to, const void *from, long n) + { +- return __copy_tofrom_user((__force void *)to, from, n, to); ++ if (likely(__access_ok((unsigned long)to, n, get_fs()))) ++ n = __copy_tofrom_user_nocheck((__force void *)to, from, n); ++ return n; + } + + extern inline long + copy_from_user(void *to, const void __user *from, long n) + { +- return __copy_tofrom_user(to, (__force void *)from, n, from); ++ if (likely(__access_ok((unsigned long)from, n, get_fs()))) ++ n = __copy_tofrom_user_nocheck(to, (__force void *)from, n); ++ else ++ memset(to, 0, n); ++ return n; + } + + extern void __do_clear_user(void); +diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h +index 30c9baffa96f..08770c750696 100644 +--- a/arch/arc/include/asm/uaccess.h ++++ b/arch/arc/include/asm/uaccess.h +@@ -83,7 +83,10 @@ + "2: ;nop\n" \ + " .section .fixup, \"ax\"\n" \ + " .align 4\n" \ +- "3: mov %0, %3\n" \ ++ "3: # return -EFAULT\n" \ ++ " mov %0, %3\n" \ ++ " # zero out dst ptr\n" \ ++ " mov %1, 0\n" \ + " j 2b\n" \ + " .previous\n" \ + " .section __ex_table, \"a\"\n" \ +@@ -101,7 +104,11 @@ + "2: ;nop\n" \ + " .section .fixup, \"ax\"\n" \ + " .align 4\n" \ +- "3: mov %0, %3\n" \ ++ "3: # return -EFAULT\n" \ ++ " mov %0, %3\n" \ ++ " # zero out dst ptr\n" \ ++ " mov %1, 0\n" \ ++ " mov %R1, 0\n" \ + " j 2b\n" \ + " .previous\n" \ + " .section __ex_table, \"a\"\n" \ +diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c +index 6763654239a2..0823087dc9c0 100644 +--- a/arch/arc/kernel/signal.c ++++ b/arch/arc/kernel/signal.c +@@ -80,13 +80,14 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) + int err; + + err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); +- if (!err) +- set_current_blocked(&set); +- +- err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs), ++ err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch), + sizeof(sf->uc.uc_mcontext.regs.scratch)); ++ if (err) ++ return err; + +- return err; ++ set_current_blocked(&set); ++ ++ return 0; + } + + static inline int is_do_ss_needed(unsigned int magic) +diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S +index 032a8d987148..9fef67ab1692 100644 +--- a/arch/arm/boot/compressed/head.S ++++ b/arch/arm/boot/compressed/head.S +@@ -715,7 +715,7 @@ __armv7_mmu_cache_on: + orrne r0, r0, #1 @ MMU enabled + movne r1, #0xfffffffd @ domain 0 = client + bic r6, r6, #1 << 31 @ 32-bit translation system +- bic r6, r6, #3 << 0 @ use only ttbr0 ++ bic r6, r6, #(7 << 0) | (1 << 4) @ use only ttbr0 + mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer + mcrne p15, 0, r1, c3, c0, 0 @ load domain access control + mcrne p15, 0, r6, c2, c0, 2 @ load ttb control +diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c 
+index e57d7e5bf96a..932125a20877 100644 +--- a/arch/arm/common/sa1111.c ++++ b/arch/arm/common/sa1111.c +@@ -872,9 +872,9 @@ struct sa1111_save_data { + + #ifdef CONFIG_PM + +-static int sa1111_suspend(struct platform_device *dev, pm_message_t state) ++static int sa1111_suspend_noirq(struct device *dev) + { +- struct sa1111 *sachip = platform_get_drvdata(dev); ++ struct sa1111 *sachip = dev_get_drvdata(dev); + struct sa1111_save_data *save; + unsigned long flags; + unsigned int val; +@@ -937,9 +937,9 @@ static int sa1111_suspend(struct platform_device *dev, pm_message_t state) + * restored by their respective drivers, and must be called + * via LDM after this function. + */ +-static int sa1111_resume(struct platform_device *dev) ++static int sa1111_resume_noirq(struct device *dev) + { +- struct sa1111 *sachip = platform_get_drvdata(dev); ++ struct sa1111 *sachip = dev_get_drvdata(dev); + struct sa1111_save_data *save; + unsigned long flags, id; + void __iomem *base; +@@ -955,7 +955,7 @@ static int sa1111_resume(struct platform_device *dev) + id = sa1111_readl(sachip->base + SA1111_SKID); + if ((id & SKID_ID_MASK) != SKID_SA1111_ID) { + __sa1111_remove(sachip); +- platform_set_drvdata(dev, NULL); ++ dev_set_drvdata(dev, NULL); + kfree(save); + return 0; + } +@@ -1006,8 +1006,8 @@ static int sa1111_resume(struct platform_device *dev) + } + + #else +-#define sa1111_suspend NULL +-#define sa1111_resume NULL ++#define sa1111_suspend_noirq NULL ++#define sa1111_resume_noirq NULL + #endif + + static int sa1111_probe(struct platform_device *pdev) +@@ -1041,6 +1041,11 @@ static int sa1111_remove(struct platform_device *pdev) + return 0; + } + ++static struct dev_pm_ops sa1111_pm_ops = { ++ .suspend_noirq = sa1111_suspend_noirq, ++ .resume_noirq = sa1111_resume_noirq, ++}; ++ + /* + * Not sure if this should be on the system bus or not yet. + * We really want some way to register a system device at +@@ -1053,11 +1058,10 @@ static int sa1111_remove(struct platform_device *pdev) + static struct platform_driver sa1111_device_driver = { + .probe = sa1111_probe, + .remove = sa1111_remove, +- .suspend = sa1111_suspend, +- .resume = sa1111_resume, + .driver = { + .name = "sa1111", + .owner = THIS_MODULE, ++ .pm = &sa1111_pm_ops, + }, + }; + +diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c +index 5859c8bc727c..c2a6d8432005 100644 +--- a/arch/arm/kernel/devtree.c ++++ b/arch/arm/kernel/devtree.c +@@ -90,6 +90,8 @@ void __init arm_dt_init_cpu_maps(void) + return; + + for_each_child_of_node(cpus, cpu) { ++ const __be32 *cell; ++ int prop_bytes; + u32 hwid; + + if (of_node_cmp(cpu->type, "cpu")) +@@ -101,17 +103,23 @@ void __init arm_dt_init_cpu_maps(void) + * properties is considered invalid to build the + * cpu_logical_map. + */ +- if (of_property_read_u32(cpu, "reg", &hwid)) { ++ cell = of_get_property(cpu, "reg", &prop_bytes); ++ if (!cell || prop_bytes < sizeof(*cell)) { + pr_debug(" * %s missing reg property\n", + cpu->full_name); + return; + } + + /* +- * 8 MSBs must be set to 0 in the DT since the reg property ++ * Bits n:24 must be set to 0 in the DT since the reg property + * defines the MPIDR[23:0]. 
+ */
+-	if (hwid & ~MPIDR_HWID_BITMASK)
++	do {
++		hwid = be32_to_cpu(*cell++);
++		prop_bytes -= sizeof(*cell);
++	} while (!hwid && prop_bytes > 0);
++
++	if (prop_bytes || (hwid & ~MPIDR_HWID_BITMASK))
+ 		return;
+ 
+ 	/*
+diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
+index 9db3e98e8b85..4983b1149ec2 100644
+--- a/arch/arm/mach-sa1100/generic.c
++++ b/arch/arm/mach-sa1100/generic.c
+@@ -30,6 +30,7 @@
+ 
+ #include <mach/hardware.h>
+ #include <mach/irqs.h>
++#include <mach/reset.h>
+ 
+ #include "generic.h"
+ 
+@@ -133,6 +134,7 @@ static void sa1100_power_off(void)
+ 
+ void sa11x0_restart(char mode, const char *cmd)
+ {
++	clear_reset_status(RESET_STATUS_ALL);
+ 	if (mode == 's') {
+ 		/* Jump into ROM at address 0 */
+ 		soft_restart(0);
+diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
+index fe32c0e4ac01..e647e6d7b875 100644
+--- a/arch/arm64/include/asm/elf.h
++++ b/arch/arm64/include/asm/elf.h
+@@ -126,6 +126,7 @@ extern unsigned long randomize_et_dyn(unsigned long base);
+ 
+ #define SET_PERSONALITY(ex) clear_thread_flag(TIF_32BIT);
+ 
++/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
+ #define ARCH_DLINFO \
+ do { \
+ 	NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
+index 0defa0728a9b..c3cab6f87de4 100644
+--- a/arch/arm64/include/asm/spinlock.h
++++ b/arch/arm64/include/asm/spinlock.h
+@@ -200,4 +200,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
+ #define arch_read_relax(lock) cpu_relax()
+ #define arch_write_relax(lock) cpu_relax()
+ 
++/*
++ * Accesses appearing in program order before a spin_lock() operation
++ * can be reordered with accesses inside the critical section, by virtue
++ * of arch_spin_lock being constructed using acquire semantics.
++ *
++ * In cases where this is problematic (e.g. try_to_wake_up), an
++ * smp_mb__before_spinlock() can restore the required ordering. 
++ */ ++#define smp_mb__before_spinlock() smp_mb() ++ + #endif /* __ASM_SPINLOCK_H */ +diff --git a/arch/arm64/include/uapi/asm/auxvec.h b/arch/arm64/include/uapi/asm/auxvec.h +index 22d6d8885854..4cf0c17787a8 100644 +--- a/arch/arm64/include/uapi/asm/auxvec.h ++++ b/arch/arm64/include/uapi/asm/auxvec.h +@@ -19,4 +19,6 @@ + /* vDSO location */ + #define AT_SYSINFO_EHDR 33 + ++#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */ ++ + #endif +diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c +index f4726dc054b3..49e6e3046105 100644 +--- a/arch/arm64/kernel/debug-monitors.c ++++ b/arch/arm64/kernel/debug-monitors.c +@@ -276,8 +276,10 @@ int kernel_active_single_step(void) + /* ptrace API */ + void user_enable_single_step(struct task_struct *task) + { +- set_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP); +- set_regs_spsr_ss(task_pt_regs(task)); ++ struct thread_info *ti = task_thread_info(task); ++ ++ if (!test_and_set_ti_thread_flag(ti, TIF_SINGLESTEP)) ++ set_regs_spsr_ss(task_pt_regs(task)); + } + + void user_disable_single_step(struct task_struct *task) +diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S +index 7cd589ebca2a..5d515e629d0d 100644 +--- a/arch/arm64/kernel/entry.S ++++ b/arch/arm64/kernel/entry.S +@@ -490,7 +490,7 @@ el0_inv: + mov x0, sp + mov x1, #BAD_SYNC + mrs x2, esr_el1 +- b bad_mode ++ b bad_el0_sync + ENDPROC(el0_sync) + + .align 6 +diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c +index f30852d28590..488a7b36d48e 100644 +--- a/arch/arm64/kernel/traps.c ++++ b/arch/arm64/kernel/traps.c +@@ -307,16 +307,33 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs) + } + + /* +- * bad_mode handles the impossible case in the exception vector. ++ * bad_mode handles the impossible case in the exception vector. This is always ++ * fatal. + */ + asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) + { +- siginfo_t info; +- void __user *pc = (void __user *)instruction_pointer(regs); + console_verbose(); + + pr_crit("Bad mode in %s handler detected, code 0x%08x\n", + handler[reason], esr); ++ ++ die("Oops - bad mode", regs, 0); ++ local_irq_disable(); ++ panic("bad mode"); ++} ++ ++/* ++ * bad_el0_sync handles unexpected, but potentially recoverable synchronous ++ * exceptions taken from EL0. Unlike bad_mode, this returns. 
++ */ ++asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) ++{ ++ siginfo_t info; ++ void __user *pc = (void __user *)instruction_pointer(regs); ++ console_verbose(); ++ ++ pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x\n", ++ smp_processor_id(), esr); + __show_regs(regs); + + info.si_signo = SIGILL; +@@ -324,7 +341,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) + info.si_code = ILL_ILLOPC; + info.si_addr = pc; + +- arm64_notify_die("Oops - bad mode", regs, &info, 0); ++ force_sig_info(info.si_signo, &info, current); + } + + void __pte_error(const char *file, int line, unsigned long val) +diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h +index 245b2ee213c9..a0a9b8c31041 100644 +--- a/arch/avr32/include/asm/uaccess.h ++++ b/arch/avr32/include/asm/uaccess.h +@@ -74,7 +74,7 @@ extern __kernel_size_t __copy_user(void *to, const void *from, + + extern __kernel_size_t copy_to_user(void __user *to, const void *from, + __kernel_size_t n); +-extern __kernel_size_t copy_from_user(void *to, const void __user *from, ++extern __kernel_size_t ___copy_from_user(void *to, const void __user *from, + __kernel_size_t n); + + static inline __kernel_size_t __copy_to_user(void __user *to, const void *from, +@@ -88,6 +88,15 @@ static inline __kernel_size_t __copy_from_user(void *to, + { + return __copy_user(to, (const void __force *)from, n); + } ++static inline __kernel_size_t copy_from_user(void *to, ++ const void __user *from, ++ __kernel_size_t n) ++{ ++ size_t res = ___copy_from_user(to, from, n); ++ if (unlikely(res)) ++ memset(to + (n - res), 0, res); ++ return res; ++} + + #define __copy_to_user_inatomic __copy_to_user + #define __copy_from_user_inatomic __copy_from_user +diff --git a/arch/avr32/kernel/avr32_ksyms.c b/arch/avr32/kernel/avr32_ksyms.c +index d93ead02daed..7c6cf14f0985 100644 +--- a/arch/avr32/kernel/avr32_ksyms.c ++++ b/arch/avr32/kernel/avr32_ksyms.c +@@ -36,7 +36,7 @@ EXPORT_SYMBOL(copy_page); + /* + * Userspace access stuff. + */ +-EXPORT_SYMBOL(copy_from_user); ++EXPORT_SYMBOL(___copy_from_user); + EXPORT_SYMBOL(copy_to_user); + EXPORT_SYMBOL(__copy_user); + EXPORT_SYMBOL(strncpy_from_user); +diff --git a/arch/avr32/lib/copy_user.S b/arch/avr32/lib/copy_user.S +index ea59c04b07de..075373471da1 100644 +--- a/arch/avr32/lib/copy_user.S ++++ b/arch/avr32/lib/copy_user.S +@@ -23,13 +23,13 @@ + */ + .text + .align 1 +- .global copy_from_user +- .type copy_from_user, @function +-copy_from_user: ++ .global ___copy_from_user ++ .type ___copy_from_user, @function ++___copy_from_user: + branch_if_kernel r8, __copy_user + ret_if_privileged r8, r11, r10, r10 + rjmp __copy_user +- .size copy_from_user, . - copy_from_user ++ .size ___copy_from_user, . 
- ___copy_from_user + + .global copy_to_user + .type copy_to_user, @function +diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c +index 903c7d81d0d5..a8e208eaf2a4 100644 +--- a/arch/avr32/mach-at32ap/pio.c ++++ b/arch/avr32/mach-at32ap/pio.c +@@ -435,7 +435,7 @@ void __init at32_init_pio(struct platform_device *pdev) + struct resource *regs; + struct pio_device *pio; + +- if (pdev->id > MAX_NR_PIO_DEVICES) { ++ if (pdev->id >= MAX_NR_PIO_DEVICES) { + dev_err(&pdev->dev, "only %d PIO devices supported\n", + MAX_NR_PIO_DEVICES); + return; +diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h +index 57701c3b8a59..a992a788409c 100644 +--- a/arch/blackfin/include/asm/uaccess.h ++++ b/arch/blackfin/include/asm/uaccess.h +@@ -177,11 +177,12 @@ static inline int bad_user_access_length(void) + static inline unsigned long __must_check + copy_from_user(void *to, const void __user *from, unsigned long n) + { +- if (access_ok(VERIFY_READ, from, n)) ++ if (likely(access_ok(VERIFY_READ, from, n))) { + memcpy(to, (const void __force *)from, n); +- else +- return n; +- return 0; ++ return 0; ++ } ++ memset(to, 0, n); ++ return n; + } + + static inline unsigned long __must_check +diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h +index 914540801c5e..93bfa8acc38b 100644 +--- a/arch/cris/include/asm/uaccess.h ++++ b/arch/cris/include/asm/uaccess.h +@@ -176,30 +176,6 @@ extern unsigned long __copy_user(void __user *to, const void *from, unsigned lon + extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n); + extern unsigned long __do_clear_user(void __user *to, unsigned long n); + +-static inline unsigned long +-__generic_copy_to_user(void __user *to, const void *from, unsigned long n) +-{ +- if (access_ok(VERIFY_WRITE, to, n)) +- return __copy_user(to,from,n); +- return n; +-} +- +-static inline unsigned long +-__generic_copy_from_user(void *to, const void __user *from, unsigned long n) +-{ +- if (access_ok(VERIFY_READ, from, n)) +- return __copy_user_zeroing(to,from,n); +- return n; +-} +- +-static inline unsigned long +-__generic_clear_user(void __user *to, unsigned long n) +-{ +- if (access_ok(VERIFY_WRITE, to, n)) +- return __do_clear_user(to,n); +- return n; +-} +- + static inline long + __strncpy_from_user(char *dst, const char __user *src, long count) + { +@@ -262,7 +238,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n) + else if (n == 24) + __asm_copy_from_user_24(to, from, ret); + else +- ret = __generic_copy_from_user(to, from, n); ++ ret = __copy_user_zeroing(to, from, n); + + return ret; + } +@@ -312,7 +288,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n) + else if (n == 24) + __asm_copy_to_user_24(to, from, ret); + else +- ret = __generic_copy_to_user(to, from, n); ++ ret = __copy_user(to, from, n); + + return ret; + } +@@ -344,26 +320,43 @@ __constant_clear_user(void __user *to, unsigned long n) + else if (n == 24) + __asm_clear_24(to, ret); + else +- ret = __generic_clear_user(to, n); ++ ret = __do_clear_user(to, n); + + return ret; + } + + +-#define clear_user(to, n) \ +-(__builtin_constant_p(n) ? 
\ +- __constant_clear_user(to, n) : \ +- __generic_clear_user(to, n)) ++static inline size_t clear_user(void __user *to, size_t n) ++{ ++ if (unlikely(!access_ok(VERIFY_WRITE, to, n))) ++ return n; ++ if (__builtin_constant_p(n)) ++ return __constant_clear_user(to, n); ++ else ++ return __do_clear_user(to, n); ++} + +-#define copy_from_user(to, from, n) \ +-(__builtin_constant_p(n) ? \ +- __constant_copy_from_user(to, from, n) : \ +- __generic_copy_from_user(to, from, n)) ++static inline size_t copy_from_user(void *to, const void __user *from, size_t n) ++{ ++ if (unlikely(!access_ok(VERIFY_READ, from, n))) { ++ memset(to, 0, n); ++ return n; ++ } ++ if (__builtin_constant_p(n)) ++ return __constant_copy_from_user(to, from, n); ++ else ++ return __copy_user_zeroing(to, from, n); ++} + +-#define copy_to_user(to, from, n) \ +-(__builtin_constant_p(n) ? \ +- __constant_copy_to_user(to, from, n) : \ +- __generic_copy_to_user(to, from, n)) ++static inline size_t copy_to_user(void __user *to, const void *from, size_t n) ++{ ++ if (unlikely(!access_ok(VERIFY_WRITE, to, n))) ++ return n; ++ if (__builtin_constant_p(n)) ++ return __constant_copy_to_user(to, from, n); ++ else ++ return __copy_user(to, from, n); ++} + + /* We let the __ versions of copy_from/to_user inline, because they're often + * used in fast paths and have only a small space overhead. +diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h +index 0b67ec5b4414..3a74137eeef8 100644 +--- a/arch/frv/include/asm/uaccess.h ++++ b/arch/frv/include/asm/uaccess.h +@@ -263,19 +263,25 @@ do { \ + extern long __memset_user(void *dst, unsigned long count); + extern long __memcpy_user(void *dst, const void *src, unsigned long count); + +-#define clear_user(dst,count) __memset_user(____force(dst), (count)) ++#define __clear_user(dst,count) __memset_user(____force(dst), (count)) + #define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), ____force(from), (n)) + #define __copy_to_user_inatomic(to, from, n) __memcpy_user(____force(to), (from), (n)) + + #else + +-#define clear_user(dst,count) (memset(____force(dst), 0, (count)), 0) ++#define __clear_user(dst,count) (memset(____force(dst), 0, (count)), 0) + #define __copy_from_user_inatomic(to, from, n) (memcpy((to), ____force(from), (n)), 0) + #define __copy_to_user_inatomic(to, from, n) (memcpy(____force(to), (from), (n)), 0) + + #endif + +-#define __clear_user clear_user ++static inline unsigned long __must_check ++clear_user(void __user *to, unsigned long n) ++{ ++ if (likely(__access_ok(to, n))) ++ n = __clear_user(to, n); ++ return n; ++} + + static inline unsigned long __must_check + __copy_to_user(void __user *to, const void *from, unsigned long n) +diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h +index e4127e4d6a5b..25fc9049db8a 100644 +--- a/arch/hexagon/include/asm/uaccess.h ++++ b/arch/hexagon/include/asm/uaccess.h +@@ -102,7 +102,8 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src, + { + long res = __strnlen_user(src, n); + +- /* return from strnlen can't be zero -- that would be rubbish. 
*/
++	if (unlikely(!res))
++		return -EFAULT;
+ 
+ 	if (res > n) {
+ 		copy_from_user(dst, src, n);
+diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
+index 449c8c0fa2bd..810926c56e31 100644
+--- a/arch/ia64/include/asm/uaccess.h
++++ b/arch/ia64/include/asm/uaccess.h
+@@ -262,17 +262,15 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ 	__cu_len; \
+ })
+ 
+-#define copy_from_user(to, from, n) \
+-({ \
+-	void *__cu_to = (to); \
+-	const void __user *__cu_from = (from); \
+-	long __cu_len = (n); \
+- \
+-	__chk_user_ptr(__cu_from); \
+-	if (__access_ok(__cu_from, __cu_len, get_fs())) \
+-		__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
+-	__cu_len; \
+-})
++static inline unsigned long
++copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++	if (likely(__access_ok(from, n, get_fs())))
++		n = __copy_user((__force void __user *) to, from, n);
++	else
++		memset(to, 0, n);
++	return n;
++}
+ 
+ #define __copy_in_user(to, from, size) __copy_user((to), (from), (size))
+ 
+diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
+index 1c7047bea200..a26d28d59ae6 100644
+--- a/arch/m32r/include/asm/uaccess.h
++++ b/arch/m32r/include/asm/uaccess.h
+@@ -215,7 +215,7 @@ extern int fixup_exception(struct pt_regs *regs);
+ #define __get_user_nocheck(x,ptr,size) \
+ ({ \
+ 	long __gu_err = 0; \
+-	unsigned long __gu_val; \
++	unsigned long __gu_val = 0; \
+ 	might_sleep(); \
+ 	__get_user_size(__gu_val,(ptr),(size),__gu_err); \
+ 	(x) = (__typeof__(*(ptr)))__gu_val; \
+diff --git a/arch/metag/include/asm/atomic.h b/arch/metag/include/asm/atomic.h
+index 307ecd2bd9a1..d7d6b9e53e44 100644
+--- a/arch/metag/include/asm/atomic.h
++++ b/arch/metag/include/asm/atomic.h
+@@ -38,6 +38,7 @@
+ #define atomic_dec(v) atomic_sub(1, (v))
+ 
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
++#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
+ 
+ #define smp_mb__before_atomic_dec() barrier()
+ #define smp_mb__after_atomic_dec() barrier()
+@@ -46,8 +47,6 @@
+ 
+ #endif
+ 
+-#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
+-
+ #include <asm-generic/atomic64.h>
+ 
+ #endif /* __ASM_METAG_ATOMIC_H */
+diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
+index 0748b0a97986..7841f2290385 100644
+--- a/arch/metag/include/asm/uaccess.h
++++ b/arch/metag/include/asm/uaccess.h
+@@ -199,8 +199,9 @@ extern unsigned long __must_check __copy_user_zeroing(void *to,
+ static inline unsigned long
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+-	if (access_ok(VERIFY_READ, from, n))
++	if (likely(access_ok(VERIFY_READ, from, n)))
+ 		return __copy_user_zeroing(to, from, n);
++	memset(to, 0, n);
+ 	return n;
+ }
+ 
+diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
+index 04e49553bdf9..5488a1a71665 100644
+--- a/arch/microblaze/include/asm/uaccess.h
++++ b/arch/microblaze/include/asm/uaccess.h
+@@ -226,7 +226,7 @@ extern long __user_bad(void);
+ 
+ #define __get_user(x, ptr) \
+ ({ \
+-	unsigned long __gu_val; \
++	unsigned long __gu_val = 0; \
+ 	/*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \
+ 	long __gu_err; \
+ 	switch (sizeof(*(ptr))) { \
+@@ -371,10 +371,13 @@ extern long __user_bad(void);
+ static inline long copy_from_user(void *to,
+ 		const void __user *from, unsigned long n)
+ {
++	unsigned long res = n;
+ 	might_sleep();
+-	if (access_ok(VERIFY_READ, from, n))
+-		return __copy_from_user(to, from, n);
+-	
return n;
++	if (likely(access_ok(VERIFY_READ, from, n)))
++		res = __copy_from_user(to, from, n);
++	if (unlikely(res))
++		memset(to + (n - res), 0, res);
++	return res;
+ }
+ 
+ #define __copy_to_user(to, from, n) \
+diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
+index 883a162083af..05863e3ee2e7 100644
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -375,7 +375,10 @@ struct kvm_vcpu_arch {
+ 	/* Host KSEG0 address of the EI/DI offset */
+ 	void *kseg0_commpage;
+ 
+-	u32 io_gpr; /* GPR used as IO source/target */
++	/* Resume PC after MMIO completion */
++	unsigned long io_pc;
++	/* GPR used as IO source/target */
++	u32 io_gpr;
+ 
+ 	/* Used to calibrate the virutal count register for the guest */
+ 	int32_t host_cp0_count;
+@@ -386,8 +389,6 @@ struct kvm_vcpu_arch {
+ 	/* Bitmask of pending exceptions to be cleared */
+ 	unsigned long pending_exceptions_clr;
+ 
+-	unsigned long pending_load_cause;
+-
+ 	/* Save/Restore the entryhi register when are are preempted/scheduled back in */
+ 	unsigned long preempt_entryhi;
+ 
+diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
+index 5e6cd0947393..a288de2199d8 100644
+--- a/arch/mips/include/asm/ptrace.h
++++ b/arch/mips/include/asm/ptrace.h
+@@ -73,7 +73,7 @@ static inline int is_syscall_success(struct pt_regs *regs)
+ 
+ static inline long regs_return_value(struct pt_regs *regs)
+ {
+-	if (is_syscall_success(regs))
++	if (is_syscall_success(regs) || !user_mode(regs))
+ 		return regs->regs[2];
+ 	else
+ 		return -regs->regs[2];
+diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
+index f3fa3750f577..e09339df2232 100644
+--- a/arch/mips/include/asm/uaccess.h
++++ b/arch/mips/include/asm/uaccess.h
+@@ -13,6 +13,7 @@
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/thread_info.h>
++#include <linux/string.h>
+ 
+ /*
+  * The fs value determines whether argument validity checking should be
+@@ -938,6 +939,8 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
+ 		might_fault(); \
+ 		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
+ 		                                   __cu_len); \
++	} else { \
++		memset(__cu_to, 0, __cu_len); \
+ 	} \
+ 	__cu_len; \
+ })
+diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
+index 9f7643874fba..716285497e0e 100644
+--- a/arch/mips/kvm/kvm_mips_emul.c
++++ b/arch/mips/kvm/kvm_mips_emul.c
+@@ -254,15 +254,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
+ 	struct mips_coproc *cop0 = vcpu->arch.cop0;
+ 	enum emulation_result er = EMULATE_DONE;
+ 
+-	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
++	if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
++		kvm_clear_c0_guest_status(cop0, ST0_ERL);
++		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
++	} else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+ 		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
+ 			  kvm_read_c0_guest_epc(cop0));
+ 		kvm_clear_c0_guest_status(cop0, ST0_EXL);
+ 		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
+ 
+-	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+-		kvm_clear_c0_guest_status(cop0, ST0_ERL);
+-		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+ 	} else {
+ 		printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
+ 		       vcpu->arch.pc);
+@@ -310,6 +310,47 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
+ 	return er;
+ }
+ 
++/**
++ * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
++ * @vcpu:	VCPU with changed mappings.
++ * @tlb:	TLB entry being removed.
++ * ++ * This is called to indicate a single change in guest MMU mappings, so that we ++ * can arrange TLB flushes on this and other CPUs. ++ */ ++static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu, ++ struct kvm_mips_tlb *tlb) ++{ ++ int cpu, i; ++ bool user; ++ ++ /* No need to flush for entries which are already invalid */ ++ if (!((tlb->tlb_lo0 | tlb->tlb_lo1) & MIPS3_PG_V)) ++ return; ++ /* User address space doesn't need flushing for KSeg2/3 changes */ ++ user = tlb->tlb_hi < KVM_GUEST_KSEG0; ++ ++ preempt_disable(); ++ ++ /* ++ * Probe the shadow host TLB for the entry being overwritten, if one ++ * matches, invalidate it ++ */ ++ kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); ++ ++ /* Invalidate the whole ASID on other CPUs */ ++ cpu = smp_processor_id(); ++ for_each_possible_cpu(i) { ++ if (i == cpu) ++ continue; ++ if (user) ++ vcpu->arch.guest_user_asid[i] = 0; ++ vcpu->arch.guest_kernel_asid[i] = 0; ++ } ++ ++ preempt_enable(); ++} ++ + /* Write Guest TLB Entry @ Index */ + enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) + { +@@ -331,10 +372,8 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) + } + + tlb = &vcpu->arch.guest_tlb[index]; +-#if 1 +- /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */ +- kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); +-#endif ++ ++ kvm_mips_invalidate_guest_tlb(vcpu, tlb); + + tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); + tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); +@@ -373,10 +412,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) + + tlb = &vcpu->arch.guest_tlb[index]; + +-#if 1 +- /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */ +- kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); +-#endif ++ kvm_mips_invalidate_guest_tlb(vcpu, tlb); + + tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); + tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); +@@ -419,6 +455,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, + int32_t rt, rd, copz, sel, co_bit, op; + uint32_t pc = vcpu->arch.pc; + unsigned long curr_pc; ++ int cpu, i; + + /* + * Update PC and hold onto current PC in case there is +@@ -538,8 +575,16 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, + ASID_MASK, + vcpu->arch.gprs[rt] & ASID_MASK); + ++ preempt_disable(); + /* Blow away the shadow host TLBs */ + kvm_mips_flush_host_tlb(1); ++ cpu = smp_processor_id(); ++ for_each_possible_cpu(i) ++ if (i != cpu) { ++ vcpu->arch.guest_user_asid[i] = 0; ++ vcpu->arch.guest_kernel_asid[i] = 0; ++ } ++ preempt_enable(); + } + kvm_write_c0_guest_entryhi(cop0, + vcpu->arch.gprs[rt]); +@@ -773,6 +818,7 @@ kvm_mips_emulate_load(uint32_t inst, uint32_t cause, + struct kvm_run *run, struct kvm_vcpu *vcpu) + { + enum emulation_result er = EMULATE_DO_MMIO; ++ unsigned long curr_pc; + int32_t op, base, rt, offset; + uint32_t bytes; + +@@ -781,7 +827,18 @@ kvm_mips_emulate_load(uint32_t inst, uint32_t cause, + offset = inst & 0xffff; + op = (inst >> 26) & 0x3f; + +- vcpu->arch.pending_load_cause = cause; ++ /* ++ * Find the resume PC now while we have safe and easy access to the ++ * prior branch instruction, and save it for ++ * kvm_mips_complete_mmio_load() to restore later. 
++	 */
++	curr_pc = vcpu->arch.pc;
++	er = update_pc(vcpu, cause);
++	if (er == EMULATE_FAIL)
++		return er;
++	vcpu->arch.io_pc = vcpu->arch.pc;
++	vcpu->arch.pc = curr_pc;
++
+ 	vcpu->arch.io_gpr = rt;
+ 
+ 	switch (op) {
+@@ -1610,7 +1667,6 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+ 	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
+ 	enum emulation_result er = EMULATE_DONE;
+-	unsigned long curr_pc;
+ 
+ 	if (run->mmio.len > sizeof(*gpr)) {
+ 		printk("Bad MMIO length: %d", run->mmio.len);
+@@ -1618,14 +1674,8 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 		goto done;
+ 	}
+ 
+-	/*
+-	 * Update PC and hold onto current PC in case there is
+-	 * an error and we want to rollback the PC
+-	 */
+-	curr_pc = vcpu->arch.pc;
+-	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
+-	if (er == EMULATE_FAIL)
+-		return er;
++	/* Restore saved resume PC */
++	vcpu->arch.pc = vcpu->arch.io_pc;
+ 
+ 	switch (run->mmio.len) {
+ 	case 4:
+@@ -1647,12 +1697,6 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 		break;
+ 	}
+ 
+-	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
+-		kvm_debug
+-		    ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
+-		     vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
+-		     vcpu->mmio_needed);
+-
+ done:
+ 	return er;
+ }
+diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
+index c72a06936781..2046e1c385d4 100644
+--- a/arch/mips/mti-malta/malta-setup.c
++++ b/arch/mips/mti-malta/malta-setup.c
+@@ -36,6 +36,9 @@
+ #include <linux/console.h>
+ #endif
+ 
++#define ROCIT_CONFIG_GEN0		0x1f403000
++#define ROCIT_CONFIG_GEN0_PCI_IOCU	BIT(7)
++
+ extern void malta_be_init(void);
+ extern int malta_be_handler(struct pt_regs *regs, int is_fixup);
+ 
+@@ -108,6 +111,8 @@ static void __init fd_activate(void)
+ static int __init plat_enable_iocoherency(void)
+ {
+ 	int supported = 0;
++	u32 cfg;
++
+ 	if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
+ 		if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
+ 			BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
+@@ -130,7 +135,8 @@ static int __init plat_enable_iocoherency(void)
+ 	} else if (gcmp_niocu() != 0) {
+ 		/* Nothing special needs to be done to enable coherency */
+ 		pr_info("CMP IOCU detected\n");
+-		if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) {
++		cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0));
++		if (!(cfg & ROCIT_CONFIG_GEN0_PCI_IOCU)) {
+ 			pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
+ 			return 0;
+ 		}
+diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
+index d7966e0f7698..b9855e4f0ccd 100644
+--- a/arch/mn10300/include/asm/uaccess.h
++++ b/arch/mn10300/include/asm/uaccess.h
+@@ -181,6 +181,7 @@ struct __large_struct { unsigned long buf[100]; };
+ 		"2:\n"						\
+ 		"	.section	.fixup,\"ax\"\n"	\
+ 		"3:\n\t"					\
++		"	mov		0,%1\n"			\
+ 		"	mov		%3,%0\n"		\
+ 		"	jmp		2b\n"			\
+ 		"	.previous\n"				\
+diff --git a/arch/mn10300/lib/usercopy.c b/arch/mn10300/lib/usercopy.c
+index 7826e6c364e7..ce8899e5e171 100644
+--- a/arch/mn10300/lib/usercopy.c
++++ b/arch/mn10300/lib/usercopy.c
+@@ -9,7 +9,7 @@
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version. 
+ */
+-#include <asm/uaccess.h>
++#include <linux/uaccess.h>
+ 
+ unsigned long
+ __generic_copy_to_user(void *to, const void *from, unsigned long n)
+@@ -24,6 +24,8 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
+ {
+ 	if (access_ok(VERIFY_READ, from, n))
+ 		__copy_user_zeroing(to, from, n);
++	else
++		memset(to, 0, n);
+ 	return n;
+ }
+ 
+diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
+index ab2e7a198a4c..d441480a4af4 100644
+--- a/arch/openrisc/include/asm/uaccess.h
++++ b/arch/openrisc/include/asm/uaccess.h
+@@ -273,28 +273,20 @@ __copy_tofrom_user(void *to, const void *from, unsigned long size);
+ static inline unsigned long
+ copy_from_user(void *to, const void *from, unsigned long n)
+ {
+-	unsigned long over;
+-
+-	if (access_ok(VERIFY_READ, from, n))
+-		return __copy_tofrom_user(to, from, n);
+-	if ((unsigned long)from < TASK_SIZE) {
+-		over = (unsigned long)from + n - TASK_SIZE;
+-		return __copy_tofrom_user(to, from, n - over) + over;
+-	}
+-	return n;
++	unsigned long res = n;
++
++	if (likely(access_ok(VERIFY_READ, from, n)))
++		res = __copy_tofrom_user(to, from, n);
++	if (unlikely(res))
++		memset(to + (n - res), 0, res);
++	return res;
+ }
+ 
+ static inline unsigned long
+ copy_to_user(void *to, const void *from, unsigned long n)
+ {
+-	unsigned long over;
+-
+-	if (access_ok(VERIFY_WRITE, to, n))
+-		return __copy_tofrom_user(to, from, n);
+-	if ((unsigned long)to < TASK_SIZE) {
+-		over = (unsigned long)to + n - TASK_SIZE;
+-		return __copy_tofrom_user(to, from, n - over) + over;
+-	}
++	if (likely(access_ok(VERIFY_WRITE, to, n)))
++		n = __copy_tofrom_user(to, from, n);
+ 	return n;
+ }
+ 
+@@ -303,13 +295,8 @@ extern unsigned long __clear_user(void *addr, unsigned long size);
+ static inline __must_check unsigned long
+ clear_user(void *addr, unsigned long size)
+ {
+-
+-	if (access_ok(VERIFY_WRITE, addr, size))
+-		return __clear_user(addr, size);
+-	if ((unsigned long)addr < TASK_SIZE) {
+-		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
+-		return __clear_user(addr, size - over) + over;
+-	}
++	if (likely(access_ok(VERIFY_WRITE, addr, size)))
++		size = __clear_user(addr, size);
+ 	return size;
+ }
+ 
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index e0a82358517e..9bbddafb0da3 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -9,6 +9,8 @@
+ #include <asm/errno.h>
+ #include <asm-generic/uaccess-unaligned.h>
+ 
++#include <linux/string.h>
++
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+ 
+@@ -246,13 +248,14 @@ static inline unsigned long __must_check copy_from_user(void *to,
+                                           unsigned long n)
+ {
+         int sz = __compiletime_object_size(to);
+-        int ret = -EFAULT;
++        unsigned long ret = n;
+ 
+         if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
+                 ret = __copy_from_user(to, from, n);
+         else
+                 copy_from_user_overflow();
+-
++	if (unlikely(ret))
++		memset(to + (n - ret), 0, ret);
+         return ret;
+ }
+ 
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index e767ab733e32..69caa82c50d3 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -106,8 +106,6 @@ linux_gateway_entry:
+ 	mtsp	%r0,%sr4			/* get kernel space into sr4 */
+ 	mtsp	%r0,%sr5			/* get kernel space into sr5 */
+ 	mtsp	%r0,%sr6			/* get kernel space into sr6 */
+-	mfsp    %sr7,%r1                        /* save user sr7 */
+-	mtsp    %r1,%sr3                        /* and store it in sr3 */
+ 
+ #ifdef CONFIG_64BIT
+ 	/* for now we can *always* set the W bit on entry to the syscall
+@@ -133,6 +131,14 @@ linux_gateway_entry:
+ 	depdi	0, 31, 32, %r21
+ 1:	
+ #endif
+ 
++	
+
++ /* We use a rsm/ssm pair to prevent sr3 from being clobbered ++ * by external interrupts. ++ */ ++ mfsp %sr7,%r1 /* save user sr7 */ ++ rsm PSW_SM_I, %r0 /* disable interrupts */ ++ mtsp %r1,%sr3 /* and store it in sr3 */ ++ + mfctl %cr30,%r1 + xor %r1,%r30,%r30 /* ye olde xor trick */ + xor %r1,%r30,%r1 +@@ -147,6 +153,7 @@ linux_gateway_entry: + */ + + mtsp %r0,%sr7 /* get kernel space into sr7 */ ++ ssm PSW_SM_I, %r0 /* enable interrupts */ + STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */ + mfctl %cr30,%r1 /* get task ptr in %r1 */ + LDREG TI_TASK(%r1),%r1 +diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h +index 4db49590acf5..1d47060f488b 100644 +--- a/arch/powerpc/include/asm/uaccess.h ++++ b/arch/powerpc/include/asm/uaccess.h +@@ -323,30 +323,17 @@ extern unsigned long __copy_tofrom_user(void __user *to, + static inline unsigned long copy_from_user(void *to, + const void __user *from, unsigned long n) + { +- unsigned long over; +- +- if (access_ok(VERIFY_READ, from, n)) ++ if (likely(access_ok(VERIFY_READ, from, n))) + return __copy_tofrom_user((__force void __user *)to, from, n); +- if ((unsigned long)from < TASK_SIZE) { +- over = (unsigned long)from + n - TASK_SIZE; +- return __copy_tofrom_user((__force void __user *)to, from, +- n - over) + over; +- } ++ memset(to, 0, n); + return n; + } + + static inline unsigned long copy_to_user(void __user *to, + const void *from, unsigned long n) + { +- unsigned long over; +- + if (access_ok(VERIFY_WRITE, to, n)) + return __copy_tofrom_user(to, (__force void __user *)from, n); +- if ((unsigned long)to < TASK_SIZE) { +- over = (unsigned long)to + n - TASK_SIZE; +- return __copy_tofrom_user(to, (__force void __user *)from, +- n - over) + over; +- } + return n; + } + +@@ -437,10 +424,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size) + might_sleep(); + if (likely(access_ok(VERIFY_WRITE, addr, size))) + return __clear_user(addr, size); +- if ((unsigned long)addr < TASK_SIZE) { +- unsigned long over = (unsigned long)addr + size - TASK_SIZE; +- return __clear_user(addr, size - over) + over; +- } + return size; + } + +diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c +index 48fbc2b97e95..4047d8a035f7 100644 +--- a/arch/powerpc/kernel/nvram_64.c ++++ b/arch/powerpc/kernel/nvram_64.c +@@ -280,7 +280,7 @@ int __init nvram_remove_partition(const char *name, int sig, + + /* Make partition a free partition */ + part->header.signature = NVRAM_SIG_FREE; +- strncpy(part->header.name, "wwwwwwwwwwww", 12); ++ memset(part->header.name, 'w', 12); + part->header.checksum = nvram_checksum(&part->header); + rc = nvram_write_header(part); + if (rc <= 0) { +@@ -298,8 +298,8 @@ int __init nvram_remove_partition(const char *name, int sig, + } + if (prev) { + prev->header.length += part->header.length; +- prev->header.checksum = nvram_checksum(&part->header); +- rc = nvram_write_header(part); ++ prev->header.checksum = nvram_checksum(&prev->header); ++ rc = nvram_write_header(prev); + if (rc <= 0) { + printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc); + return rc; +diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S +index 79796de11737..3263ee23170d 100644 +--- a/arch/powerpc/kernel/vdso64/datapage.S ++++ b/arch/powerpc/kernel/vdso64/datapage.S +@@ -57,7 +57,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map) + bl V_LOCAL_FUNC(__get_datapage) + mtlr r12 + addi r3,r3,CFG_SYSCALL_MAP64 +- cmpli cr0,r4,0 ++ 
cmpldi cr0,r4,0 + crclr cr0*4+so + beqlr + li r0,__NR_syscalls +diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S +index a76b4af37ef2..382021324883 100644 +--- a/arch/powerpc/kernel/vdso64/gettimeofday.S ++++ b/arch/powerpc/kernel/vdso64/gettimeofday.S +@@ -145,7 +145,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres) + bne cr0,99f + + li r3,0 +- cmpli cr0,r4,0 ++ cmpldi cr0,r4,0 + crclr cr0*4+so + beqlr + lis r5,CLOCK_REALTIME_RES@h +diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S +index d73a59014900..be94e1be4ae3 100644 +--- a/arch/powerpc/lib/copyuser_64.S ++++ b/arch/powerpc/lib/copyuser_64.S +@@ -336,6 +336,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) + addi r3,r3,8 + 171: + 177: ++179: + addi r3,r3,8 + 370: + 372: +@@ -350,7 +351,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) + 173: + 174: + 175: +-179: + 181: + 184: + 186: +diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S +index 17aa6dfceb34..e507f5e733f3 100644 +--- a/arch/powerpc/mm/slb_low.S ++++ b/arch/powerpc/mm/slb_low.S +@@ -110,7 +110,12 @@ BEGIN_FTR_SECTION + END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) + b slb_finish_load_1T + +-0: ++0: /* ++ * For userspace addresses, make sure this is region 0. ++ */ ++ cmpdi r9, 0 ++ bne 8f ++ + /* when using slices, we extract the psize off the slice bitmaps + * and then we need to get the sllp encoding off the mmu_psize_defs + * array. +diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c +index 0473d31b3a4d..d93c6cab18bf 100644 +--- a/arch/powerpc/platforms/powernv/pci.c ++++ b/arch/powerpc/platforms/powernv/pci.c +@@ -176,8 +176,8 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pnv_phb *phb) + pr_info(" dma1ErrorLog1 = 0x%016llx\n", data->dma1ErrorLog1); + + for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) { +- if ((data->pestA[i] >> 63) == 0 && +- (data->pestB[i] >> 63) == 0) ++ if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 && ++ (be64_to_cpu(data->pestB[i]) >> 63) == 0) + continue; + pr_info(" PE[%3d] PESTA = 0x%016llx\n", i, data->pestA[i]); + pr_info(" PESTB = 0x%016llx\n", data->pestB[i]); +diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h +index 9c33ed4e666f..b6017ace1515 100644 +--- a/arch/s390/include/asm/uaccess.h ++++ b/arch/s390/include/asm/uaccess.h +@@ -164,28 +164,28 @@ extern int __put_user_bad(void) __attribute__((noreturn)); + __chk_user_ptr(ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: { \ +- unsigned char __x; \ ++ unsigned char __x = 0; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ + break; \ + }; \ + case 2: { \ +- unsigned short __x; \ ++ unsigned short __x = 0; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ + break; \ + }; \ + case 4: { \ +- unsigned int __x; \ ++ unsigned int __x = 0; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ + break; \ + }; \ + case 8: { \ +- unsigned long long __x; \ ++ unsigned long long __x = 0; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ +diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h +index ab66ddde777b..69326dfb894d 100644 +--- a/arch/score/include/asm/uaccess.h ++++ b/arch/score/include/asm/uaccess.h +@@ -158,7 +158,7 @@ do { \ + __get_user_asm(val, "lw", ptr); \ + 
break; \ + case 8: \ +- if ((copy_from_user((void *)&val, ptr, 8)) == 0) \ ++ if (__copy_from_user((void *)&val, ptr, 8) == 0) \ + __gu_err = 0; \ + else \ + __gu_err = -EFAULT; \ +@@ -183,6 +183,8 @@ do { \ + \ + if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \ + __get_user_common((x), size, __gu_ptr); \ ++ else \ ++ (x) = 0; \ + \ + __gu_err; \ + }) +@@ -196,6 +198,7 @@ do { \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3:li %0, %4\n" \ ++ "li %1, 0\n" \ + "j 2b\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n" \ +@@ -293,35 +296,34 @@ extern int __copy_tofrom_user(void *to, const void *from, unsigned long len); + static inline unsigned long + copy_from_user(void *to, const void *from, unsigned long len) + { +- unsigned long over; ++ unsigned long res = len; + +- if (access_ok(VERIFY_READ, from, len)) +- return __copy_tofrom_user(to, from, len); ++ if (likely(access_ok(VERIFY_READ, from, len))) ++ res = __copy_tofrom_user(to, from, len); + +- if ((unsigned long)from < TASK_SIZE) { +- over = (unsigned long)from + len - TASK_SIZE; +- return __copy_tofrom_user(to, from, len - over) + over; +- } +- return len; ++ if (unlikely(res)) ++ memset(to + (len - res), 0, res); ++ ++ return res; + } + + static inline unsigned long + copy_to_user(void *to, const void *from, unsigned long len) + { +- unsigned long over; +- +- if (access_ok(VERIFY_WRITE, to, len)) +- return __copy_tofrom_user(to, from, len); ++ if (likely(access_ok(VERIFY_WRITE, to, len))) ++ len = __copy_tofrom_user(to, from, len); + +- if ((unsigned long)to < TASK_SIZE) { +- over = (unsigned long)to + len - TASK_SIZE; +- return __copy_tofrom_user(to, from, len - over) + over; +- } + return len; + } + +-#define __copy_from_user(to, from, len) \ +- __copy_tofrom_user((to), (from), (len)) ++static inline unsigned long ++__copy_from_user(void *to, const void *from, unsigned long len) ++{ ++ unsigned long left = __copy_tofrom_user(to, from, len); ++ if (unlikely(left)) ++ memset(to + (len - left), 0, left); ++ return left; ++} + + #define __copy_to_user(to, from, len) \ + __copy_tofrom_user((to), (from), (len)) +@@ -335,17 +337,17 @@ __copy_to_user_inatomic(void *to, const void *from, unsigned long len) + static inline unsigned long + __copy_from_user_inatomic(void *to, const void *from, unsigned long len) + { +- return __copy_from_user(to, from, len); ++ return __copy_tofrom_user(to, from, len); + } + +-#define __copy_in_user(to, from, len) __copy_from_user(to, from, len) ++#define __copy_in_user(to, from, len) __copy_tofrom_user(to, from, len) + + static inline unsigned long + copy_in_user(void *to, const void *from, unsigned long len) + { + if (access_ok(VERIFY_READ, from, len) && + access_ok(VERFITY_WRITE, to, len)) +- return copy_from_user(to, from, len); ++ return __copy_tofrom_user(to, from, len); + } + + /* +diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h +index 9486376605f4..c04cc18ae9cd 100644 +--- a/arch/sh/include/asm/uaccess.h ++++ b/arch/sh/include/asm/uaccess.h +@@ -151,7 +151,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n) + __kernel_size_t __copy_size = (__kernel_size_t) n; + + if (__copy_size && __access_ok(__copy_from, __copy_size)) +- return __copy_user(to, from, __copy_size); ++ __copy_size = __copy_user(to, from, __copy_size); ++ ++ if (unlikely(__copy_size)) ++ memset(to + (n - __copy_size), 0, __copy_size); + + return __copy_size; + } +diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h +index 2e07e0f40c6a..a2f9d0531328 100644 
+--- a/arch/sh/include/asm/uaccess_64.h
++++ b/arch/sh/include/asm/uaccess_64.h
+@@ -24,6 +24,7 @@
+ #define __get_user_size(x,ptr,size,retval)			\
+ do {								\
+ 	retval = 0;						\
++	x = 0;							\
+ 	switch (size) {						\
+ 	case 1:							\
+ 		retval = __get_user_asm_b((void *)&x,		\
+diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
+index 53a28dd59f59..01f602858de1 100644
+--- a/arch/sparc/include/asm/uaccess_32.h
++++ b/arch/sparc/include/asm/uaccess_32.h
+@@ -265,8 +265,10 @@ static inline unsigned long copy_from_user(void *to, const void __user *from, un
+ {
+ 	if (n && __access_ok((unsigned long) from, n))
+ 		return __copy_user((__force void __user *) to, from, n);
+-	else
++	else {
++		memset(to, 0, n);
+ 		return n;
++	}
+ }
+ 
+ static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
+index 5ac397ec6986..9df6d0d6d187 100644
+--- a/arch/tile/kernel/time.c
++++ b/arch/tile/kernel/time.c
+@@ -215,8 +215,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
+  */
+ unsigned long long sched_clock(void)
+ {
+-	return clocksource_cyc2ns(get_cycles(),
+-				  sched_clock_mult, SCHED_CLOCK_SHIFT);
++	return mult_frac(get_cycles(),
++			 sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT);
+ }
+ 
+ int setup_profiling_timer(unsigned int multiplier)
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 7194d9f094bc..349cf190d236 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -7,7 +7,7 @@
+ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo
+ 
+ KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
+-KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
++KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC)
+ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+ cflags-$(CONFIG_X86_32) := -march=i386
+ cflags-$(CONFIG_X86_64) := -mcmodel=small
+@@ -20,6 +20,18 @@ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+ 
+ LDFLAGS := -m elf_$(UTS_MACHINE)
++ifeq ($(CONFIG_RELOCATABLE),y)
++# If kernel is relocatable, build compressed kernel as PIE.
++ifeq ($(CONFIG_X86_32),y)
++LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
++else
++# To build 64-bit compressed kernel as PIE, we disable relocation
++# overflow check to avoid relocation overflow error with a new linker
++# command-line option, -z noreloc-overflow.
++LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
++	&& echo "-z noreloc-overflow -pie --no-dynamic-linker")
++endif
++endif
+ LDFLAGS_vmlinux := -T
+ 
+ hostprogs-y	:= mkpiggy
+diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
+index 3b28eff9b90b..104d7e46a6c2 100644
+--- a/arch/x86/boot/compressed/head_32.S
++++ b/arch/x86/boot/compressed/head_32.S
+@@ -30,6 +30,34 @@
+ #include <asm/boot.h>
+ #include <asm/asm-offsets.h>
+ 
++/*
++ * The 32-bit x86 assembler in binutils 2.26 will generate R_386_GOT32X
++ * relocation to get the symbol address in PIC. When the compressed x86
++ * kernel isn't built as PIC, the linker optimizes R_386_GOT32X
++ * relocations to their fixed symbol addresses. However, when the
++ * compressed x86 kernel is loaded at a different address, it leads
++ * to the following load failure:
++ *
++ *   Failed to allocate space for phdrs
++ *
++ * during the decompression stage. 
++ *
++ * If the compressed x86 kernel is relocatable at run-time, it should be
++ * compiled with -fPIE, instead of -fPIC, if possible and should be built as
++ * Position Independent Executable (PIE) so that linker won't optimize
++ * R_386_GOT32X relocation to its fixed symbol address. Older
++ * linkers generate R_386_32 relocations against locally defined symbols,
++ * _bss, _ebss, _got and _egot, in PIE. It isn't wrong, just less
++ * optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle
++ * R_386_32 relocations when relocating the kernel. To generate
++ * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as
++ * hidden:
++ */
++	.hidden _bss
++	.hidden _ebss
++	.hidden _got
++	.hidden _egot
++
+ 	__HEAD
+ ENTRY(startup_32)
+ #ifdef CONFIG_EFI_STUB
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index 92059b8f3f7b..6ac508a75ae5 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -34,6 +34,14 @@
+ #include <asm/processor-flags.h>
+ #include <asm/asm-offsets.h>
+ 
++/*
++ * Locally defined symbols should be marked hidden:
++ */
++	.hidden _bss
++	.hidden _ebss
++	.hidden _got
++	.hidden _egot
++
+ 	__HEAD
+ 	.code32
+ ENTRY(startup_32)
+diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
+index 68c05398bba9..7aadd3cea843 100644
+--- a/arch/x86/include/asm/hugetlb.h
++++ b/arch/x86/include/asm/hugetlb.h
+@@ -4,6 +4,7 @@
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ 
++#define hugepages_supported() cpu_has_pse
+ 
+ static inline int is_hugepage_only_range(struct mm_struct *mm,
+ 					 unsigned long addr,
+diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
+index 50a7fc0f824a..fb3285805beb 100644
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -17,7 +17,14 @@
+ 
+ static inline void __native_flush_tlb(void)
+ {
++	/*
++	 * If current->mm == NULL then we borrow a mm which may change during a
++	 * task switch and therefore we must not be preempted while we write CR3
++	 * back:
++	 */
++	preempt_disable();
+ 	native_write_cr3(native_read_cr3());
++	preempt_enable();
+ }
+ 
+ static inline void __native_flush_tlb_global_irq_disabled(void)
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 5ee26875baea..995c49aa1a19 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -381,7 +381,7 @@ do {									\
+ 	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
+ 		     "2:\n"						\
+ 		     _ASM_EXTABLE_EX(1b, 2b)				\
+-		     : ltype(x) : "m" (__m(addr)))
++		     : ltype(x) : "m" (__m(addr)), "0" (0))
+ 
+ #define __put_user_nocheck(x, ptr, size)			\
+ ({								\
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 9620d18cb638..3cd8bfc3c4b6 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1581,6 +1581,9 @@ void __init enable_IR_x2apic(void)
+ 	int ret, x2apic_enabled = 0;
+ 	int hardware_init_ret;
+ 
++	if (skip_ioapic_setup)
++		return;
++
+ 	/* Make sure irq_remap_ops are initialized */
+ 	setup_irq_remapping_ops();
+ 
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index 8060c8b95b3a..b7e330c57a49 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -586,7 +586,7 @@ early_idt_handler_common:
+ 	movl %eax,%ds
+ 	movl %eax,%es
+ 
+-	cmpl $(__KERNEL_CS),32(%esp)
++	cmpw $(__KERNEL_CS),32(%esp)
+ 	jne 10f
+ 
+ 	leal 28(%esp),%eax	# Pointer to %eip
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index cd6de64cc480..8baf3acd7074 100644
+--- 
a/arch/x86/kernel/paravirt.c ++++ b/arch/x86/kernel/paravirt.c +@@ -46,12 +46,12 @@ void _paravirt_nop(void) + } + + /* identity function, which can be inlined */ +-u32 _paravirt_ident_32(u32 x) ++u32 notrace _paravirt_ident_32(u32 x) + { + return x; + } + +-u64 _paravirt_ident_64(u64 x) ++u64 notrace _paravirt_ident_64(u64 x) + { + return x; + } +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 335fe70967a8..7e9ca58ae875 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -366,6 +366,7 @@ struct nested_vmx { + struct list_head vmcs02_pool; + int vmcs02_num; + u64 vmcs01_tsc_offset; ++ bool change_vmcs01_virtual_x2apic_mode; + /* L2 must run next, and mustn't decide to exit to L1. */ + bool nested_run_pending; + /* +@@ -6702,6 +6703,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) + { + u32 sec_exec_control; + ++ /* Postpone execution until vmcs01 is the current VMCS. */ ++ if (is_guest_mode(vcpu)) { ++ to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true; ++ return; ++ } ++ + /* + * There is not point to enable virtualize x2apic without enable + * apicv +@@ -8085,6 +8092,12 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu) + /* Update TSC_OFFSET if TSC was changed while L2 ran */ + vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); + ++ if (vmx->nested.change_vmcs01_virtual_x2apic_mode) { ++ vmx->nested.change_vmcs01_virtual_x2apic_mode = false; ++ vmx_set_virtual_x2apic_mode(vcpu, ++ vcpu->arch.apic_base & X2APIC_ENABLE); ++ } ++ + /* This is needed for same reason as it was needed in prepare_vmcs02 */ + vmx->host_rsp = 0; + +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 8e57771d4bfd..b70b67bde90d 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -178,7 +178,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn) + struct kvm_shared_msrs *locals + = container_of(urn, struct kvm_shared_msrs, urn); + struct kvm_shared_msr_values *values; ++ unsigned long flags; + ++ /* ++ * Disabling irqs at this point since the following code could be ++ * interrupted and executed through kvm_arch_hardware_disable() ++ */ ++ local_irq_save(flags); ++ if (locals->registered) { ++ locals->registered = false; ++ user_return_notifier_unregister(urn); ++ } ++ local_irq_restore(flags); + for (slot = 0; slot < shared_msrs_global.nr; ++slot) { + values = &locals->values[slot]; + if (values->host != values->curr) { +@@ -186,8 +197,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn) + values->curr = values->host; + } + } +- locals->registered = false; +- user_return_notifier_unregister(urn); + } + + static void shared_msr_update(unsigned slot, u32 msr) +@@ -3182,6 +3191,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, + }; + case KVM_SET_VAPIC_ADDR: { + struct kvm_vapic_addr va; ++ int idx; + + r = -EINVAL; + if (!irqchip_in_kernel(vcpu->kvm)) +@@ -3189,7 +3199,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, + r = -EFAULT; + if (copy_from_user(&va, argp, sizeof va)) + goto out; ++ idx = srcu_read_lock(&vcpu->kvm->srcu); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); ++ srcu_read_unlock(&vcpu->kvm->srcu, idx); + break; + } + case KVM_X86_SETUP_MCE: { +@@ -6509,11 +6521,13 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) + + void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) + { ++ void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask; ++ + kvmclock_reset(vcpu); + +- free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); + fx_free(vcpu); + kvm_x86_ops->vcpu_free(vcpu); ++ 
free_cpumask_var(wbinvd_dirty_mask); + } + + struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, +diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c +index 657438858e83..7f0c8da7ecea 100644 +--- a/arch/x86/mm/pat.c ++++ b/arch/x86/mm/pat.c +@@ -505,11 +505,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) + return 1; + + while (cursor < to) { +- if (!devmem_is_allowed(pfn)) { +- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n", +- current->comm, from, to - 1); ++ if (!devmem_is_allowed(pfn)) + return 0; +- } + cursor += PAGE_SIZE; + pfn++; + } +diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h +index 7d01b8c56c00..1da6bb44f94f 100644 +--- a/arch/x86/um/asm/barrier.h ++++ b/arch/x86/um/asm/barrier.h +@@ -51,11 +51,7 @@ + + #else /* CONFIG_SMP */ + +-#define smp_mb() barrier() +-#define smp_rmb() barrier() +-#define smp_wmb() barrier() +-#define smp_read_barrier_depends() do { } while (0) +-#define set_mb(var, value) do { var = value; barrier(); } while (0) ++#include <asm-generic/barrier.h> + + #endif /* CONFIG_SMP */ +diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c +index fdc3ba28ca38..53b061c9ad7e 100644 +--- a/arch/x86/xen/mmu.c ++++ b/arch/x86/xen/mmu.c +@@ -1187,7 +1187,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr, + + /* NOTE: The loop is more greedy than the cleanup_highmap variant. + * We include the PMD passed in on _both_ boundaries. */ +- for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE)); ++ for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD)); + pmd++, vaddr += PMD_SIZE) { + if (pmd_none(*pmd)) + continue; +diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c +index 69111c5c352c..ddb0ebb89f47 100644 +--- a/block/cfq-iosched.c ++++ b/block/cfq-iosched.c +@@ -2812,7 +2812,6 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq) + if (time_before(jiffies, rq_fifo_time(rq))) + rq = NULL; + +- cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); + return rq; + } + +@@ -3186,6 +3185,9 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) + { + unsigned int max_dispatch; + ++ if (cfq_cfqq_must_dispatch(cfqq)) ++ return true; ++ + /* + * Drain async requests before we start sync IO + */ +@@ -3277,15 +3279,20 @@ static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) + + BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); + ++ rq = cfq_check_fifo(cfqq); ++ if (rq) ++ cfq_mark_cfqq_must_dispatch(cfqq); ++ + if (!cfq_may_dispatch(cfqd, cfqq)) + return false; + + /* + * follow expired path, else get first next available + */ +- rq = cfq_check_fifo(cfqq); + if (!rq) + rq = cfqq->next_rq; ++ else ++ cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); + + /* + * insert request into driver dispatch list +@@ -3794,7 +3801,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, + * if the new request is sync, but the currently running queue is + * not, let the sync request have priority. 
+ */ +- if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq)) ++ if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) + return true; + + if (new_cfqq->cfqg != cfqq->cfqg) +diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c +index ebcec7439a1a..2b6dd7401632 100644 +--- a/crypto/ablkcipher.c ++++ b/crypto/ablkcipher.c +@@ -379,6 +379,7 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type, + } + crt->base = __crypto_ablkcipher_cast(tfm); + crt->ivsize = alg->ivsize; ++ crt->has_setkey = alg->max_keysize; + + return 0; + } +@@ -460,6 +461,7 @@ static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type, + crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt; + crt->base = __crypto_ablkcipher_cast(tfm); + crt->ivsize = alg->ivsize; ++ crt->has_setkey = alg->max_keysize; + + return 0; + } +diff --git a/crypto/af_alg.c b/crypto/af_alg.c +index 1aaa555fab56..68ec1ac4104a 100644 +--- a/crypto/af_alg.c ++++ b/crypto/af_alg.c +@@ -76,6 +76,8 @@ int af_alg_register_type(const struct af_alg_type *type) + goto unlock; + + type->ops->owner = THIS_MODULE; ++ if (type->ops_nokey) ++ type->ops_nokey->owner = THIS_MODULE; + node->type = type; + list_add(&node->list, &alg_types); + err = 0; +@@ -125,6 +127,26 @@ int af_alg_release(struct socket *sock) + } + EXPORT_SYMBOL_GPL(af_alg_release); + ++void af_alg_release_parent(struct sock *sk) ++{ ++ struct alg_sock *ask = alg_sk(sk); ++ unsigned int nokey = ask->nokey_refcnt; ++ bool last = nokey && !ask->refcnt; ++ ++ sk = ask->parent; ++ ask = alg_sk(sk); ++ ++ lock_sock(sk); ++ ask->nokey_refcnt -= nokey; ++ if (!last) ++ last = !--ask->refcnt; ++ release_sock(sk); ++ ++ if (last) ++ sock_put(sk); ++} ++EXPORT_SYMBOL_GPL(af_alg_release_parent); ++ + static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + { + struct sock *sk = sock->sk; +@@ -132,6 +154,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + struct sockaddr_alg *sa = (void *)uaddr; + const struct af_alg_type *type; + void *private; ++ int err; + + if (sock->state == SS_CONNECTED) + return -EINVAL; +@@ -157,16 +180,22 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + return PTR_ERR(private); + } + ++ err = -EBUSY; + lock_sock(sk); ++ if (ask->refcnt | ask->nokey_refcnt) ++ goto unlock; + + swap(ask->type, type); + swap(ask->private, private); + ++ err = 0; ++ ++unlock: + release_sock(sk); + + alg_do_release(type, private); + +- return 0; ++ return err; + } + + static int alg_setkey(struct sock *sk, char __user *ukey, +@@ -199,11 +228,15 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + const struct af_alg_type *type; +- int err = -ENOPROTOOPT; ++ int err = -EBUSY; + + lock_sock(sk); ++ if (ask->refcnt) ++ goto unlock; ++ + type = ask->type; + ++ err = -ENOPROTOOPT; + if (level != SOL_ALG || !type) + goto unlock; + +@@ -228,6 +261,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) + struct alg_sock *ask = alg_sk(sk); + const struct af_alg_type *type; + struct sock *sk2; ++ unsigned int nokey; + int err; + + lock_sock(sk); +@@ -247,18 +281,29 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) + security_sk_clone(sk, sk2); + + err = type->accept(ask->private, sk2); ++ ++ nokey = err == -ENOKEY; ++ if (nokey && type->accept_nokey) ++ err = type->accept_nokey(ask->private, sk2); ++ + if (err) + goto unlock; + + sk2->sk_family = PF_ALG; + +- 
sock_hold(sk); ++ if (nokey || !ask->refcnt++) ++ sock_hold(sk); ++ ask->nokey_refcnt += nokey; + alg_sk(sk2)->parent = sk; + alg_sk(sk2)->type = type; ++ alg_sk(sk2)->nokey_refcnt = nokey; + + newsock->ops = type->ops; + newsock->state = SS_CONNECTED; + ++ if (nokey) ++ newsock->ops = type->ops_nokey; ++ + err = 0; + + unlock: +diff --git a/crypto/ahash.c b/crypto/ahash.c +index bcd5efc7eb4c..781a8a73a7ff 100644 +--- a/crypto/ahash.c ++++ b/crypto/ahash.c +@@ -370,6 +370,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) + struct ahash_alg *alg = crypto_ahash_alg(hash); + + hash->setkey = ahash_nosetkey; ++ hash->has_setkey = false; + hash->export = ahash_no_export; + hash->import = ahash_no_import; + +@@ -382,8 +383,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) + hash->finup = alg->finup ?: ahash_def_finup; + hash->digest = alg->digest; + +- if (alg->setkey) ++ if (alg->setkey) { + hash->setkey = alg->setkey; ++ hash->has_setkey = true; ++ } + if (alg->export) + hash->export = alg->export; + if (alg->import) +diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c +index c542c0d88afd..d11d431251f7 100644 +--- a/crypto/algif_hash.c ++++ b/crypto/algif_hash.c +@@ -34,6 +34,11 @@ struct hash_ctx { + struct ahash_request req; + }; + ++struct algif_hash_tfm { ++ struct crypto_ahash *hash; ++ bool has_key; ++}; ++ + static int hash_sendmsg(struct kiocb *unused, struct socket *sock, + struct msghdr *msg, size_t ignored) + { +@@ -248,19 +253,151 @@ static struct proto_ops algif_hash_ops = { + .accept = hash_accept, + }; + ++static int hash_check_key(struct socket *sock) ++{ ++ int err = 0; ++ struct sock *psk; ++ struct alg_sock *pask; ++ struct algif_hash_tfm *tfm; ++ struct sock *sk = sock->sk; ++ struct alg_sock *ask = alg_sk(sk); ++ ++ lock_sock(sk); ++ if (ask->refcnt) ++ goto unlock_child; ++ ++ psk = ask->parent; ++ pask = alg_sk(ask->parent); ++ tfm = pask->private; ++ ++ err = -ENOKEY; ++ lock_sock_nested(psk, SINGLE_DEPTH_NESTING); ++ if (!tfm->has_key) ++ goto unlock; ++ ++ if (!pask->refcnt++) ++ sock_hold(psk); ++ ++ ask->refcnt = 1; ++ sock_put(psk); ++ ++ err = 0; ++ ++unlock: ++ release_sock(psk); ++unlock_child: ++ release_sock(sk); ++ ++ return err; ++} ++ ++static int hash_sendmsg_nokey(struct kiocb *unused, struct socket *sock, ++ struct msghdr *msg, size_t size) ++{ ++ int err; ++ ++ err = hash_check_key(sock); ++ if (err) ++ return err; ++ ++ return hash_sendmsg(unused, sock, msg, size); ++} ++ ++static ssize_t hash_sendpage_nokey(struct socket *sock, struct page *page, ++ int offset, size_t size, int flags) ++{ ++ int err; ++ ++ err = hash_check_key(sock); ++ if (err) ++ return err; ++ ++ return hash_sendpage(sock, page, offset, size, flags); ++} ++ ++static int hash_recvmsg_nokey(struct kiocb *unused, struct socket *sock, ++ struct msghdr *msg, size_t ignored, int flags) ++{ ++ int err; ++ ++ err = hash_check_key(sock); ++ if (err) ++ return err; ++ ++ return hash_recvmsg(unused, sock, msg, ignored, flags); ++} ++ ++static int hash_accept_nokey(struct socket *sock, struct socket *newsock, ++ int flags) ++{ ++ int err; ++ ++ err = hash_check_key(sock); ++ if (err) ++ return err; ++ ++ return hash_accept(sock, newsock, flags); ++} ++ ++static struct proto_ops algif_hash_ops_nokey = { ++ .family = PF_ALG, ++ ++ .connect = sock_no_connect, ++ .socketpair = sock_no_socketpair, ++ .getname = sock_no_getname, ++ .ioctl = sock_no_ioctl, ++ .listen = sock_no_listen, ++ .shutdown = sock_no_shutdown, ++ .getsockopt = sock_no_getsockopt, ++ .mmap = 
sock_no_mmap, ++ .bind = sock_no_bind, ++ .setsockopt = sock_no_setsockopt, ++ .poll = sock_no_poll, ++ ++ .release = af_alg_release, ++ .sendmsg = hash_sendmsg_nokey, ++ .sendpage = hash_sendpage_nokey, ++ .recvmsg = hash_recvmsg_nokey, ++ .accept = hash_accept_nokey, ++}; ++ + static void *hash_bind(const char *name, u32 type, u32 mask) + { +- return crypto_alloc_ahash(name, type, mask); ++ struct algif_hash_tfm *tfm; ++ struct crypto_ahash *hash; ++ ++ tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); ++ if (!tfm) ++ return ERR_PTR(-ENOMEM); ++ ++ hash = crypto_alloc_ahash(name, type, mask); ++ if (IS_ERR(hash)) { ++ kfree(tfm); ++ return ERR_CAST(hash); ++ } ++ ++ tfm->hash = hash; ++ ++ return tfm; + } + + static void hash_release(void *private) + { +- crypto_free_ahash(private); ++ struct algif_hash_tfm *tfm = private; ++ ++ crypto_free_ahash(tfm->hash); ++ kfree(tfm); + } + + static int hash_setkey(void *private, const u8 *key, unsigned int keylen) + { +- return crypto_ahash_setkey(private, key, keylen); ++ struct algif_hash_tfm *tfm = private; ++ int err; ++ ++ err = crypto_ahash_setkey(tfm->hash, key, keylen); ++ tfm->has_key = !err; ++ ++ return err; + } + + static void hash_sock_destruct(struct sock *sk) +@@ -274,12 +411,14 @@ static void hash_sock_destruct(struct sock *sk) + af_alg_release_parent(sk); + } + +-static int hash_accept_parent(void *private, struct sock *sk) ++static int hash_accept_parent_nokey(void *private, struct sock *sk) + { + struct hash_ctx *ctx; + struct alg_sock *ask = alg_sk(sk); +- unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private); +- unsigned ds = crypto_ahash_digestsize(private); ++ struct algif_hash_tfm *tfm = private; ++ struct crypto_ahash *hash = tfm->hash; ++ unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash); ++ unsigned ds = crypto_ahash_digestsize(hash); + + ctx = sock_kmalloc(sk, len, GFP_KERNEL); + if (!ctx) +@@ -299,7 +438,7 @@ static int hash_accept_parent(void *private, struct sock *sk) + + ask->private = ctx; + +- ahash_request_set_tfm(&ctx->req, private); ++ ahash_request_set_tfm(&ctx->req, hash); + ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, + af_alg_complete, &ctx->completion); + +@@ -308,12 +447,24 @@ static int hash_accept_parent(void *private, struct sock *sk) + return 0; + } + ++static int hash_accept_parent(void *private, struct sock *sk) ++{ ++ struct algif_hash_tfm *tfm = private; ++ ++ if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash)) ++ return -ENOKEY; ++ ++ return hash_accept_parent_nokey(private, sk); ++} ++ + static const struct af_alg_type algif_type_hash = { + .bind = hash_bind, + .release = hash_release, + .setkey = hash_setkey, + .accept = hash_accept_parent, ++ .accept_nokey = hash_accept_parent_nokey, + .ops = &algif_hash_ops, ++ .ops_nokey = &algif_hash_ops_nokey, + .name = "hash", + .owner = THIS_MODULE + }; +diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c +index 83187f497c7c..ea05c531db26 100644 +--- a/crypto/algif_skcipher.c ++++ b/crypto/algif_skcipher.c +@@ -31,6 +31,11 @@ struct skcipher_sg_list { + struct scatterlist sg[0]; + }; + ++struct skcipher_tfm { ++ struct crypto_ablkcipher *skcipher; ++ bool has_key; ++}; ++ + struct skcipher_ctx { + struct list_head tsgl; + struct af_alg_sgl rsgl; +@@ -441,13 +446,6 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock, + char __user *from = iov->iov_base; + + while (seglen) { +- sgl = list_first_entry(&ctx->tsgl, +- struct skcipher_sg_list, list); +- sg = sgl->sg; +- +- while (!sg->length) +- 
sg++; +- + used = ctx->used; + if (!used) { + err = skcipher_wait_for_data(sk, flags); +@@ -469,6 +467,13 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock, + if (!used) + goto free; + ++ sgl = list_first_entry(&ctx->tsgl, ++ struct skcipher_sg_list, list); ++ sg = sgl->sg; ++ ++ while (!sg->length) ++ sg++; ++ + ablkcipher_request_set_crypt(&ctx->req, sg, + ctx->rsgl.sg, used, + ctx->iv); +@@ -544,19 +549,139 @@ static struct proto_ops algif_skcipher_ops = { + .poll = skcipher_poll, + }; + ++static int skcipher_check_key(struct socket *sock) ++{ ++ int err = 0; ++ struct sock *psk; ++ struct alg_sock *pask; ++ struct skcipher_tfm *tfm; ++ struct sock *sk = sock->sk; ++ struct alg_sock *ask = alg_sk(sk); ++ ++ lock_sock(sk); ++ if (ask->refcnt) ++ goto unlock_child; ++ ++ psk = ask->parent; ++ pask = alg_sk(ask->parent); ++ tfm = pask->private; ++ ++ err = -ENOKEY; ++ lock_sock_nested(psk, SINGLE_DEPTH_NESTING); ++ if (!tfm->has_key) ++ goto unlock; ++ ++ if (!pask->refcnt++) ++ sock_hold(psk); ++ ++ ask->refcnt = 1; ++ sock_put(psk); ++ ++ err = 0; ++ ++unlock: ++ release_sock(psk); ++unlock_child: ++ release_sock(sk); ++ ++ return err; ++} ++ ++static int skcipher_sendmsg_nokey(struct kiocb *unused, struct socket *sock, ++ struct msghdr *msg, size_t size) ++{ ++ int err; ++ ++ err = skcipher_check_key(sock); ++ if (err) ++ return err; ++ ++ return skcipher_sendmsg(unused, sock, msg, size); ++} ++ ++static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page, ++ int offset, size_t size, int flags) ++{ ++ int err; ++ ++ err = skcipher_check_key(sock); ++ if (err) ++ return err; ++ ++ return skcipher_sendpage(sock, page, offset, size, flags); ++} ++ ++static int skcipher_recvmsg_nokey(struct kiocb *unused, struct socket *sock, ++ struct msghdr *msg, size_t ignored, int flags) ++{ ++ int err; ++ ++ err = skcipher_check_key(sock); ++ if (err) ++ return err; ++ ++ return skcipher_recvmsg(unused, sock, msg, ignored, flags); ++} ++ ++static struct proto_ops algif_skcipher_ops_nokey = { ++ .family = PF_ALG, ++ ++ .connect = sock_no_connect, ++ .socketpair = sock_no_socketpair, ++ .getname = sock_no_getname, ++ .ioctl = sock_no_ioctl, ++ .listen = sock_no_listen, ++ .shutdown = sock_no_shutdown, ++ .getsockopt = sock_no_getsockopt, ++ .mmap = sock_no_mmap, ++ .bind = sock_no_bind, ++ .accept = sock_no_accept, ++ .setsockopt = sock_no_setsockopt, ++ ++ .release = af_alg_release, ++ .sendmsg = skcipher_sendmsg_nokey, ++ .sendpage = skcipher_sendpage_nokey, ++ .recvmsg = skcipher_recvmsg_nokey, ++ .poll = skcipher_poll, ++}; ++ + static void *skcipher_bind(const char *name, u32 type, u32 mask) + { +- return crypto_alloc_ablkcipher(name, type, mask); ++ struct skcipher_tfm *tfm; ++ struct crypto_ablkcipher *skcipher; ++ ++ tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); ++ if (!tfm) ++ return ERR_PTR(-ENOMEM); ++ ++ skcipher = crypto_alloc_ablkcipher(name, type, mask); ++ if (IS_ERR(skcipher)) { ++ kfree(tfm); ++ return ERR_CAST(skcipher); ++ } ++ ++ tfm->skcipher = skcipher; ++ ++ return tfm; + } + + static void skcipher_release(void *private) + { +- crypto_free_ablkcipher(private); ++ struct skcipher_tfm *tfm = private; ++ ++ crypto_free_ablkcipher(tfm->skcipher); ++ kfree(tfm); + } + + static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen) + { +- return crypto_ablkcipher_setkey(private, key, keylen); ++ struct skcipher_tfm *tfm = private; ++ int err; ++ ++ err = crypto_ablkcipher_setkey(tfm->skcipher, key, keylen); ++ tfm->has_key = !err; ++ 
++ return err; + } + + static void skcipher_sock_destruct(struct sock *sk) +@@ -571,24 +696,25 @@ static void skcipher_sock_destruct(struct sock *sk) + af_alg_release_parent(sk); + } + +-static int skcipher_accept_parent(void *private, struct sock *sk) ++static int skcipher_accept_parent_nokey(void *private, struct sock *sk) + { + struct skcipher_ctx *ctx; + struct alg_sock *ask = alg_sk(sk); +- unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private); ++ struct skcipher_tfm *tfm = private; ++ struct crypto_ablkcipher *skcipher = tfm->skcipher; ++ unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(skcipher); + + ctx = sock_kmalloc(sk, len, GFP_KERNEL); + if (!ctx) + return -ENOMEM; +- +- ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private), ++ ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(skcipher), + GFP_KERNEL); + if (!ctx->iv) { + sock_kfree_s(sk, ctx, len); + return -ENOMEM; + } + +- memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private)); ++ memset(ctx->iv, 0, crypto_ablkcipher_ivsize(skcipher)); + + INIT_LIST_HEAD(&ctx->tsgl); + ctx->len = len; +@@ -600,21 +726,33 @@ static int skcipher_accept_parent(void *private, struct sock *sk) + + ask->private = ctx; + +- ablkcipher_request_set_tfm(&ctx->req, private); ++ ablkcipher_request_set_tfm(&ctx->req, skcipher); + ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, +- af_alg_complete, &ctx->completion); ++ af_alg_complete, &ctx->completion); + + sk->sk_destruct = skcipher_sock_destruct; + + return 0; + } + ++static int skcipher_accept_parent(void *private, struct sock *sk) ++{ ++ struct skcipher_tfm *tfm = private; ++ ++ if (!tfm->has_key && crypto_ablkcipher_has_setkey(tfm->skcipher)) ++ return -ENOKEY; ++ ++ return skcipher_accept_parent_nokey(private, sk); ++} ++ + static const struct af_alg_type algif_type_skcipher = { + .bind = skcipher_bind, + .release = skcipher_release, + .setkey = skcipher_setkey, + .accept = skcipher_accept_parent, ++ .accept_nokey = skcipher_accept_parent_nokey, + .ops = &algif_skcipher_ops, ++ .ops_nokey = &algif_skcipher_ops_nokey, + .name = "skcipher", + .owner = THIS_MODULE + }; +diff --git a/crypto/cryptd.c b/crypto/cryptd.c +index 75c415d37086..d85fab975514 100644 +--- a/crypto/cryptd.c ++++ b/crypto/cryptd.c +@@ -565,9 +565,14 @@ static int cryptd_hash_export(struct ahash_request *req, void *out) + + static int cryptd_hash_import(struct ahash_request *req, const void *in) + { +- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ++ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); ++ struct shash_desc *desc = cryptd_shash_desc(req); ++ ++ desc->tfm = ctx->child; ++ desc->flags = req->base.flags; + +- return crypto_shash_import(&rctx->desc, in); ++ return crypto_shash_import(desc, in); + } + + static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, +diff --git a/crypto/gcm.c b/crypto/gcm.c +index 451e420ce56c..a1ec756b8438 100644 +--- a/crypto/gcm.c ++++ b/crypto/gcm.c +@@ -109,7 +109,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, + struct crypto_ablkcipher *ctr = ctx->ctr; + struct { + be128 hash; +- u8 iv[8]; ++ u8 iv[16]; + + struct crypto_gcm_setkey_result result; + +diff --git a/crypto/shash.c b/crypto/shash.c +index 929058a68561..ac4d76350d1b 100644 +--- a/crypto/shash.c ++++ b/crypto/shash.c +@@ -353,9 +353,10 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) + crt->final = shash_async_final; + crt->finup = 
shash_async_finup; + crt->digest = shash_async_digest; ++ crt->setkey = shash_async_setkey; ++ ++ crt->has_setkey = alg->setkey != shash_no_setkey; + +- if (alg->setkey) +- crt->setkey = shash_async_setkey; + if (alg->export) + crt->export = shash_async_export; + if (alg->import) +diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c +index fcd7d91cec34..070b843c37ee 100644 +--- a/drivers/acpi/apei/ghes.c ++++ b/drivers/acpi/apei/ghes.c +@@ -647,7 +647,7 @@ static int ghes_proc(struct ghes *ghes) + ghes_do_proc(ghes, ghes->estatus); + out: + ghes_clear_estatus(ghes); +- return 0; ++ return rc; + } + + static void ghes_add_timer(struct ghes *ghes) +diff --git a/drivers/base/core.c b/drivers/base/core.c +index 2a19097a7cb1..986fc4eeaae6 100644 +--- a/drivers/base/core.c ++++ b/drivers/base/core.c +@@ -827,11 +827,29 @@ static struct kobject *get_device_parent(struct device *dev, + return NULL; + } + ++static inline bool live_in_glue_dir(struct kobject *kobj, ++ struct device *dev) ++{ ++ if (!kobj || !dev->class || ++ kobj->kset != &dev->class->p->glue_dirs) ++ return false; ++ return true; ++} ++ ++static inline struct kobject *get_glue_dir(struct device *dev) ++{ ++ return dev->kobj.parent; ++} ++ ++/* ++ * make sure cleaning up dir as the last step, we need to make ++ * sure .release handler of kobject is run with holding the ++ * global lock ++ */ + static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) + { + /* see if we live in a "glue" directory */ +- if (!glue_dir || !dev->class || +- glue_dir->kset != &dev->class->p->glue_dirs) ++ if (!live_in_glue_dir(glue_dir, dev)) + return; + + mutex_lock(&gdp_mutex); +@@ -839,11 +857,6 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) + mutex_unlock(&gdp_mutex); + } + +-static void cleanup_device_parent(struct device *dev) +-{ +- cleanup_glue_dir(dev, dev->kobj.parent); +-} +- + static int device_add_class_symlinks(struct device *dev) + { + int error; +@@ -1007,6 +1020,7 @@ int device_add(struct device *dev) + struct kobject *kobj; + struct class_interface *class_intf; + int error = -EINVAL; ++ struct kobject *glue_dir = NULL; + + dev = get_device(dev); + if (!dev) +@@ -1051,8 +1065,10 @@ int device_add(struct device *dev) + /* first, register with generic layer. */ + /* we require the name to be set before, and pass NULL */ + error = kobject_add(&dev->kobj, dev->kobj.parent, NULL); +- if (error) ++ if (error) { ++ glue_dir = get_glue_dir(dev); + goto Error; ++ } + + /* notify platform of device entry */ + if (platform_notify) +@@ -1135,11 +1151,11 @@ done: + device_remove_file(dev, &uevent_attr); + attrError: + kobject_uevent(&dev->kobj, KOBJ_REMOVE); ++ glue_dir = get_glue_dir(dev); + kobject_del(&dev->kobj); + Error: +- cleanup_device_parent(dev); +- if (parent) +- put_device(parent); ++ cleanup_glue_dir(dev, glue_dir); ++ put_device(parent); + name_error: + kfree(dev->p); + dev->p = NULL; +@@ -1210,6 +1226,7 @@ void put_device(struct device *dev) + void device_del(struct device *dev) + { + struct device *parent = dev->parent; ++ struct kobject *glue_dir = NULL; + struct class_interface *class_intf; + + /* Notify clients of device removal. 
This call must come +@@ -1251,8 +1268,9 @@ void device_del(struct device *dev) + if (platform_notify_remove) + platform_notify_remove(dev); + kobject_uevent(&dev->kobj, KOBJ_REMOVE); +- cleanup_device_parent(dev); ++ glue_dir = get_glue_dir(dev); + kobject_del(&dev->kobj); ++ cleanup_glue_dir(dev, glue_dir); + put_device(parent); + } + +diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c +index a5dca6affcbb..776fc08aff0b 100644 +--- a/drivers/block/drbd/drbd_main.c ++++ b/drivers/block/drbd/drbd_main.c +@@ -1771,7 +1771,7 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock, + * do we need to block DRBD_SIG if sock == &meta.socket ?? + * otherwise wake_asender() might interrupt some send_*Ack ! + */ +- rv = kernel_sendmsg(sock, &msg, &iov, 1, size); ++ rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); + if (rv == -EAGAIN) { + if (we_should_drop_the_connection(tconn, sock)) + break; +diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h +index 60103e2517ba..467cb48fcf38 100644 +--- a/drivers/block/xen-blkback/common.h ++++ b/drivers/block/xen-blkback/common.h +@@ -269,8 +269,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst, + struct blkif_x86_32_request *src) + { + int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; +- dst->operation = src->operation; +- switch (src->operation) { ++ dst->operation = ACCESS_ONCE(src->operation); ++ switch (dst->operation) { + case BLKIF_OP_READ: + case BLKIF_OP_WRITE: + case BLKIF_OP_WRITE_BARRIER: +@@ -305,8 +305,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst, + struct blkif_x86_64_request *src) + { + int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; +- dst->operation = src->operation; +- switch (src->operation) { ++ dst->operation = ACCESS_ONCE(src->operation); ++ switch (dst->operation) { + case BLKIF_OP_READ: + case BLKIF_OP_WRITE: + case BLKIF_OP_WRITE_BARRIER: +diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c +index 402ccfb625c5..b6ec73f320d6 100644 +--- a/drivers/char/hw_random/exynos-rng.c ++++ b/drivers/char/hw_random/exynos-rng.c +@@ -105,6 +105,7 @@ static int exynos_rng_probe(struct platform_device *pdev) + { + struct exynos_rng *exynos_rng; + struct resource *res; ++ int ret; + + exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng), + GFP_KERNEL); +@@ -132,7 +133,13 @@ static int exynos_rng_probe(struct platform_device *pdev) + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + +- return hwrng_register(&exynos_rng->rng); ++ ret = hwrng_register(&exynos_rng->rng); ++ if (ret) { ++ pm_runtime_dont_use_autosuspend(&pdev->dev); ++ pm_runtime_disable(&pdev->dev); ++ } ++ ++ return ret; + } + + static int exynos_rng_remove(struct platform_device *pdev) +diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c +index d2903e772270..2798fb1f91e2 100644 +--- a/drivers/char/hw_random/omap-rng.c ++++ b/drivers/char/hw_random/omap-rng.c +@@ -127,7 +127,12 @@ static int omap_rng_probe(struct platform_device *pdev) + dev_set_drvdata(&pdev->dev, priv); + + pm_runtime_enable(&pdev->dev); +- pm_runtime_get_sync(&pdev->dev); ++ ret = pm_runtime_get_sync(&pdev->dev); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret); ++ pm_runtime_put_noidle(&pdev->dev); ++ goto err_ioremap; ++ } + + ret = hwrng_register(&omap_rng_ops); + if (ret) +@@ -182,8 +187,15 @@ static int omap_rng_suspend(struct device *dev) + static int omap_rng_resume(struct 
device *dev) + { + struct omap_rng_private_data *priv = dev_get_drvdata(dev); ++ int ret; ++ ++ ret = pm_runtime_get_sync(dev); ++ if (ret < 0) { ++ dev_err(dev, "Failed to runtime_get device: %d\n", ret); ++ pm_runtime_put_noidle(dev); ++ return ret; ++ } + +- pm_runtime_get_sync(dev); + omap_rng_write_reg(priv, RNG_MASK_REG, 0x1); + + return 0; +diff --git a/drivers/char/mem.c b/drivers/char/mem.c +index 1ccbe9482faa..598ece77ee9e 100644 +--- a/drivers/char/mem.c ++++ b/drivers/char/mem.c +@@ -68,12 +68,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) + u64 cursor = from; + + while (cursor < to) { +- if (!devmem_is_allowed(pfn)) { +- printk(KERN_INFO +- "Program %s tried to access /dev/mem between %Lx->%Lx.\n", +- current->comm, from, to); ++ if (!devmem_is_allowed(pfn)) + return 0; +- } + cursor += PAGE_SIZE; + pfn++; + } +diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c +index 3b367973a802..d5cbb7c242f6 100644 +--- a/drivers/devfreq/devfreq.c ++++ b/drivers/devfreq/devfreq.c +@@ -472,7 +472,7 @@ struct devfreq *devfreq_add_device(struct device *dev, + devfreq->profile->max_state * + devfreq->profile->max_state, + GFP_KERNEL); +- devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) * ++ devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) * + devfreq->profile->max_state, + GFP_KERNEL); + devfreq->last_stat_updated = jiffies; +diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c +index a9d98cdd11f4..9e15fc8df060 100644 +--- a/drivers/edac/edac_mc.c ++++ b/drivers/edac/edac_mc.c +@@ -968,7 +968,7 @@ static void edac_inc_ue_error(struct mem_ctl_info *mci, + mci->ue_mc += count; + + if (!enable_per_layer_report) { +- mci->ce_noinfo_count += count; ++ mci->ue_noinfo_count += count; + return; + } + +diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c +index 7bdb6fe63236..132131934c77 100644 +--- a/drivers/firewire/net.c ++++ b/drivers/firewire/net.c +@@ -73,13 +73,13 @@ struct rfc2734_header { + + #define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30) + #define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff)) +-#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16) ++#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1) + #define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff)) + #define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16) + +-#define fwnet_set_hdr_lf(lf) ((lf) << 30) ++#define fwnet_set_hdr_lf(lf) ((lf) << 30) + #define fwnet_set_hdr_ether_type(et) (et) +-#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16) ++#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16) + #define fwnet_set_hdr_fg_off(fgo) (fgo) + + #define fwnet_set_hdr_dgl(dgl) ((dgl) << 16) +@@ -591,6 +591,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, + int retval; + u16 ether_type; + ++ if (len <= RFC2374_UNFRAG_HDR_SIZE) ++ return 0; ++ + hdr.w0 = be32_to_cpu(buf[0]); + lf = fwnet_get_hdr_lf(&hdr); + if (lf == RFC2374_HDR_UNFRAG) { +@@ -615,7 +618,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, + return fwnet_finish_incoming_packet(net, skb, source_node_id, + is_broadcast, ether_type); + } ++ + /* A datagram fragment has been received, now the fun begins. 
*/ ++ ++ if (len <= RFC2374_FRAG_HDR_SIZE) ++ return 0; ++ + hdr.w1 = ntohl(buf[1]); + buf += 2; + len -= RFC2374_FRAG_HDR_SIZE; +@@ -627,7 +635,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, + fg_off = fwnet_get_hdr_fg_off(&hdr); + } + datagram_label = fwnet_get_hdr_dgl(&hdr); +- dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */ ++ dg_size = fwnet_get_hdr_dg_size(&hdr); ++ ++ if (fg_off + len > dg_size) ++ return 0; + + spin_lock_irqsave(&dev->lock, flags); + +@@ -735,6 +746,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, + fw_send_response(card, r, rcode); + } + ++static int gasp_source_id(__be32 *p) ++{ ++ return be32_to_cpu(p[0]) >> 16; ++} ++ ++static u32 gasp_specifier_id(__be32 *p) ++{ ++ return (be32_to_cpu(p[0]) & 0xffff) << 8 | ++ (be32_to_cpu(p[1]) & 0xff000000) >> 24; ++} ++ ++static u32 gasp_version(__be32 *p) ++{ ++ return be32_to_cpu(p[1]) & 0xffffff; ++} ++ + static void fwnet_receive_broadcast(struct fw_iso_context *context, + u32 cycle, size_t header_length, void *header, void *data) + { +@@ -744,9 +771,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context, + __be32 *buf_ptr; + int retval; + u32 length; +- u16 source_node_id; +- u32 specifier_id; +- u32 ver; + unsigned long offset; + unsigned long flags; + +@@ -763,22 +787,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context, + + spin_unlock_irqrestore(&dev->lock, flags); + +- specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8 +- | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24; +- ver = be32_to_cpu(buf_ptr[1]) & 0xffffff; +- source_node_id = be32_to_cpu(buf_ptr[0]) >> 16; +- +- if (specifier_id == IANA_SPECIFIER_ID && +- (ver == RFC2734_SW_VERSION ++ if (length > IEEE1394_GASP_HDR_SIZE && ++ gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID && ++ (gasp_version(buf_ptr) == RFC2734_SW_VERSION + #if IS_ENABLED(CONFIG_IPV6) +- || ver == RFC3146_SW_VERSION ++ || gasp_version(buf_ptr) == RFC3146_SW_VERSION + #endif +- )) { +- buf_ptr += 2; +- length -= IEEE1394_GASP_HDR_SIZE; +- fwnet_incoming_packet(dev, buf_ptr, length, source_node_id, ++ )) ++ fwnet_incoming_packet(dev, buf_ptr + 2, ++ length - IEEE1394_GASP_HDR_SIZE, ++ gasp_source_id(buf_ptr), + context->card->generation, true); +- } + + packet.payload_length = dev->rcv_buffer_size; + packet.interrupt = 1; +diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c +index 2aa3ca215bd6..d5376aa1c5e1 100644 +--- a/drivers/gpio/gpio-mpc8xxx.c ++++ b/drivers/gpio/gpio-mpc8xxx.c +@@ -295,7 +295,7 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int virq, + mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data; + + irq_set_chip_data(virq, h->host_data); +- irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq); ++ irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_edge_irq); + + return 0; + } +diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c +index c24c35606836..121680fbebb9 100644 +--- a/drivers/gpu/drm/drm_crtc.c ++++ b/drivers/gpu/drm/drm_crtc.c +@@ -3422,6 +3422,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, + int hdisplay, vdisplay; + int ret = -EINVAL; + ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || + page_flip->reserved != 0) + return -EINVAL; +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c +index 8ac333094991..4d09582744e6 100644 +--- 
a/drivers/gpu/drm/radeon/atombios_crtc.c ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c +@@ -257,6 +257,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) + atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); + atombios_blank_crtc(crtc, ATOM_DISABLE); + drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); ++ /* Make sure vblank interrupt is still enabled if needed */ ++ radeon_irq_set(rdev); + radeon_crtc_load_lut(crtc); + break; + case DRM_MODE_DPMS_STANDBY: +diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +index bc73021d3596..ae0d7b1cb9aa 100644 +--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c ++++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +@@ -331,6 +331,8 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) + WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl)); + } + drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); ++ /* Make sure vblank interrupt is still enabled if needed */ ++ radeon_irq_set(rdev); + radeon_crtc_load_lut(crtc); + break; + case DRM_MODE_DPMS_STANDBY: +diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c +index f7015592544f..6c92c20426d6 100644 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c +@@ -228,8 +228,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, + + rdev = radeon_get_rdev(bo->bdev); + ridx = radeon_copy_ring_index(rdev); +- old_start = old_mem->start << PAGE_SHIFT; +- new_start = new_mem->start << PAGE_SHIFT; ++ old_start = (u64)old_mem->start << PAGE_SHIFT; ++ new_start = (u64)new_mem->start << PAGE_SHIFT; + + switch (old_mem->mem_type) { + case TTM_PL_VRAM: +diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c +index 64c778f7756f..5f69c839d727 100644 +--- a/drivers/hv/hv_util.c ++++ b/drivers/hv/hv_util.c +@@ -244,10 +244,14 @@ static void heartbeat_onchannelcallback(void *context) + struct heartbeat_msg_data *heartbeat_msg; + u8 *hbeat_txf_buf = util_heartbeat.recv_buffer; + +- vmbus_recvpacket(channel, hbeat_txf_buf, +- PAGE_SIZE, &recvlen, &requestid); ++ while (1) { ++ ++ vmbus_recvpacket(channel, hbeat_txf_buf, ++ PAGE_SIZE, &recvlen, &requestid); ++ ++ if (!recvlen) ++ break; + +- if (recvlen > 0) { + icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[ + sizeof(struct vmbuspipe_hdr)]; + +diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c +index d9299dee37d1..dddaa161aadb 100644 +--- a/drivers/hwmon/adt7411.c ++++ b/drivers/hwmon/adt7411.c +@@ -30,6 +30,7 @@ + + #define ADT7411_REG_CFG1 0x18 + #define ADT7411_CFG1_START_MONITOR (1 << 0) ++#define ADT7411_CFG1_RESERVED_BIT3 (1 << 3) + + #define ADT7411_REG_CFG2 0x19 + #define ADT7411_CFG2_DISABLE_AVG (1 << 5) +@@ -292,8 +293,10 @@ static int adt7411_probe(struct i2c_client *client, + mutex_init(&data->device_lock); + mutex_init(&data->update_lock); + ++ /* According to the datasheet, we must only write 1 to bit 3 */ + ret = adt7411_modify_bit(client, ADT7411_REG_CFG1, +- ADT7411_CFG1_START_MONITOR, 1); ++ ADT7411_CFG1_RESERVED_BIT3 ++ | ADT7411_CFG1_START_MONITOR, 1); + if (ret < 0) + return ret; + +diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c +index ceabcfeb587c..c880d13f5405 100644 +--- a/drivers/i2c/busses/i2c-at91.c ++++ b/drivers/i2c/busses/i2c-at91.c +@@ -371,19 +371,57 @@ static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id) + + if (!irqstatus) + return IRQ_NONE; +- else if (irqstatus & AT91_TWI_RXRDY) +- at91_twi_read_next_byte(dev); +- else if (irqstatus & 
AT91_TWI_TXRDY) +- at91_twi_write_next_byte(dev); +- +- /* catch error flags */ +- dev->transfer_status |= status; + ++ /* ++ * When a NACK condition is detected, the I2C controller sets the NACK, ++ * TXCOMP and TXRDY bits all together in the Status Register (SR). ++ * ++ * 1 - Handling NACK errors with CPU write transfer. ++ * ++ * In such case, we should not write the next byte into the Transmit ++ * Holding Register (THR) otherwise the I2C controller would start a new ++ * transfer and the I2C slave is likely to reply by another NACK. ++ * ++ * 2 - Handling NACK errors with DMA write transfer. ++ * ++ * By setting the TXRDY bit in the SR, the I2C controller also triggers ++ * the DMA controller to write the next data into the THR. Then the ++ * result depends on the hardware version of the I2C controller. ++ * ++ * 2a - Without support of the Alternative Command mode. ++ * ++ * This is the worst case: the DMA controller is triggered to write the ++ * next data into the THR, hence starting a new transfer: the I2C slave ++ * is likely to reply by another NACK. ++ * Concurrently, this interrupt handler is likely to be called to manage ++ * the first NACK before the I2C controller detects the second NACK and ++ * sets once again the NACK bit into the SR. ++ * When handling the first NACK, this interrupt handler disables the I2C ++ * controller interruptions, especially the NACK interrupt. ++ * Hence, the NACK bit is pending into the SR. This is why we should ++ * read the SR to clear all pending interrupts at the beginning of ++ * at91_do_twi_transfer() before actually starting a new transfer. ++ * ++ * 2b - With support of the Alternative Command mode. ++ * ++ * When a NACK condition is detected, the I2C controller also locks the ++ * THR (and sets the LOCK bit in the SR): even though the DMA controller ++ * is triggered by the TXRDY bit to write the next data into the THR, ++ * this data actually won't go on the I2C bus hence a second NACK is not ++ * generated. ++ */ + if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) { + at91_disable_twi_interrupts(dev); + complete(&dev->cmd_complete); ++ } else if (irqstatus & AT91_TWI_RXRDY) { ++ at91_twi_read_next_byte(dev); ++ } else if (irqstatus & AT91_TWI_TXRDY) { ++ at91_twi_write_next_byte(dev); + } + ++ /* catch error flags */ ++ dev->transfer_status |= status; ++ + return IRQ_HANDLED; + } + +@@ -391,6 +429,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) + { + int ret; + bool has_unre_flag = dev->pdata->has_unre_flag; ++ unsigned sr; + + /* + * WARNING: the TXCOMP bit in the Status Register is NOT a clear on +@@ -426,13 +465,16 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) + INIT_COMPLETION(dev->cmd_complete); + dev->transfer_status = 0; + ++ /* Clear pending interrupts, such as NACK. 
*/ ++ sr = at91_twi_read(dev, AT91_TWI_SR); ++ + if (!dev->buf_len) { + at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK); + at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP); + } else if (dev->msg->flags & I2C_M_RD) { + unsigned start_flags = AT91_TWI_START; + +- if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) { ++ if (sr & AT91_TWI_RXRDY) { + dev_err(dev->dev, "RXRDY still set!"); + at91_twi_read(dev, AT91_TWI_RHR); + } +diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c +index 0f3752967c4b..773a6f5a509f 100644 +--- a/drivers/i2c/busses/i2c-eg20t.c ++++ b/drivers/i2c/busses/i2c-eg20t.c +@@ -798,13 +798,6 @@ static int pch_i2c_probe(struct pci_dev *pdev, + /* Set the number of I2C channel instance */ + adap_info->ch_num = id->driver_data; + +- ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED, +- KBUILD_MODNAME, adap_info); +- if (ret) { +- pch_pci_err(pdev, "request_irq FAILED\n"); +- goto err_request_irq; +- } +- + for (i = 0; i < adap_info->ch_num; i++) { + pch_adap = &adap_info->pch_data[i].pch_adapter; + adap_info->pch_i2c_suspended = false; +@@ -821,6 +814,17 @@ static int pch_i2c_probe(struct pci_dev *pdev, + adap_info->pch_data[i].pch_base_address = base_addr + 0x100 * i; + + pch_adap->dev.parent = &pdev->dev; ++ } ++ ++ ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED, ++ KBUILD_MODNAME, adap_info); ++ if (ret) { ++ pch_pci_err(pdev, "request_irq FAILED\n"); ++ goto err_request_irq; ++ } ++ ++ for (i = 0; i < adap_info->ch_num; i++) { ++ pch_adap = &adap_info->pch_data[i].pch_adapter; + + pch_i2c_init(&adap_info->pch_data[i]); + +diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c +index 9d539cbfc833..c0e4143bee90 100644 +--- a/drivers/i2c/i2c-core.c ++++ b/drivers/i2c/i2c-core.c +@@ -1323,6 +1323,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) + /* add the driver to the list of i2c drivers in the driver core */ + driver->driver.owner = owner; + driver->driver.bus = &i2c_bus_type; ++ INIT_LIST_HEAD(&driver->clients); + + /* When registration returns, the driver core + * will have called probe() for all matching-but-unbound devices. 
+@@ -1341,7 +1342,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) + + pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name); + +- INIT_LIST_HEAD(&driver->clients); + /* Walk the adapters that are already present */ + i2c_for_each_dev(driver, __process_new_driver); + +diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c +index a22c427454db..4f9d178e5fd6 100644 +--- a/drivers/iio/accel/kxsd9.c ++++ b/drivers/iio/accel/kxsd9.c +@@ -160,11 +160,13 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev, + if (ret < 0) + goto error_ret; + *val = ret; ++ ret = IIO_VAL_INT; + break; + case IIO_CHAN_INFO_SCALE: + ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C)); + if (ret < 0) + goto error_ret; ++ *val = 0; + *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK]; + ret = IIO_VAL_INT_PLUS_MICRO; + break; +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c +index c410217fbe89..951a4f6a3b11 100644 +--- a/drivers/infiniband/core/cm.c ++++ b/drivers/infiniband/core/cm.c +@@ -79,6 +79,8 @@ static struct ib_cm { + __be32 random_id_operand; + struct list_head timewait_list; + struct workqueue_struct *wq; ++ /* Sync on cm change port state */ ++ spinlock_t state_lock; + } cm; + + /* Counter indexes ordered by attribute ID */ +@@ -160,6 +162,8 @@ struct cm_port { + struct ib_mad_agent *mad_agent; + struct kobject port_obj; + u8 port_num; ++ struct list_head cm_priv_prim_list; ++ struct list_head cm_priv_altr_list; + struct cm_counter_group counter_group[CM_COUNTER_GROUPS]; + }; + +@@ -237,6 +241,12 @@ struct cm_id_private { + u8 service_timeout; + u8 target_ack_delay; + ++ struct list_head prim_list; ++ struct list_head altr_list; ++ /* Indicates that the send port mad is registered and av is set */ ++ int prim_send_port_not_ready; ++ int altr_send_port_not_ready; ++ + struct list_head work_list; + atomic_t work_count; + }; +@@ -255,19 +265,46 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv, + struct ib_mad_agent *mad_agent; + struct ib_mad_send_buf *m; + struct ib_ah *ah; ++ struct cm_av *av; ++ unsigned long flags, flags2; ++ int ret = 0; + ++ /* don't let the port to be released till the agent is down */ ++ spin_lock_irqsave(&cm.state_lock, flags2); ++ spin_lock_irqsave(&cm.lock, flags); ++ if (!cm_id_priv->prim_send_port_not_ready) ++ av = &cm_id_priv->av; ++ else if (!cm_id_priv->altr_send_port_not_ready && ++ (cm_id_priv->alt_av.port)) ++ av = &cm_id_priv->alt_av; ++ else { ++ pr_info("%s: not valid CM id\n", __func__); ++ ret = -ENODEV; ++ spin_unlock_irqrestore(&cm.lock, flags); ++ goto out; ++ } ++ spin_unlock_irqrestore(&cm.lock, flags); ++ /* Make sure the port haven't released the mad yet */ + mad_agent = cm_id_priv->av.port->mad_agent; +- ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr); +- if (IS_ERR(ah)) +- return PTR_ERR(ah); ++ if (!mad_agent) { ++ pr_info("%s: not a valid MAD agent\n", __func__); ++ ret = -ENODEV; ++ goto out; ++ } ++ ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr); ++ if (IS_ERR(ah)) { ++ ret = PTR_ERR(ah); ++ goto out; ++ } + + m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, +- cm_id_priv->av.pkey_index, ++ av->pkey_index, + 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, + GFP_ATOMIC); + if (IS_ERR(m)) { + ib_destroy_ah(ah); +- return PTR_ERR(m); ++ ret = PTR_ERR(m); ++ goto out; + } + + /* Timeout set by caller if response is expected. 
*/ +@@ -277,7 +314,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv, + atomic_inc(&cm_id_priv->refcount); + m->context[0] = cm_id_priv; + *msg = m; +- return 0; ++ ++out: ++ spin_unlock_irqrestore(&cm.state_lock, flags2); ++ return ret; + } + + static int cm_alloc_response_msg(struct cm_port *port, +@@ -346,7 +386,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc, + grh, &av->ah_attr); + } + +-static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) ++static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av, ++ struct cm_id_private *cm_id_priv) + { + struct cm_device *cm_dev; + struct cm_port *port = NULL; +@@ -376,7 +417,18 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) + ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path, + &av->ah_attr); + av->timeout = path->packet_life_time + 1; +- return 0; ++ ++ spin_lock_irqsave(&cm.lock, flags); ++ if (&cm_id_priv->av == av) ++ list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list); ++ else if (&cm_id_priv->alt_av == av) ++ list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list); ++ else ++ ret = -EINVAL; ++ ++ spin_unlock_irqrestore(&cm.lock, flags); ++ ++ return ret; + } + + static int cm_alloc_id(struct cm_id_private *cm_id_priv) +@@ -716,6 +768,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, + spin_lock_init(&cm_id_priv->lock); + init_completion(&cm_id_priv->comp); + INIT_LIST_HEAD(&cm_id_priv->work_list); ++ INIT_LIST_HEAD(&cm_id_priv->prim_list); ++ INIT_LIST_HEAD(&cm_id_priv->altr_list); + atomic_set(&cm_id_priv->work_count, -1); + atomic_set(&cm_id_priv->refcount, 1); + return &cm_id_priv->id; +@@ -914,6 +968,15 @@ retest: + break; + } + ++ spin_lock_irq(&cm.lock); ++ if (!list_empty(&cm_id_priv->altr_list) && ++ (!cm_id_priv->altr_send_port_not_ready)) ++ list_del(&cm_id_priv->altr_list); ++ if (!list_empty(&cm_id_priv->prim_list) && ++ (!cm_id_priv->prim_send_port_not_ready)) ++ list_del(&cm_id_priv->prim_list); ++ spin_unlock_irq(&cm.lock); ++ + cm_free_id(cm_id->local_id); + cm_deref_id(cm_id_priv); + wait_for_completion(&cm_id_priv->comp); +@@ -1137,12 +1200,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, + goto out; + } + +- ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av); ++ ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av, ++ cm_id_priv); + if (ret) + goto error1; + if (param->alternate_path) { + ret = cm_init_av_by_path(param->alternate_path, +- &cm_id_priv->alt_av); ++ &cm_id_priv->alt_av, cm_id_priv); + if (ret) + goto error1; + } +@@ -1562,7 +1626,8 @@ static int cm_req_handler(struct cm_work *work) + + cm_process_routed_req(req_msg, work->mad_recv_wc->wc); + cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]); +- ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); ++ ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av, ++ cm_id_priv); + if (ret) { + ib_get_cached_gid(work->port->cm_dev->ib_device, + work->port->port_num, 0, &work->path[0].sgid); +@@ -1572,7 +1637,8 @@ static int cm_req_handler(struct cm_work *work) + goto rejected; + } + if (req_msg->alt_local_lid) { +- ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av); ++ ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av, ++ cm_id_priv); + if (ret) { + ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID, + &work->path[0].sgid, +@@ -2627,7 +2693,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id, + goto out; + } + +- ret = 
cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av); ++ ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av, ++ cm_id_priv); + if (ret) + goto out; + cm_id_priv->alt_av.timeout = +@@ -2739,7 +2806,8 @@ static int cm_lap_handler(struct cm_work *work) + cm_init_av_for_response(work->port, work->mad_recv_wc->wc, + work->mad_recv_wc->recv_buf.grh, + &cm_id_priv->av); +- cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av); ++ cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av, ++ cm_id_priv); + ret = atomic_inc_and_test(&cm_id_priv->work_count); + if (!ret) + list_add_tail(&work->list, &cm_id_priv->work_list); +@@ -2931,7 +2999,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, + return -EINVAL; + + cm_id_priv = container_of(cm_id, struct cm_id_private, id); +- ret = cm_init_av_by_path(param->path, &cm_id_priv->av); ++ ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv); + if (ret) + goto out; + +@@ -3352,7 +3420,9 @@ out: + static int cm_migrate(struct ib_cm_id *cm_id) + { + struct cm_id_private *cm_id_priv; ++ struct cm_av tmp_av; + unsigned long flags; ++ int tmp_send_port_not_ready; + int ret = 0; + + cm_id_priv = container_of(cm_id, struct cm_id_private, id); +@@ -3361,7 +3431,14 @@ static int cm_migrate(struct ib_cm_id *cm_id) + (cm_id->lap_state == IB_CM_LAP_UNINIT || + cm_id->lap_state == IB_CM_LAP_IDLE)) { + cm_id->lap_state = IB_CM_LAP_IDLE; ++ /* Swap address vector */ ++ tmp_av = cm_id_priv->av; + cm_id_priv->av = cm_id_priv->alt_av; ++ cm_id_priv->alt_av = tmp_av; ++ /* Swap port send ready state */ ++ tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready; ++ cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready; ++ cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready; + } else + ret = -EINVAL; + spin_unlock_irqrestore(&cm_id_priv->lock, flags); +@@ -3767,6 +3844,9 @@ static void cm_add_one(struct ib_device *ib_device) + port->cm_dev = cm_dev; + port->port_num = i; + ++ INIT_LIST_HEAD(&port->cm_priv_prim_list); ++ INIT_LIST_HEAD(&port->cm_priv_altr_list); ++ + ret = cm_create_port_fs(port); + if (ret) + goto error1; +@@ -3813,6 +3893,8 @@ static void cm_remove_one(struct ib_device *ib_device) + { + struct cm_device *cm_dev; + struct cm_port *port; ++ struct cm_id_private *cm_id_priv; ++ struct ib_mad_agent *cur_mad_agent; + struct ib_port_modify port_modify = { + .clr_port_cap_mask = IB_PORT_CM_SUP + }; +@@ -3830,10 +3912,22 @@ static void cm_remove_one(struct ib_device *ib_device) + for (i = 1; i <= ib_device->phys_port_cnt; i++) { + port = cm_dev->port[i-1]; + ib_modify_port(ib_device, port->port_num, 0, &port_modify); +- ib_unregister_mad_agent(port->mad_agent); ++ /* Mark all the cm_id's as not valid */ ++ spin_lock_irq(&cm.lock); ++ list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list) ++ cm_id_priv->altr_send_port_not_ready = 1; ++ list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list) ++ cm_id_priv->prim_send_port_not_ready = 1; ++ spin_unlock_irq(&cm.lock); + flush_workqueue(cm.wq); ++ spin_lock_irq(&cm.state_lock); ++ cur_mad_agent = port->mad_agent; ++ port->mad_agent = NULL; ++ spin_unlock_irq(&cm.state_lock); ++ ib_unregister_mad_agent(cur_mad_agent); + cm_remove_port_fs(port); + } ++ + device_unregister(cm_dev->device); + kfree(cm_dev); + } +@@ -3846,6 +3940,7 @@ static int __init ib_cm_init(void) + INIT_LIST_HEAD(&cm.device_list); + rwlock_init(&cm.device_lock); + spin_lock_init(&cm.lock); ++ spin_lock_init(&cm.state_lock); + 
cm.listen_service_table = RB_ROOT; + cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); + cm.remote_id_table = RB_ROOT; +diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c +index d2360a8ef0b2..180d7f436ed5 100644 +--- a/drivers/infiniband/core/multicast.c ++++ b/drivers/infiniband/core/multicast.c +@@ -106,7 +106,6 @@ struct mcast_group { + atomic_t refcount; + enum mcast_group_state state; + struct ib_sa_query *query; +- int query_id; + u16 pkey_index; + u8 leave_state; + int retries; +@@ -339,11 +338,7 @@ static int send_join(struct mcast_group *group, struct mcast_member *member) + member->multicast.comp_mask, + 3000, GFP_KERNEL, join_handler, group, + &group->query); +- if (ret >= 0) { +- group->query_id = ret; +- ret = 0; +- } +- return ret; ++ return (ret > 0) ? 0 : ret; + } + + static int send_leave(struct mcast_group *group, u8 leave_state) +@@ -363,11 +358,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state) + IB_SA_MCMEMBER_REC_JOIN_STATE, + 3000, GFP_KERNEL, leave_handler, + group, &group->query); +- if (ret >= 0) { +- group->query_id = ret; +- ret = 0; +- } +- return ret; ++ return (ret > 0) ? 0 : ret; + } + + static void join_group(struct mcast_group *group, struct mcast_member *member, +diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c +index f50623d07a75..37b720794148 100644 +--- a/drivers/infiniband/core/uverbs_main.c ++++ b/drivers/infiniband/core/uverbs_main.c +@@ -224,12 +224,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, + container_of(uobj, struct ib_uqp_object, uevent.uobject); + + idr_remove_uobj(&ib_uverbs_qp_idr, uobj); +- if (qp != qp->real_qp) { +- ib_close_qp(qp); +- } else { ++ if (qp == qp->real_qp) + ib_uverbs_detach_umcast(qp, uqp); +- ib_destroy_qp(qp); +- } ++ ib_destroy_qp(qp); + ib_uverbs_release_uevent(file, &uqp->uevent); + kfree(uqp); + } +diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c +index d5e60f44ba5a..5b8a62c6bc8d 100644 +--- a/drivers/infiniband/hw/mlx4/cq.c ++++ b/drivers/infiniband/hw/mlx4/cq.c +@@ -239,11 +239,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector + if (context) + if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) { + err = -EFAULT; +- goto err_dbmap; ++ goto err_cq_free; + } + + return &cq->ibcq; + ++err_cq_free: ++ mlx4_cq_free(dev->dev, &cq->mcq); ++ + err_dbmap: + if (context) + mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db); +diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c +index 25b2cdff00f8..27bedc39b47c 100644 +--- a/drivers/infiniband/hw/mlx4/mcg.c ++++ b/drivers/infiniband/hw/mlx4/mcg.c +@@ -483,7 +483,7 @@ static u8 get_leave_state(struct mcast_group *group) + if (!group->members[i]) + leave_state |= (1 << i); + +- return leave_state & (group->rec.scope_join_state & 7); ++ return leave_state & (group->rec.scope_join_state & 0xf); + } + + static int join_group(struct mcast_group *group, int slave, u8 join_mask) +@@ -558,8 +558,8 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work) + } else + mcg_warn_group(group, "DRIVER BUG\n"); + } else if (group->state == MCAST_LEAVE_SENT) { +- if (group->rec.scope_join_state & 7) +- group->rec.scope_join_state &= 0xf8; ++ if (group->rec.scope_join_state & 0xf) ++ group->rec.scope_join_state &= 0xf0; + group->state = MCAST_IDLE; + mutex_unlock(&group->lock); + if (release_group(group, 1)) +@@ -599,7 +599,7 @@ static int 
handle_leave_req(struct mcast_group *group, u8 leave_mask, + static int handle_join_req(struct mcast_group *group, u8 join_mask, + struct mcast_req *req) + { +- u8 group_join_state = group->rec.scope_join_state & 7; ++ u8 group_join_state = group->rec.scope_join_state & 0xf; + int ref = 0; + u16 status; + struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; +@@ -684,8 +684,8 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work) + u8 cur_join_state; + + resp_join_state = ((struct ib_sa_mcmember_data *) +- group->response_sa_mad.data)->scope_join_state & 7; +- cur_join_state = group->rec.scope_join_state & 7; ++ group->response_sa_mad.data)->scope_join_state & 0xf; ++ cur_join_state = group->rec.scope_join_state & 0xf; + + if (method == IB_MGMT_METHOD_GET_RESP) { + /* successfull join */ +@@ -704,7 +704,7 @@ process_requests: + req = list_first_entry(&group->pending_list, struct mcast_req, + group_list); + sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; +- req_join_state = sa_data->scope_join_state & 0x7; ++ req_join_state = sa_data->scope_join_state & 0xf; + + /* For a leave request, we will immediately answer the VF, and + * update our internal counters. The actual leave will be sent +diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h +index eb71aaa26a9a..fb9a7b340f1f 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib.h ++++ b/drivers/infiniband/ulp/ipoib/ipoib.h +@@ -460,6 +460,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, + struct ipoib_ah *address, u32 qpn); + void ipoib_reap_ah(struct work_struct *work); + ++struct ipoib_path *__path_find(struct net_device *dev, void *gid); + void ipoib_mark_paths_invalid(struct net_device *dev); + void ipoib_flush_paths(struct net_device *dev); + struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c +index 3eceb61e3532..aa9ad2d70ddd 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c +@@ -1290,6 +1290,8 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx) + } + } + ++#define QPN_AND_OPTIONS_OFFSET 4 ++ + static void ipoib_cm_tx_start(struct work_struct *work) + { + struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, +@@ -1298,6 +1300,7 @@ static void ipoib_cm_tx_start(struct work_struct *work) + struct ipoib_neigh *neigh; + struct ipoib_cm_tx *p; + unsigned long flags; ++ struct ipoib_path *path; + int ret; + + struct ib_sa_path_rec pathrec; +@@ -1310,7 +1313,19 @@ static void ipoib_cm_tx_start(struct work_struct *work) + p = list_entry(priv->cm.start_list.next, typeof(*p), list); + list_del_init(&p->list); + neigh = p->neigh; ++ + qpn = IPOIB_QPN(neigh->daddr); ++ /* ++ * As long as the search is with these 2 locks, ++ * path existence indicates its validity. 
++ */ ++ path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET); ++ if (!path) { ++ pr_info("%s ignore not valid path %pI6\n", ++ __func__, ++ neigh->daddr + QPN_AND_OPTIONS_OFFSET); ++ goto free_neigh; ++ } + memcpy(&pathrec, &p->path->pathrec, sizeof pathrec); + + spin_unlock_irqrestore(&priv->lock, flags); +@@ -1322,6 +1337,7 @@ static void ipoib_cm_tx_start(struct work_struct *work) + spin_lock_irqsave(&priv->lock, flags); + + if (ret) { ++free_neigh: + neigh = p->neigh; + if (neigh) { + neigh->cm = NULL; +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c +index 2cfa76f5d99e..39168d3cb7dc 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c +@@ -979,8 +979,17 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, + } + + if (level == IPOIB_FLUSH_LIGHT) { ++ int oper_up; + ipoib_mark_paths_invalid(dev); ++ /* Set IPoIB operation as down to prevent races between: ++ * the flush flow which leaves MCG and on the fly joins ++ * which can happen during that time. mcast restart task ++ * should deal with join requests we missed. ++ */ ++ oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); + ipoib_mcast_dev_flush(dev); ++ if (oper_up) ++ set_bit(IPOIB_FLAG_OPER_UP, &priv->flags); + } + + if (level >= IPOIB_FLUSH_NORMAL) +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c +index a481094af85f..375f9edd4027 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c +@@ -251,7 +251,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf) + return -EINVAL; + } + +-static struct ipoib_path *__path_find(struct net_device *dev, void *gid) ++struct ipoib_path *__path_find(struct net_device *dev, void *gid) + { + struct ipoib_dev_priv *priv = netdev_priv(dev); + struct rb_node *n = priv->path_tree.rb_node; +diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c +index 9870c540e6fb..2d8f9593fb18 100644 +--- a/drivers/input/serio/i8042.c ++++ b/drivers/input/serio/i8042.c +@@ -1223,6 +1223,7 @@ static int __init i8042_create_kbd_port(void) + serio->start = i8042_start; + serio->stop = i8042_stop; + serio->close = i8042_port_close; ++ serio->ps2_cmd_mutex = &i8042_mutex; + serio->port_data = port; + serio->dev.parent = &i8042_platform_device->dev; + strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name)); +@@ -1248,6 +1249,7 @@ static int __init i8042_create_aux_port(int idx) + serio->write = i8042_aux_write; + serio->start = i8042_start; + serio->stop = i8042_stop; ++ serio->ps2_cmd_mutex = &i8042_mutex; + serio->port_data = port; + serio->dev.parent = &i8042_platform_device->dev; + if (idx < 0) { +@@ -1310,21 +1312,6 @@ static void i8042_unregister_ports(void) + } + } + +-/* +- * Checks whether port belongs to i8042 controller. 
+- */ +-bool i8042_check_port_owner(const struct serio *port) +-{ +- int i; +- +- for (i = 0; i < I8042_NUM_PORTS; i++) +- if (i8042_ports[i].serio == port) +- return true; +- +- return false; +-} +-EXPORT_SYMBOL(i8042_check_port_owner); +- + static void i8042_free_irqs(void) + { + if (i8042_aux_irq_registered) +diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c +index 07a8363f3c5c..b5ec313cb9c9 100644 +--- a/drivers/input/serio/libps2.c ++++ b/drivers/input/serio/libps2.c +@@ -57,19 +57,17 @@ EXPORT_SYMBOL(ps2_sendbyte); + + void ps2_begin_command(struct ps2dev *ps2dev) + { +- mutex_lock(&ps2dev->cmd_mutex); ++ struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex; + +- if (i8042_check_port_owner(ps2dev->serio)) +- i8042_lock_chip(); ++ mutex_lock(m); + } + EXPORT_SYMBOL(ps2_begin_command); + + void ps2_end_command(struct ps2dev *ps2dev) + { +- if (i8042_check_port_owner(ps2dev->serio)) +- i8042_unlock_chip(); ++ struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex; + +- mutex_unlock(&ps2dev->cmd_mutex); ++ mutex_unlock(m); + } + EXPORT_SYMBOL(ps2_end_command); + +diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c +index 1418bdda61bb..ceaa790b71a2 100644 +--- a/drivers/input/touchscreen/ili210x.c ++++ b/drivers/input/touchscreen/ili210x.c +@@ -169,7 +169,7 @@ static ssize_t ili210x_calibrate(struct device *dev, + + return count; + } +-static DEVICE_ATTR(calibrate, 0644, NULL, ili210x_calibrate); ++static DEVICE_ATTR(calibrate, S_IWUSR, NULL, ili210x_calibrate); + + static struct attribute *ili210x_attributes[] = { + &dev_attr_calibrate.attr, +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c +index 6bde2a124c72..1c62c248da6a 100644 +--- a/drivers/iommu/amd_iommu.c ++++ b/drivers/iommu/amd_iommu.c +@@ -1991,6 +1991,9 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom) + kfree(dom->aperture[i]); + } + ++ if (dom->domain.id) ++ domain_id_free(dom->domain.id); ++ + kfree(dom); + } + +@@ -2551,8 +2554,16 @@ static void update_device_table(struct protection_domain *domain) + { + struct iommu_dev_data *dev_data; + +- list_for_each_entry(dev_data, &domain->dev_list, list) ++ list_for_each_entry(dev_data, &domain->dev_list, list) { + set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled); ++ ++ if (dev_data->alias_data == NULL) ++ continue; ++ ++ /* There is an alias, update device table entry for it */ ++ set_dte_entry(dev_data->alias_data->devid, domain, ++ dev_data->alias_data->ats.enabled); ++ } + } + + static void update_domain(struct protection_domain *domain) +diff --git a/drivers/isdn/hardware/mISDN/ipac.h b/drivers/isdn/hardware/mISDN/ipac.h +index 8121e046b739..31fb3b0fd0e4 100644 +--- a/drivers/isdn/hardware/mISDN/ipac.h ++++ b/drivers/isdn/hardware/mISDN/ipac.h +@@ -217,6 +217,7 @@ struct ipac_hw { + #define ISAC_IND_DR 0x0 + #define ISAC_IND_SD 0x2 + #define ISAC_IND_DIS 0x3 ++#define ISAC_IND_DR6 0x5 + #define ISAC_IND_EI 0x6 + #define ISAC_IND_RSY 0x4 + #define ISAC_IND_ARD 0x8 +diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c +index ccd7d851be26..bac920c6022f 100644 +--- a/drivers/isdn/hardware/mISDN/mISDNipac.c ++++ b/drivers/isdn/hardware/mISDN/mISDNipac.c +@@ -80,6 +80,7 @@ isac_ph_state_bh(struct dchannel *dch) + l1_event(dch->l1, HW_DEACT_CNF); + break; + case ISAC_IND_DR: ++ case ISAC_IND_DR6: + dch->state = 3; + l1_event(dch->l1, HW_DEACT_IND); + break; +@@ -660,6 +661,7 @@ isac_l1cmd(struct dchannel *dch, 
u32 cmd) + spin_lock_irqsave(isac->hwlock, flags); + if ((isac->state == ISAC_IND_EI) || + (isac->state == ISAC_IND_DR) || ++ (isac->state == ISAC_IND_DR6) || + (isac->state == ISAC_IND_RS)) + ph_command(isac, ISAC_CMD_TIM); + else +diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c +index 5cefb479c707..00bd80a63895 100644 +--- a/drivers/isdn/mISDN/socket.c ++++ b/drivers/isdn/mISDN/socket.c +@@ -717,6 +717,9 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) + if (!maddr || maddr->family != AF_ISDN) + return -EINVAL; + ++ if (addr_len < sizeof(struct sockaddr_mISDN)) ++ return -EINVAL; ++ + lock_sock(sk); + + if (_pms(sk)->dev) { +diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c +index a9a47cd029d5..ace01a30f310 100644 +--- a/drivers/md/dm-flakey.c ++++ b/drivers/md/dm-flakey.c +@@ -286,15 +286,13 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) + pb->bio_submitted = true; + + /* +- * Map reads as normal only if corrupt_bio_byte set. ++ * Error reads if neither corrupt_bio_byte or drop_writes are set. ++ * Otherwise, flakey_end_io() will decide if the reads should be modified. + */ + if (bio_data_dir(bio) == READ) { +- /* If flags were specified, only corrupt those that match. */ +- if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) && +- all_corrupt_bio_flags_match(bio, fc)) +- goto map_bio; +- else ++ if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags)) + return -EIO; ++ goto map_bio; + } + + /* +@@ -331,14 +329,21 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error) + struct flakey_c *fc = ti->private; + struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); + +- /* +- * Corrupt successful READs while in down state. +- */ + if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) { +- if (fc->corrupt_bio_byte) ++ if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) && ++ all_corrupt_bio_flags_match(bio, fc)) { ++ /* ++ * Corrupt successful matching READs while in down state. ++ */ + corrupt_bio_data(bio, fc); +- else ++ ++ } else if (!test_bit(DROP_WRITES, &fc->flags)) { ++ /* ++ * Error read during the down_interval if drop_writes ++ * wasn't configured. ++ */ + return -EIO; ++ } + } + + return error; +diff --git a/drivers/md/dm.c b/drivers/md/dm.c +index f69fed826a56..a77ef6cac62d 100644 +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -2323,6 +2323,7 @@ EXPORT_SYMBOL_GPL(dm_device_name); + + static void __dm_destroy(struct mapped_device *md, bool wait) + { ++ struct request_queue *q = md->queue; + struct dm_table *map; + + might_sleep(); +@@ -2333,6 +2334,10 @@ static void __dm_destroy(struct mapped_device *md, bool wait) + set_bit(DMF_FREEING, &md->flags); + spin_unlock(&_minor_lock); + ++ spin_lock_irq(q->queue_lock); ++ queue_flag_set(QUEUE_FLAG_DYING, q); ++ spin_unlock_irq(q->queue_lock); ++ + /* + * Take suspend_lock so that presuspend and postsuspend methods + * do not race with internal suspend. 
+diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c +index 2c7217fb1415..4a1346fb383e 100644 +--- a/drivers/media/dvb-frontends/mb86a20s.c ++++ b/drivers/media/dvb-frontends/mb86a20s.c +@@ -75,25 +75,27 @@ static struct regdata mb86a20s_init1[] = { + }; + + static struct regdata mb86a20s_init2[] = { +- { 0x28, 0x22 }, { 0x29, 0x00 }, { 0x2a, 0x1f }, { 0x2b, 0xf0 }, ++ { 0x50, 0xd1 }, { 0x51, 0x22 }, ++ { 0x39, 0x01 }, ++ { 0x71, 0x00 }, + { 0x3b, 0x21 }, +- { 0x3c, 0x38 }, ++ { 0x3c, 0x3a }, + { 0x01, 0x0d }, +- { 0x04, 0x08 }, { 0x05, 0x03 }, ++ { 0x04, 0x08 }, { 0x05, 0x05 }, + { 0x04, 0x0e }, { 0x05, 0x00 }, +- { 0x04, 0x0f }, { 0x05, 0x37 }, +- { 0x04, 0x0b }, { 0x05, 0x78 }, ++ { 0x04, 0x0f }, { 0x05, 0x14 }, ++ { 0x04, 0x0b }, { 0x05, 0x8c }, + { 0x04, 0x00 }, { 0x05, 0x00 }, +- { 0x04, 0x01 }, { 0x05, 0x1e }, +- { 0x04, 0x02 }, { 0x05, 0x07 }, +- { 0x04, 0x03 }, { 0x05, 0xd0 }, ++ { 0x04, 0x01 }, { 0x05, 0x07 }, ++ { 0x04, 0x02 }, { 0x05, 0x0f }, ++ { 0x04, 0x03 }, { 0x05, 0xa0 }, + { 0x04, 0x09 }, { 0x05, 0x00 }, + { 0x04, 0x0a }, { 0x05, 0xff }, +- { 0x04, 0x27 }, { 0x05, 0x00 }, ++ { 0x04, 0x27 }, { 0x05, 0x64 }, + { 0x04, 0x28 }, { 0x05, 0x00 }, +- { 0x04, 0x1e }, { 0x05, 0x00 }, +- { 0x04, 0x29 }, { 0x05, 0x64 }, +- { 0x04, 0x32 }, { 0x05, 0x02 }, ++ { 0x04, 0x1e }, { 0x05, 0xff }, ++ { 0x04, 0x29 }, { 0x05, 0x0a }, ++ { 0x04, 0x32 }, { 0x05, 0x0a }, + { 0x04, 0x14 }, { 0x05, 0x02 }, + { 0x04, 0x04 }, { 0x05, 0x00 }, + { 0x04, 0x05 }, { 0x05, 0x22 }, +@@ -101,8 +103,6 @@ static struct regdata mb86a20s_init2[] = { + { 0x04, 0x07 }, { 0x05, 0xd8 }, + { 0x04, 0x12 }, { 0x05, 0x00 }, + { 0x04, 0x13 }, { 0x05, 0xff }, +- { 0x04, 0x15 }, { 0x05, 0x4e }, +- { 0x04, 0x16 }, { 0x05, 0x20 }, + + /* + * On this demod, when the bit count reaches the count below, +@@ -156,42 +156,36 @@ static struct regdata mb86a20s_init2[] = { + { 0x50, 0x51 }, { 0x51, 0x04 }, /* MER symbol 4 */ + { 0x45, 0x04 }, /* CN symbol 4 */ + { 0x48, 0x04 }, /* CN manual mode */ +- ++ { 0x50, 0xd5 }, { 0x51, 0x01 }, + { 0x50, 0xd6 }, { 0x51, 0x1f }, + { 0x50, 0xd2 }, { 0x51, 0x03 }, +- { 0x50, 0xd7 }, { 0x51, 0xbf }, +- { 0x28, 0x74 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xff }, +- { 0x28, 0x46 }, { 0x29, 0x00 }, { 0x2a, 0x1a }, { 0x2b, 0x0c }, +- +- { 0x04, 0x40 }, { 0x05, 0x00 }, +- { 0x28, 0x00 }, { 0x2b, 0x08 }, +- { 0x28, 0x05 }, { 0x2b, 0x00 }, ++ { 0x50, 0xd7 }, { 0x51, 0x3f }, + { 0x1c, 0x01 }, +- { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x1f }, +- { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x18 }, +- { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x12 }, +- { 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x30 }, +- { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x37 }, +- { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 }, +- { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x09 }, +- { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x06 }, +- { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7b }, +- { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x76 }, +- { 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7d }, +- { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x08 }, +- { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0b }, +- { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 }, +- { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf2 }, +- { 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf3 }, +- { 0x28, 0x16 
}, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x05 }, +- { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 }, +- { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f }, +- { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xef }, +- { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xd8 }, +- { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xf1 }, +- { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x3d }, +- { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x94 }, +- { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xba }, ++ { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x03 }, ++ { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0d }, ++ { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 }, ++ { 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x01 }, ++ { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x21 }, ++ { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x29 }, ++ { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 }, ++ { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x31 }, ++ { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0e }, ++ { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x4e }, ++ { 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x46 }, ++ { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f }, ++ { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x56 }, ++ { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x35 }, ++ { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbe }, ++ { 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0x84 }, ++ { 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x03 }, { 0x2b, 0xee }, ++ { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x98 }, ++ { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x9f }, ++ { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xb2 }, ++ { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0xc2 }, ++ { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0x4a }, ++ { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbc }, ++ { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x04 }, { 0x2b, 0xba }, ++ { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0x14 }, + { 0x50, 0x1e }, { 0x51, 0x5d }, + { 0x50, 0x22 }, { 0x51, 0x00 }, + { 0x50, 0x23 }, { 0x51, 0xc8 }, +@@ -200,9 +194,7 @@ static struct regdata mb86a20s_init2[] = { + { 0x50, 0x26 }, { 0x51, 0x00 }, + { 0x50, 0x27 }, { 0x51, 0xc3 }, + { 0x50, 0x39 }, { 0x51, 0x02 }, +- { 0xec, 0x0f }, +- { 0xeb, 0x1f }, +- { 0x28, 0x6a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 }, ++ { 0x50, 0xd5 }, { 0x51, 0x01 }, + { 0xd0, 0x00 }, + }; + +@@ -321,7 +313,11 @@ static int mb86a20s_read_status(struct dvb_frontend *fe, fe_status_t *status) + if (val >= 7) + *status |= FE_HAS_SYNC; + +- if (val >= 8) /* Maybe 9? */ ++ /* ++ * Actually, on state S8, it starts receiving TS, but the TS ++ * output is only on normal state after the transition to S9. 
++ */ ++ if (val >= 9) + *status |= FE_HAS_LOCK; + + dev_dbg(&state->i2c->dev, "%s: Status = 0x%02x (state = %d)\n", +@@ -2080,6 +2076,11 @@ static void mb86a20s_release(struct dvb_frontend *fe) + kfree(state); + } + ++static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe) ++{ ++ return DVBFE_ALGO_HW; ++} ++ + static struct dvb_frontend_ops mb86a20s_ops; + + struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config, +@@ -2153,6 +2154,7 @@ static struct dvb_frontend_ops mb86a20s_ops = { + .read_status = mb86a20s_read_status_and_stats, + .read_signal_strength = mb86a20s_read_signal_strength_from_cache, + .tune = mb86a20s_tune, ++ .get_frontend_algo = mb86a20s_get_frontend_algo, + }; + + MODULE_DESCRIPTION("DVB Frontend module for Fujitsu mb86A20s hardware"); +diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c +index 235ba657d52e..79a24efc03d6 100644 +--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c ++++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c +@@ -1261,7 +1261,10 @@ int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev, + dev->board.agc_analog_digital_select_gpio, + analog_or_digital); + +- return status; ++ if (status < 0) ++ return status; ++ ++ return 0; + } + + int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3) +diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c +index 13249e5a7891..c13c32347ad2 100644 +--- a/drivers/media/usb/cx231xx/cx231xx-cards.c ++++ b/drivers/media/usb/cx231xx/cx231xx-cards.c +@@ -452,7 +452,7 @@ struct cx231xx_board cx231xx_boards[] = { + .output_mode = OUT_MODE_VIP11, + .demod_xfer_mode = 0, + .ctl_pin_status_mask = 0xFFFFFFC4, +- .agc_analog_digital_select_gpio = 0x00, /* According with PV cxPolaris.inf file */ ++ .agc_analog_digital_select_gpio = 0x1c, + .tuner_sif_gpio = -1, + .tuner_scl_gpio = -1, + .tuner_sda_gpio = -1, +diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c +index 4ba3ce09b713..6f5ffcc19356 100644 +--- a/drivers/media/usb/cx231xx/cx231xx-core.c ++++ b/drivers/media/usb/cx231xx/cx231xx-core.c +@@ -723,6 +723,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode) + break; + case CX231XX_BOARD_CNXT_RDE_253S: + case CX231XX_BOARD_CNXT_RDU_253S: ++ case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: + errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1); + break; + case CX231XX_BOARD_HAUPPAUGE_EXETER: +@@ -747,7 +748,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode) + case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: + case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL: + case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC: +- errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0); ++ errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0); + break; + default: + break; +diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c +index c4ff9739a7ae..d28d9068396f 100644 +--- a/drivers/media/usb/em28xx/em28xx-i2c.c ++++ b/drivers/media/usb/em28xx/em28xx-i2c.c +@@ -469,9 +469,8 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap, + int addr, rc, i; + u8 reg; + +- rc = rt_mutex_trylock(&dev->i2c_bus_lock); +- if (rc < 0) +- return rc; ++ if (!rt_mutex_trylock(&dev->i2c_bus_lock)) ++ return -EAGAIN; + + /* Switch I2C bus if needed */ + if (bus != dev->cur_i2c_bus && +diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c +index 7604f4e5df40..af6a245dc505 100644 +--- a/drivers/mfd/mfd-core.c 
++++ b/drivers/mfd/mfd-core.c +@@ -263,6 +263,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones) + clones[i]); + } + ++ put_device(dev); ++ + return 0; + } + EXPORT_SYMBOL(mfd_clone_cell); +diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c +index 4b7ea3fb143c..1f8f856946cd 100644 +--- a/drivers/misc/mei/nfc.c ++++ b/drivers/misc/mei/nfc.c +@@ -292,7 +292,7 @@ static int mei_nfc_if_version(struct mei_nfc_dev *ndev) + return -ENOMEM; + + bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length); +- if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { ++ if (bytes_recv < if_version_length) { + dev_err(&dev->pdev->dev, "Could not read IF version\n"); + ret = -EIO; + goto err; +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c +index a2863b7b9e21..ce34c492a887 100644 +--- a/drivers/mmc/card/block.c ++++ b/drivers/mmc/card/block.c +@@ -2093,7 +2093,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, + set_capacity(md->disk, size); + + if (mmc_host_cmd23(card->host)) { +- if (mmc_card_mmc(card) || ++ if ((mmc_card_mmc(card) && ++ card->csd.mmca_vsn >= CSD_SPEC_VER_3) || + (mmc_card_sd(card) && + card->scr.cmds & SD_SCR_CMD23_SUPPORT)) + md->flags |= MMC_BLK_CMD23; +diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c +index 4278a1787d08..f3a423213108 100644 +--- a/drivers/mmc/host/mxs-mmc.c ++++ b/drivers/mmc/host/mxs-mmc.c +@@ -674,13 +674,13 @@ static int mxs_mmc_probe(struct platform_device *pdev) + + platform_set_drvdata(pdev, mmc); + ++ spin_lock_init(&host->lock); ++ + ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0, + DRIVER_NAME, host); + if (ret) + goto out_free_dma; + +- spin_lock_init(&host->lock); +- + ret = mmc_add_host(mmc); + if (ret) + goto out_free_dma; +diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c +index 744ca5cacc9b..f9fa3fad728e 100644 +--- a/drivers/mtd/maps/pmcmsp-flash.c ++++ b/drivers/mtd/maps/pmcmsp-flash.c +@@ -75,15 +75,15 @@ static int __init init_msp_flash(void) + + printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt); + +- msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL); ++ msp_flash = kcalloc(fcnt, sizeof(*msp_flash), GFP_KERNEL); + if (!msp_flash) + return -ENOMEM; + +- msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL); ++ msp_parts = kcalloc(fcnt, sizeof(*msp_parts), GFP_KERNEL); + if (!msp_parts) + goto free_msp_flash; + +- msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL); ++ msp_maps = kcalloc(fcnt, sizeof(*msp_maps), GFP_KERNEL); + if (!msp_maps) + goto free_msp_parts; + +diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c +index 32d5e40c6863..48b63e849067 100644 +--- a/drivers/mtd/mtd_blkdevs.c ++++ b/drivers/mtd/mtd_blkdevs.c +@@ -198,8 +198,8 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode) + if (!dev) + return -ERESTARTSYS; /* FIXME: busy loop! 
-arnd*/ + +- mutex_lock(&dev->lock); + mutex_lock(&mtd_table_mutex); ++ mutex_lock(&dev->lock); + + if (dev->open) + goto unlock; +@@ -223,8 +223,8 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode) + + unlock: + dev->open++; +- mutex_unlock(&mtd_table_mutex); + mutex_unlock(&dev->lock); ++ mutex_unlock(&mtd_table_mutex); + blktrans_dev_put(dev); + return ret; + +@@ -234,8 +234,8 @@ error_release: + error_put: + module_put(dev->tr->owner); + kref_put(&dev->ref, blktrans_dev_release); +- mutex_unlock(&mtd_table_mutex); + mutex_unlock(&dev->lock); ++ mutex_unlock(&mtd_table_mutex); + blktrans_dev_put(dev); + return ret; + } +@@ -247,8 +247,8 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode) + if (!dev) + return; + +- mutex_lock(&dev->lock); + mutex_lock(&mtd_table_mutex); ++ mutex_lock(&dev->lock); + + if (--dev->open) + goto unlock; +@@ -262,8 +262,8 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode) + __put_mtd_device(dev->mtd); + } + unlock: +- mutex_unlock(&mtd_table_mutex); + mutex_unlock(&dev->lock); ++ mutex_unlock(&mtd_table_mutex); + blktrans_dev_put(dev); + } + +diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c +index c3e15a558173..e4f16cf413a5 100644 +--- a/drivers/mtd/nand/davinci_nand.c ++++ b/drivers/mtd/nand/davinci_nand.c +@@ -241,6 +241,9 @@ static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode) + unsigned long flags; + u32 val; + ++ /* Reset ECC hardware */ ++ davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET); ++ + spin_lock_irqsave(&davinci_nand_lock, flags); + + /* Start 4-bit ECC calculation for read/write */ +diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c +index bf8108d65b73..f6f1604deb8e 100644 +--- a/drivers/mtd/ubi/fastmap.c ++++ b/drivers/mtd/ubi/fastmap.c +@@ -438,10 +438,11 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, + unsigned long long ec = be64_to_cpu(ech->ec); + unmap_peb(ai, pnum); + dbg_bld("Adding PEB to free: %i", pnum); ++ + if (err == UBI_IO_FF_BITFLIPS) +- add_aeb(ai, free, pnum, ec, 1); +- else +- add_aeb(ai, free, pnum, ec, 0); ++ scrub = 1; ++ ++ add_aeb(ai, free, pnum, ec, scrub); + continue; + } else if (err == 0 || err == UBI_IO_BITFLIPS) { + dbg_bld("Found non empty PEB:%i in pool", pnum); +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index c0ed7c802819..ce41616d9d1a 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -1565,9 +1565,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) + bond_dev->name, slave_dev->name); + } + +- /* already enslaved */ +- if (slave_dev->flags & IFF_SLAVE) { +- pr_debug("Error, Device was already enslaved\n"); ++ /* already in-use? 
*/ ++ if (netdev_is_rx_handler_busy(slave_dev)) { ++ netdev_err(bond_dev, ++ "Error: Device is in use and cannot be enslaved\n"); + return -EBUSY; + } + +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c +index 464e5f66b66d..284d751ea97f 100644 +--- a/drivers/net/can/dev.c ++++ b/drivers/net/can/dev.c +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -394,9 +395,8 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb); + /* + * CAN device restart for bus-off recovery + */ +-static void can_restart(unsigned long data) ++static void can_restart(struct net_device *dev) + { +- struct net_device *dev = (struct net_device *)data; + struct can_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; +@@ -436,6 +436,14 @@ restart: + netdev_err(dev, "Error %d during restart", err); + } + ++static void can_restart_work(struct work_struct *work) ++{ ++ struct delayed_work *dwork = to_delayed_work(work); ++ struct can_priv *priv = container_of(dwork, struct can_priv, restart_work); ++ ++ can_restart(priv->dev); ++} ++ + int can_restart_now(struct net_device *dev) + { + struct can_priv *priv = netdev_priv(dev); +@@ -449,8 +457,8 @@ int can_restart_now(struct net_device *dev) + if (priv->state != CAN_STATE_BUS_OFF) + return -EBUSY; + +- /* Runs as soon as possible in the timer context */ +- mod_timer(&priv->restart_timer, jiffies); ++ cancel_delayed_work_sync(&priv->restart_work); ++ can_restart(dev); + + return 0; + } +@@ -472,8 +480,8 @@ void can_bus_off(struct net_device *dev) + priv->can_stats.bus_off++; + + if (priv->restart_ms) +- mod_timer(&priv->restart_timer, +- jiffies + (priv->restart_ms * HZ) / 1000); ++ schedule_delayed_work(&priv->restart_work, ++ msecs_to_jiffies(priv->restart_ms)); + } + EXPORT_SYMBOL_GPL(can_bus_off); + +@@ -556,6 +564,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max) + return NULL; + + priv = netdev_priv(dev); ++ priv->dev = dev; + + if (echo_skb_max) { + priv->echo_skb_max = echo_skb_max; +@@ -565,7 +574,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max) + + priv->state = CAN_STATE_STOPPED; + +- init_timer(&priv->restart_timer); ++ INIT_DELAYED_WORK(&priv->restart_work, can_restart_work); + + return dev; + } +@@ -599,8 +608,6 @@ int open_candev(struct net_device *dev) + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); + +- setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev); +- + return 0; + } + EXPORT_SYMBOL_GPL(open_candev); +@@ -615,7 +622,7 @@ void close_candev(struct net_device *dev) + { + struct can_priv *priv = netdev_priv(dev); + +- del_timer_sync(&priv->restart_timer); ++ cancel_delayed_work_sync(&priv->restart_work); + can_flush_echo_skb(dev); + } + EXPORT_SYMBOL_GPL(close_candev); +diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c +index d175bbd3ffd3..4ac9dfd3f127 100644 +--- a/drivers/net/ethernet/marvell/sky2.c ++++ b/drivers/net/ethernet/marvell/sky2.c +@@ -5197,6 +5197,19 @@ static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume); + + static void sky2_shutdown(struct pci_dev *pdev) + { ++ struct sky2_hw *hw = pci_get_drvdata(pdev); ++ int port; ++ ++ for (port = 0; port < hw->ports; port++) { ++ struct net_device *ndev = hw->dev[port]; ++ ++ rtnl_lock(); ++ if (netif_running(ndev)) { ++ dev_close(ndev); ++ netif_device_detach(ndev); ++ } ++ rtnl_unlock(); ++ } + sky2_suspend(&pdev->dev); + pci_wake_from_d3(pdev, 
device_may_wakeup(&pdev->dev)); + pci_set_power_state(pdev, PCI_D3hot); +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +index 063f3f4d4867..a206ce615e97 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +@@ -2027,7 +2027,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) + struct mlx4_en_dev *mdev = en_priv->mdev; + u64 mac_u64 = mlx4_en_mac_to_u64(mac); + +- if (!is_valid_ether_addr(mac)) ++ if (is_multicast_ether_addr(mac)) + return -EINVAL; + + return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64); +diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c +index 14a8d2958698..ab79c0f13d0a 100644 +--- a/drivers/net/ppp/ppp_generic.c ++++ b/drivers/net/ppp/ppp_generic.c +@@ -2317,8 +2317,6 @@ ppp_unregister_channel(struct ppp_channel *chan) + spin_lock_bh(&pn->all_channels_lock); + list_del(&pch->list); + spin_unlock_bh(&pn->all_channels_lock); +- put_net(pch->chan_net); +- pch->chan_net = NULL; + + pch->file.dead = 1; + wake_up_interruptible(&pch->file.rwait); +@@ -2925,6 +2923,9 @@ ppp_disconnect_channel(struct channel *pch) + */ + static void ppp_destroy_channel(struct channel *pch) + { ++ put_net(pch->chan_net); ++ pch->chan_net = NULL; ++ + atomic_dec(&channel_count); + + if (!pch->file.dead) { +diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c +index afb117c16d2d..8ba774de3474 100644 +--- a/drivers/net/usb/kaweth.c ++++ b/drivers/net/usb/kaweth.c +@@ -1031,6 +1031,7 @@ static int kaweth_probe( + kaweth = netdev_priv(netdev); + kaweth->dev = udev; + kaweth->net = netdev; ++ kaweth->intf = intf; + + spin_lock_init(&kaweth->device_lock); + init_waitqueue_head(&kaweth->term_wait); +@@ -1141,8 +1142,6 @@ err_fw: + + dev_dbg(dev, "Initializing net device.\n"); + +- kaweth->intf = intf; +- + kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!kaweth->tx_urb) + goto err_free_netdev; +diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +index 301e572e8923..2c524305589f 100644 +--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c ++++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +@@ -3726,7 +3726,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, + (u8 *)&settings->beacon.head[ie_offset], + settings->beacon.head_len - ie_offset, + WLAN_EID_SSID); +- if (!ssid_ie) ++ if (!ssid_ie || ssid_ie->len > IEEE80211_MAX_SSID_LEN) + return -EINVAL; + + memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len); +diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c +index 4fb9635d3919..7660b523dcf1 100644 +--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c ++++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c +@@ -1079,8 +1079,10 @@ bool dma_rxfill(struct dma_pub *pub) + + pa = dma_map_single(di->dmadev, p->data, di->rxbufsize, + DMA_FROM_DEVICE); +- if (dma_mapping_error(di->dmadev, pa)) ++ if (dma_mapping_error(di->dmadev, pa)) { ++ brcmu_pkt_buf_free_skb(p); + return false; ++ } + + /* save the free packet pointer */ + di->rxp[rxout] = p; +diff --git a/drivers/net/wireless/brcm80211/brcmsmac/stf.c b/drivers/net/wireless/brcm80211/brcmsmac/stf.c +index dd9162722495..0ab865de1491 100644 +--- a/drivers/net/wireless/brcm80211/brcmsmac/stf.c ++++ b/drivers/net/wireless/brcm80211/brcmsmac/stf.c +@@ -87,7 +87,7 @@ void + 
brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc, u16 *ss_algo_channel, + u16 chanspec) + { +- struct tx_power power; ++ struct tx_power power = { }; + u8 siso_mcs_id, cdd_mcs_id, stbc_mcs_id; + + /* Clear previous settings */ +diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c +index f05962c32497..2e3a0d73f090 100644 +--- a/drivers/net/wireless/iwlwifi/pcie/tx.c ++++ b/drivers/net/wireless/iwlwifi/pcie/tx.c +@@ -1311,9 +1311,9 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, + + /* start the TFD with the scratchbuf */ + scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE); +- memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size); ++ memcpy(&txq->scratchbufs[idx], &out_cmd->hdr, scratch_size); + iwl_pcie_txq_build_tfd(trans, txq, +- iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr), ++ iwl_pcie_get_scratchbuf_dma(txq, idx), + scratch_size, 1); + + /* map first command fragment, if any remains */ +diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c +index e7f7cdfafd51..fa0e45b82ce0 100644 +--- a/drivers/net/wireless/mwifiex/cfg80211.c ++++ b/drivers/net/wireless/mwifiex/cfg80211.c +@@ -1633,8 +1633,9 @@ done: + is_scanning_required = 1; + } else { + dev_dbg(priv->adapter->dev, +- "info: trying to associate to '%s' bssid %pM\n", +- (char *) req_ssid.ssid, bss->bssid); ++ "info: trying to associate to '%.*s' bssid %pM\n", ++ req_ssid.ssid_len, (char *)req_ssid.ssid, ++ bss->bssid); + memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN); + break; + } +@@ -1675,8 +1676,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, + return -EINVAL; + } + +- wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n", +- (char *) sme->ssid, sme->bssid); ++ wiphy_dbg(wiphy, "info: Trying to associate to %.*s and bssid %pM\n", ++ (int)sme->ssid_len, (char *)sme->ssid, sme->bssid); + + ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid, + priv->bss_mode, sme->channel, sme, 0); +@@ -1799,8 +1800,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, + goto done; + } + +- wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n", +- (char *) params->ssid, params->bssid); ++ wiphy_dbg(wiphy, "info: trying to join to %.*s and bssid %pM\n", ++ params->ssid_len, (char *)params->ssid, params->bssid); + + mwifiex_set_ibss_params(priv, params); + +diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c +index 1595f818b8c0..ec88898ce42b 100644 +--- a/drivers/net/xen-netback/netback.c ++++ b/drivers/net/xen-netback/netback.c +@@ -454,17 +454,17 @@ static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif, + struct netrx_pending_operations *npo) + { + struct netbk_rx_meta *meta; +- struct xen_netif_rx_request *req; ++ struct xen_netif_rx_request req; + +- req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); ++ RING_COPY_REQUEST(&vif->rx, vif->rx.req_cons++, &req); + + meta = npo->meta + npo->meta_prod++; + meta->gso_size = 0; + meta->size = 0; +- meta->id = req->id; ++ meta->id = req.id; + + npo->copy_off = 0; +- npo->copy_gref = req->gref; ++ npo->copy_gref = req.gref; + + return meta; + } +@@ -582,7 +582,7 @@ static int netbk_gop_skb(struct sk_buff *skb, + struct xenvif *vif = netdev_priv(skb->dev); + int nr_frags = skb_shinfo(skb)->nr_frags; + int i; +- struct xen_netif_rx_request *req; ++ struct xen_netif_rx_request req; + struct netbk_rx_meta *meta; + unsigned char *data; + int head = 
1; +@@ -592,14 +592,14 @@ static int netbk_gop_skb(struct sk_buff *skb, + + /* Set up a GSO prefix descriptor, if necessary */ + if (skb_shinfo(skb)->gso_size && vif->gso_prefix) { +- req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); ++ RING_COPY_REQUEST(&vif->rx, vif->rx.req_cons++, &req); + meta = npo->meta + npo->meta_prod++; + meta->gso_size = skb_shinfo(skb)->gso_size; + meta->size = 0; +- meta->id = req->id; ++ meta->id = req.id; + } + +- req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); ++ RING_COPY_REQUEST(&vif->rx, vif->rx.req_cons++, &req); + meta = npo->meta + npo->meta_prod++; + + if (!vif->gso_prefix) +@@ -608,9 +608,9 @@ static int netbk_gop_skb(struct sk_buff *skb, + meta->gso_size = 0; + + meta->size = 0; +- meta->id = req->id; ++ meta->id = req.id; + npo->copy_off = 0; +- npo->copy_gref = req->gref; ++ npo->copy_gref = req.gref; + + data = skb->data; + while (data < skb_tail_pointer(skb)) { +@@ -928,9 +928,7 @@ static void tx_add_credit(struct xenvif *vif) + * Allow a burst big enough to transmit a jumbo packet of up to 128kB. + * Otherwise the interface can seize up due to insufficient credit. + */ +- max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size; +- max_burst = min(max_burst, 131072UL); +- max_burst = max(max_burst, vif->credit_bytes); ++ max_burst = max(131072UL, vif->credit_bytes); + + /* Take care that adding a new chunk of credit doesn't wrap to zero. */ + max_credit = vif->remaining_credit + vif->credit_bytes; +@@ -956,7 +954,7 @@ static void netbk_tx_err(struct xenvif *vif, + make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); + if (cons == end) + break; +- txp = RING_GET_REQUEST(&vif->tx, cons++); ++ RING_COPY_REQUEST(&vif->tx, cons++, txp); + } while (1); + vif->tx.req_cons = cons; + xen_netbk_check_rx_xenvif(vif); +@@ -1023,8 +1021,7 @@ static int netbk_count_requests(struct xenvif *vif, + if (drop_err) + txp = &dropped_tx; + +- memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots), +- sizeof(*txp)); ++ RING_COPY_REQUEST(&vif->tx, cons + slots, txp); + + /* If the guest submitted a frame >= 64 KiB then + * first->size overflowed and following slots will +@@ -1312,8 +1309,7 @@ static int xen_netbk_get_extras(struct xenvif *vif, + return -EBADR; + } + +- memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons), +- sizeof(extra)); ++ RING_COPY_REQUEST(&vif->tx, cons, &extra); + if (unlikely(!extra.type || + extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { + vif->tx.req_cons = ++cons; +@@ -1503,7 +1499,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) + + idx = vif->tx.req_cons; + rmb(); /* Ensure that we see the request before we copy it. */ +- memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq)); ++ RING_COPY_REQUEST(&vif->tx, idx, &txreq); + + /* Credit-based scheduling. 
*/ + if (txreq.size > vif->remaining_credit && +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index a6637158d078..b6625e58bc57 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -339,19 +339,52 @@ static void quirk_s3_64M(struct pci_dev *dev) + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); + ++static void quirk_io(struct pci_dev *dev, int pos, unsigned size, ++ const char *name) ++{ ++ u32 region; ++ struct pci_bus_region bus_region; ++ struct resource *res = dev->resource + pos; ++ ++ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), ®ion); ++ ++ if (!region) ++ return; ++ ++ res->name = pci_name(dev); ++ res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK; ++ res->flags |= ++ (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN); ++ region &= ~(size - 1); ++ ++ /* Convert from PCI bus to resource space */ ++ bus_region.start = region; ++ bus_region.end = region + size - 1; ++ pcibios_bus_to_resource(dev, res, &bus_region); ++ ++ dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n", ++ name, PCI_BASE_ADDRESS_0 + (pos << 2), res); ++} ++ + /* + * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS + * ver. 1.33 20070103) don't set the correct ISA PCI region header info. + * BAR0 should be 8 bytes; instead, it may be set to something like 8k + * (which conflicts w/ BAR1's memory range). ++ * ++ * CS553x's ISA PCI BARs may also be read-only (ref: ++ * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward). + */ + static void quirk_cs5536_vsa(struct pci_dev *dev) + { ++ static char *name = "CS5536 ISA bridge"; ++ + if (pci_resource_len(dev, 0) != 8) { +- struct resource *res = &dev->resource[0]; +- res->end = res->start + 8 - 1; +- dev_info(&dev->dev, "CS5536 ISA bridge bug detected " +- "(incorrect header); workaround applied.\n"); ++ quirk_io(dev, 0, 8, name); /* SMB */ ++ quirk_io(dev, 1, 256, name); /* GPIO */ ++ quirk_io(dev, 2, 64, name); /* MFGPT */ ++ dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n", ++ name); + } + } + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); +diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c +index 45c16447744b..1ed4145164d6 100644 +--- a/drivers/regulator/tps65910-regulator.c ++++ b/drivers/regulator/tps65910-regulator.c +@@ -1080,6 +1080,12 @@ static int tps65910_probe(struct platform_device *pdev) + pmic->num_regulators = ARRAY_SIZE(tps65910_regs); + pmic->ext_sleep_control = tps65910_ext_sleep_control; + info = tps65910_regs; ++ /* Work around silicon erratum SWCZ010: output programmed ++ * voltage level can go higher than expected or crash ++ * Workaround: use no synchronization of DCDC clocks ++ */ ++ tps65910_reg_clear_bits(pmic->mfd, TPS65910_DCDCCTRL, ++ DCDCCTRL_DCDCCKSYNC_MASK); + break; + case TPS65911: + pmic->get_ctrl_reg = &tps65911_get_ctrl_register; +diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c +index e91ec8cd9b09..aa9d384205c8 100644 +--- a/drivers/s390/block/dasd.c ++++ b/drivers/s390/block/dasd.c +@@ -1615,9 +1615,18 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, + unsigned long long now; + int expires; + ++ cqr = (struct dasd_ccw_req *) intparm; + if (IS_ERR(irb)) { + switch (PTR_ERR(irb)) { + case -EIO: ++ if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) { ++ device = (struct 
dasd_device *) cqr->startdev; ++ cqr->status = DASD_CQR_CLEARED; ++ dasd_device_clear_timer(device); ++ wake_up(&dasd_flush_wq); ++ dasd_schedule_device_bh(device); ++ return; ++ } + break; + case -ETIMEDOUT: + DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " +@@ -1633,7 +1642,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, + } + + now = get_tod_clock(); +- cqr = (struct dasd_ccw_req *) intparm; + /* check for conditions that should be handled immediately */ + if (!cqr || + !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && +diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c +index e1a8cc2526e7..c846a63ea672 100644 +--- a/drivers/s390/scsi/zfcp_dbf.c ++++ b/drivers/s390/scsi/zfcp_dbf.c +@@ -3,7 +3,7 @@ + * + * Debug traces for zfcp. + * +- * Copyright IBM Corp. 2002, 2010 ++ * Copyright IBM Corp. 2002, 2016 + */ + + #define KMSG_COMPONENT "zfcp" +@@ -58,7 +58,7 @@ void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area, + * @tag: tag indicating which kind of unsolicited status has been received + * @req: request for which a response was received + */ +-void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req) ++void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req) + { + struct zfcp_dbf *dbf = req->adapter->dbf; + struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix; +@@ -78,6 +78,8 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req) + rec->u.res.req_issued = req->issued; + rec->u.res.prot_status = q_pref->prot_status; + rec->u.res.fsf_status = q_head->fsf_status; ++ rec->u.res.port_handle = q_head->port_handle; ++ rec->u.res.lun_handle = q_head->lun_handle; + + memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual, + FSF_PROT_STATUS_QUAL_SIZE); +@@ -90,7 +92,7 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req) + rec->pl_len, "fsf_res", req->req_id); + } + +- debug_event(dbf->hba, 1, rec, sizeof(*rec)); ++ debug_event(dbf->hba, level, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->hba_lock, flags); + } + +@@ -234,7 +236,8 @@ static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec, + if (sdev) { + rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status); + rec->lun = zfcp_scsi_dev_lun(sdev); +- } ++ } else ++ rec->lun = ZFCP_DBF_INVALID_LUN; + } + + /** +@@ -313,13 +316,48 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp) + spin_unlock_irqrestore(&dbf->rec_lock, flags); + } + ++/** ++ * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery ++ * @tag: identifier for event ++ * @wka_port: well known address port ++ * @req_id: request ID to correlate with potential HBA trace record ++ */ ++void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port, ++ u64 req_id) ++{ ++ struct zfcp_dbf *dbf = wka_port->adapter->dbf; ++ struct zfcp_dbf_rec *rec = &dbf->rec_buf; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&dbf->rec_lock, flags); ++ memset(rec, 0, sizeof(*rec)); ++ ++ rec->id = ZFCP_DBF_REC_RUN; ++ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); ++ rec->port_status = wka_port->status; ++ rec->d_id = wka_port->d_id; ++ rec->lun = ZFCP_DBF_INVALID_LUN; ++ ++ rec->u.run.fsf_req_id = req_id; ++ rec->u.run.rec_status = ~0; ++ rec->u.run.rec_step = ~0; ++ rec->u.run.rec_action = ~0; ++ rec->u.run.rec_count = ~0; ++ ++ debug_event(dbf->rec, 1, rec, sizeof(*rec)); ++ spin_unlock_irqrestore(&dbf->rec_lock, flags); ++} ++ + static inline +-void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 
len, +- u64 req_id, u32 d_id) ++void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, ++ char *paytag, struct scatterlist *sg, u8 id, u16 len, ++ u64 req_id, u32 d_id, u16 cap_len) + { + struct zfcp_dbf_san *rec = &dbf->san_buf; + u16 rec_len; + unsigned long flags; ++ struct zfcp_dbf_pay *payload = &dbf->pay_buf; ++ u16 pay_sum = 0; + + spin_lock_irqsave(&dbf->san_lock, flags); + memset(rec, 0, sizeof(*rec)); +@@ -327,10 +365,41 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len, + rec->id = id; + rec->fsf_req_id = req_id; + rec->d_id = d_id; +- rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD); +- memcpy(rec->payload, data, rec_len); + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); ++ rec->pl_len = len; /* full length even if we cap pay below */ ++ if (!sg) ++ goto out; ++ rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD); ++ memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */ ++ if (len <= rec_len) ++ goto out; /* skip pay record if full content in rec->payload */ ++ ++ /* if (len > rec_len): ++ * dump data up to cap_len ignoring small duplicate in rec->payload ++ */ ++ spin_lock(&dbf->pay_lock); ++ memset(payload, 0, sizeof(*payload)); ++ memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN); ++ payload->fsf_req_id = req_id; ++ payload->counter = 0; ++ for (; sg && pay_sum < cap_len; sg = sg_next(sg)) { ++ u16 pay_len, offset = 0; ++ ++ while (offset < sg->length && pay_sum < cap_len) { ++ pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC, ++ (u16)(sg->length - offset)); ++ /* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */ ++ memcpy(payload->data, sg_virt(sg) + offset, pay_len); ++ debug_event(dbf->pay, 1, payload, ++ zfcp_dbf_plen(pay_len)); ++ payload->counter++; ++ offset += pay_len; ++ pay_sum += pay_len; ++ } ++ } ++ spin_unlock(&dbf->pay_lock); + ++out: + debug_event(dbf->san, 1, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->san_lock, flags); + } +@@ -347,9 +416,62 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id) + struct zfcp_fsf_ct_els *ct_els = fsf->data; + u16 length; + +- length = (u16)(ct_els->req->length + FC_CT_HDR_LEN); +- zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length, +- fsf->req_id, d_id); ++ length = (u16)zfcp_qdio_real_bytes(ct_els->req); ++ zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ, ++ length, fsf->req_id, d_id, length); ++} ++ ++static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag, ++ struct zfcp_fsf_req *fsf, ++ u16 len) ++{ ++ struct zfcp_fsf_ct_els *ct_els = fsf->data; ++ struct fc_ct_hdr *reqh = sg_virt(ct_els->req); ++ struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1); ++ struct scatterlist *resp_entry = ct_els->resp; ++ struct fc_gpn_ft_resp *acc; ++ int max_entries, x, last = 0; ++ ++ if (!(memcmp(tag, "fsscth2", 7) == 0 ++ && ct_els->d_id == FC_FID_DIR_SERV ++ && reqh->ct_rev == FC_CT_REV ++ && reqh->ct_in_id[0] == 0 ++ && reqh->ct_in_id[1] == 0 ++ && reqh->ct_in_id[2] == 0 ++ && reqh->ct_fs_type == FC_FST_DIR ++ && reqh->ct_fs_subtype == FC_NS_SUBTYPE ++ && reqh->ct_options == 0 ++ && reqh->_ct_resvd1 == 0 ++ && reqh->ct_cmd == FC_NS_GPN_FT ++ /* reqh->ct_mr_size can vary so do not match but read below */ ++ && reqh->_ct_resvd2 == 0 ++ && reqh->ct_reason == 0 ++ && reqh->ct_explan == 0 ++ && reqh->ct_vendor == 0 ++ && reqn->fn_resvd == 0 ++ && reqn->fn_domain_id_scope == 0 ++ && reqn->fn_area_id_scope == 0 ++ && reqn->fn_fc4_type == FC_TYPE_FCP)) ++ return len; /* not GPN_FT response so do not cap */ ++ ++ acc = 
sg_virt(resp_entry); ++ max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp)) ++ + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one ++ * to account for header as 1st pseudo "entry" */; ++ ++ /* the basic CT_IU preamble is the same size as one entry in the GPN_FT ++ * response, allowing us to skip special handling for it - just skip it ++ */ ++ for (x = 1; x < max_entries && !last; x++) { ++ if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1)) ++ acc++; ++ else ++ acc = sg_virt(++resp_entry); ++ ++ last = acc->fp_flags & FC_NS_FID_LAST; ++ } ++ len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp))); ++ return len; /* cap after last entry */ + } + + /** +@@ -363,9 +485,10 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf) + struct zfcp_fsf_ct_els *ct_els = fsf->data; + u16 length; + +- length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN); +- zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length, +- fsf->req_id, 0); ++ length = (u16)zfcp_qdio_real_bytes(ct_els->resp); ++ zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES, ++ length, fsf->req_id, ct_els->d_id, ++ zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length)); + } + + /** +@@ -379,11 +502,13 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf) + struct fsf_status_read_buffer *srb = + (struct fsf_status_read_buffer *) fsf->data; + u16 length; ++ struct scatterlist sg; + + length = (u16)(srb->length - + offsetof(struct fsf_status_read_buffer, payload)); +- zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length, +- fsf->req_id, ntoh24(srb->d_id)); ++ sg_init_one(&sg, srb->payload.data, length); ++ zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length, ++ fsf->req_id, ntoh24(srb->d_id), length); + } + + /** +@@ -392,7 +517,8 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf) + * @sc: pointer to struct scsi_cmnd + * @fsf: pointer to struct zfcp_fsf_req + */ +-void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf) ++void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc, ++ struct zfcp_fsf_req *fsf) + { + struct zfcp_adapter *adapter = + (struct zfcp_adapter *) sc->device->host->hostdata[0]; +@@ -434,7 +560,7 @@ void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf) + } + } + +- debug_event(dbf->scsi, 1, rec, sizeof(*rec)); ++ debug_event(dbf->scsi, level, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->scsi_lock, flags); + } + +diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h +index 3ac7a4b30dd9..440aa619da1d 100644 +--- a/drivers/s390/scsi/zfcp_dbf.h ++++ b/drivers/s390/scsi/zfcp_dbf.h +@@ -2,7 +2,7 @@ + * zfcp device driver + * debug feature declarations + * +- * Copyright IBM Corp. 2008, 2010 ++ * Copyright IBM Corp. 
2008, 2015
+ */
+
+ #ifndef ZFCP_DBF_H
+@@ -17,6 +17,11 @@
+
+ #define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull
+
++enum zfcp_dbf_pseudo_erp_act_type {
++ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD = 0xff,
++ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL = 0xfe,
++};
++
+ /**
+ * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
+ * @ready: number of ready recovery actions
+@@ -110,6 +115,7 @@ struct zfcp_dbf_san {
+ u32 d_id;
+ #define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
+ char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
++ u16 pl_len;
+ } __packed;
+
+ /**
+@@ -126,6 +132,8 @@ struct zfcp_dbf_hba_res {
+ u8 prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
+ u32 fsf_status;
+ u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
++ u32 port_handle;
++ u32 lun_handle;
+ } __packed;
+
+ /**
+@@ -279,7 +287,7 @@ static inline
+ void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
+ {
+ if (level <= req->adapter->dbf->hba->level)
+- zfcp_dbf_hba_fsf_res(tag, req);
++ zfcp_dbf_hba_fsf_res(tag, level, req);
+ }
+
+ /**
+@@ -318,7 +326,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
+ scmd->device->host->hostdata[0];
+
+ if (level <= adapter->dbf->scsi->level)
+- zfcp_dbf_scsi(tag, scmd, req);
++ zfcp_dbf_scsi(tag, level, scmd, req);
+ }
+
+ /**
+diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
+index 8e8f3533d2a1..b4cd26d24152 100644
+--- a/drivers/s390/scsi/zfcp_erp.c
++++ b/drivers/s390/scsi/zfcp_erp.c
+@@ -3,7 +3,7 @@
+ *
+ * Error Recovery Procedures (ERP).
+ *
+- * Copyright IBM Corp. 2002, 2010
++ * Copyright IBM Corp. 2002, 2015
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -1225,8 +1225,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
+ break;
+
+ case ZFCP_ERP_ACTION_REOPEN_PORT:
+- if (result == ZFCP_ERP_SUCCEEDED)
+- zfcp_scsi_schedule_rport_register(port);
++ /* This switch case might also happen after a forced reopen
++ * was successfully done and thus overwritten with a new
++ * non-forced reopen at `ersfs_2'. In this case, we must not
++ * do the clean-up of the non-forced version.
++ */
++ if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
++ if (result == ZFCP_ERP_SUCCEEDED)
++ zfcp_scsi_schedule_rport_register(port);
+ /* fall through */
+ case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+ put_device(&port->dev);
+diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
+index 1d3dd3f7d699..01527c31d1da 100644
+--- a/drivers/s390/scsi/zfcp_ext.h
++++ b/drivers/s390/scsi/zfcp_ext.h
+@@ -3,7 +3,7 @@
+ *
+ * External function declarations.
+ *
+- * Copyright IBM Corp. 2002, 2010
++ * Copyright IBM Corp. 
2002, 2015
+ */
+
+ #ifndef ZFCP_EXT_H
+@@ -49,8 +49,9 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
+ extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
+ struct zfcp_port *, struct scsi_device *, u8, u8);
+ extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
++extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
+ extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
+-extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
++extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
+@@ -58,7 +59,8 @@ extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
+ extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
+ extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
+-extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *);
++extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
++ struct zfcp_fsf_req *);
+
+ /* zfcp_erp.c */
+ extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
+diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
+index 9152999a0707..f246097b7c6d 100644
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -3,7 +3,7 @@
+ *
+ * Implementation of FSF commands.
+ *
+- * Copyright IBM Corp. 2002, 2013
++ * Copyright IBM Corp. 2002, 2015
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -513,7 +513,10 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
+ fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+ break;
+ case FSF_TOPO_FABRIC:
+- fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
++ if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
++ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
++ else
++ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+ break;
+ case FSF_TOPO_AL:
+ fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+@@ -618,7 +621,6 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
+
+ if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
+ fc_host_permanent_port_name(shost) = bottom->wwpn;
+- fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ } else
+ fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
+ fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
+@@ -988,8 +990,12 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
+ if (zfcp_adapter_multi_buffer_active(adapter)) {
+ if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
+ return -EIO;
++ qtcb->bottom.support.req_buf_length =
++ zfcp_qdio_real_bytes(sg_req);
+ if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
+ return -EIO;
++ qtcb->bottom.support.resp_buf_length =
++ zfcp_qdio_real_bytes(sg_resp);
+
+ zfcp_qdio_set_data_div(qdio, &req->qdio_req,
+ zfcp_qdio_sbale_count(sg_req));
+@@ -1079,6 +1085,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
+
+ req->handler = zfcp_fsf_send_ct_handler;
+ req->qtcb->header.port_handle = wka_port->handle;
++ ct->d_id = wka_port->d_id;
+ req->data = ct;
+
+ zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
+@@ -1182,6 +1189,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
+
+ hton24(req->qtcb->bottom.support.d_id, d_id);
+ req->handler = zfcp_fsf_send_els_handler;
++ els->d_id = 
d_id;
+ req->data = els;
+
+ zfcp_dbf_san_req("fssels1", req, d_id);
+@@ -1599,7 +1607,7 @@ out:
+ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
+ {
+ struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+- struct zfcp_fsf_req *req;
++ struct zfcp_fsf_req *req = NULL;
+ int retval = -EIO;
+
+ spin_lock_irq(&qdio->req_q_lock);
+@@ -1628,6 +1636,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
+ zfcp_fsf_req_free(req);
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
++ if (req && !IS_ERR(req))
++ zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
+ return retval;
+ }
+
+@@ -1652,7 +1662,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
+ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
+ {
+ struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+- struct zfcp_fsf_req *req;
++ struct zfcp_fsf_req *req = NULL;
+ int retval = -EIO;
+
+ spin_lock_irq(&qdio->req_q_lock);
+@@ -1681,6 +1691,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
+ zfcp_fsf_req_free(req);
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
++ if (req && !IS_ERR(req))
++ zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
+ return retval;
+ }
+
+diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
+index 5e795b86931b..8cad41ffb6b8 100644
+--- a/drivers/s390/scsi/zfcp_fsf.h
++++ b/drivers/s390/scsi/zfcp_fsf.h
+@@ -3,7 +3,7 @@
+ *
+ * Interface to the FSF support functions.
+ *
+- * Copyright IBM Corp. 2002, 2010
++ * Copyright IBM Corp. 2002, 2015
+ */
+
+ #ifndef FSF_H
+@@ -462,6 +462,7 @@ struct zfcp_blk_drv_data {
+ * @handler_data: data passed to handler function
+ * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
+ * @status: used to pass error status to calling function
++ * @d_id: Destination ID of either open WKA port for CT or of D_ID for ELS
+ */
+ struct zfcp_fsf_ct_els {
+ struct scatterlist *req;
+@@ -470,6 +471,7 @@ struct zfcp_fsf_ct_els {
+ void *handler_data;
+ struct zfcp_port *port;
+ int status;
++ u32 d_id;
+ };
+
+ #endif /* FSF_H */
+diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
+index 7b353647cb90..38ee0df633a3 100644
+--- a/drivers/s390/scsi/zfcp_scsi.c
++++ b/drivers/s390/scsi/zfcp_scsi.c
+@@ -3,7 +3,7 @@
+ *
+ * Interface to Linux SCSI midlayer.
+ *
+- * Copyright IBM Corp. 2002, 2013
++ * Copyright IBM Corp. 
2002, 2015
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -577,6 +577,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
+ ids.port_id = port->d_id;
+ ids.roles = FC_RPORT_ROLE_FCP_TARGET;
+
++ zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
++ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
++ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
+ rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
+ if (!rport) {
+ dev_err(&port->adapter->ccw_device->dev,
+@@ -598,6 +601,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
+ struct fc_rport *rport = port->rport;
+
+ if (rport) {
++ zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
++ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
++ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
+ fc_remote_port_delete(rport);
+ port->rport = NULL;
+ }
+diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
+index 1822cb9ec623..8d9477cc3227 100644
+--- a/drivers/scsi/arcmsr/arcmsr_hba.c
++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
+@@ -1803,7 +1803,8 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
+
+ case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
+ unsigned char *ver_addr;
+- int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
++ uint32_t user_len;
++ int32_t my_empty_len, wqbuf_firstindex, wqbuf_lastindex;
+ uint8_t *pQbuffer, *ptmpuserbuffer;
+
+ ver_addr = kmalloc(1032, GFP_ATOMIC);
+@@ -1820,6 +1821,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
+ }
+ ptmpuserbuffer = ver_addr;
+ user_len = pcmdmessagefld->cmdmessage.Length;
++ if (user_len > 1032) {
++ retvalue = ARCMSR_MESSAGE_FAIL;
++ kfree(ver_addr);
++ goto message_out;
++ }
+ memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
+ wqbuf_lastindex = acb->wqbuf_lastindex;
+ wqbuf_firstindex = acb->wqbuf_firstindex;
+@@ -2063,18 +2069,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
+ struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
+ struct CommandControlBlock *ccb;
+ int target = cmd->device->id;
+- int lun = cmd->device->lun;
+- uint8_t scsicmd = cmd->cmnd[0];
+ cmd->scsi_done = done;
+ cmd->host_scribble = NULL;
+ cmd->result = 0;
+- if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
+- if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
+- cmd->result = (DID_NO_CONNECT << 16);
+- }
+- cmd->scsi_done(cmd);
+- return 0;
+- }
+ if (target == 16) {
+ /* virtual device for iop message transfer */
+ arcmsr_handle_virtual_command(acb, cmd);
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index 4e31caa21ddf..920686155310 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -717,7 +717,6 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ vhost->state = IBMVFC_NO_CRQ;
+ vhost->logged_in = 0;
+- ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+
+ /* Clean out the queue */
+ memset(crq->msgs, 0, PAGE_SIZE);
+diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
+index 280e769a1686..a0e0a61dc882 100644
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -1402,7 +1402,7 @@ struct megasas_instance_template {
+ };
+
+ #define MEGASAS_IS_LOGICAL(scp) \
+- (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
++ ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 
0 : 1)
+
+ #define MEGASAS_DEV_INDEX(inst, scp) \
+ ((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 6ced6a398d60..0626a168c55b 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -1487,16 +1487,13 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
+ goto out_done;
+ }
+
+- switch (scmd->cmnd[0]) {
+- case SYNCHRONIZE_CACHE:
+- /*
+- * FW takes care of flush cache on its own
+- * No need to send it down
+- */
++ /*
++ * FW takes care of flush cache on its own for Virtual Disk.
++ * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
++ */
++ if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
+ scmd->result = DID_OK << 16;
+ goto out_done;
+- default:
+- break;
+ }
+
+ if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index fe76185cd79a..64caa5ce3237 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -3926,6 +3926,11 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
+ }
+ }
+
++static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
++{
++ return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
++}
++
+ /**
+ * _scsih_qcmd - main scsi request entry point
+ * @scmd: pointer to scsi command object
+@@ -3948,6 +3953,13 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+ u32 mpi_control;
+ u16 smid;
+
++ /**
++ * Lock the device for any subsequent command until
++ * command is done.
++ */
++ if (ata_12_16_cmd(scmd))
++ scsi_internal_device_block(scmd->device);
++
+ scmd->scsi_done = done;
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+@@ -4454,6 +4466,9 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+ if (scmd == NULL)
+ return 1;
+
++ if (ata_12_16_cmd(scmd))
++ scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
++
+ mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
+
+ if (mpi_reply == NULL) {
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
+index 994656cbfac9..997e13f6d1ac 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
+@@ -219,6 +219,7 @@ struct MPT3SAS_TARGET {
+ * @eedp_enable: eedp support enable bit
+ * @eedp_type: 0(type_1), 1(type_2), 2(type_3)
+ * @eedp_block_length: block size
++ * @ata_command_pending: SATL passthrough outstanding for device
+ */
+ struct MPT3SAS_DEVICE {
+ struct MPT3SAS_TARGET *sas_target;
+@@ -227,6 +228,17 @@ struct MPT3SAS_DEVICE {
+ u8 configured_lun;
+ u8 block;
+ u8 tlr_snoop_check;
++ /*
++ * Bug workaround for SATL handling: the mpt2/3sas firmware
++ * doesn't return BUSY or TASK_SET_FULL for subsequent
++ * commands while a SATL pass through is in operation as the
++ * spec requires, it simply does nothing with them until the
++ * pass through completes, causing them possibly to timeout if
++ * the passthrough is a long executing command (like format or
++ * secure erase). This variable allows us to do the right
++ * thing while a SATL command is pending. 
++ */
++ unsigned long ata_command_pending;
+ };
+
+ #define MPT3_CMD_NOT_USED 0x8000 /* free */
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index f8c4b8564251..1d6e115571c9 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -3390,6 +3390,20 @@ _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
+ le16_to_cpu(event_data->VolDevHandle));
+ }
+
++static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
++{
++ struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
++
++ if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
++ return 0;
++
++ if (pending)
++ return test_and_set_bit(0, &priv->ata_command_pending);
++
++ clear_bit(0, &priv->ata_command_pending);
++ return 0;
++}
++
+ /**
+ * _scsih_flush_running_cmds - completing outstanding commands.
+ * @ioc: per adapter object
+@@ -3411,6 +3425,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
+ if (!scmd)
+ continue;
+ count++;
++ _scsih_set_satl_pending(scmd, false);
+ mpt3sas_base_free_smid(ioc, smid);
+ scsi_dma_unmap(scmd);
+ if (ioc->pci_error_recovery)
+@@ -3515,7 +3530,6 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
+ SAM_STAT_CHECK_CONDITION;
+ }
+
+-
+ /**
+ * _scsih_qcmd_lck - main scsi request entry point
+ * @scmd: pointer to scsi command object
+@@ -3557,6 +3571,19 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+ return 0;
+ }
+
++ /*
++ * Bug work around for firmware SATL handling. The loop
++ * is based on atomic operations and ensures consistency
++ * since we're lockless at this point
++ */
++ do {
++ if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
++ scmd->result = SAM_STAT_BUSY;
++ scmd->scsi_done(scmd);
++ return 0;
++ }
++ } while (_scsih_set_satl_pending(scmd, true));
++
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+
+ /* invalid device handle */
+@@ -4046,6 +4073,8 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+ if (scmd == NULL)
+ return 1;
+
++ _scsih_set_satl_pending(scmd, false);
++
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+ if (mpi_reply == NULL) {
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index 0a537a0515ca..be86e7a02bbc 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -3504,6 +3504,7 @@ static void __exit scsi_debug_exit(void)
+ bus_unregister(&pseudo_lld_bus);
+ root_device_unregister(pseudo_primary);
+
++ vfree(map_storep);
+ if (dif_storep)
+ vfree(dif_storep);
+
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 859240408f9e..92d4f65cbc2e 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1517,12 +1517,12 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
+ out_err:
+ kfree(lun_data);
+ out:
+- scsi_device_put(sdev);
+ if (scsi_device_created(sdev))
+ /*
+ * the sdev we used didn't appear in the report luns scan
+ */
+ __scsi_remove_device(sdev);
++ scsi_device_put(sdev);
+ return ret;
+ }
+
+diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
+index bc23d66a7a1e..1ff17352abde 100644
+--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
++++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
+@@ -646,6 +646,7 @@ static void ad5933_work(struct work_struct *work)
+ struct iio_dev *indio_dev = i2c_get_clientdata(st->client);
+ signed short buf[2];
+ unsigned char 
status;
++ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ if (st->state == AD5933_CTRL_INIT_START_FREQ) {
+@@ -653,19 +654,22 @@ static void ad5933_work(struct work_struct *work)
+ ad5933_cmd(st, AD5933_CTRL_START_SWEEP);
+ st->state = AD5933_CTRL_START_SWEEP;
+ schedule_delayed_work(&st->work, st->poll_time_jiffies);
+- mutex_unlock(&indio_dev->mlock);
+- return;
++ goto out;
+ }
+
+- ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
++ ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
++ if (ret)
++ goto out;
+
+ if (status & AD5933_STAT_DATA_VALID) {
+ int scan_count = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+- ad5933_i2c_read(st->client,
++ ret = ad5933_i2c_read(st->client,
+ test_bit(1, indio_dev->active_scan_mask) ?
+ AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
+ scan_count * 2, (u8 *)buf);
++ if (ret)
++ goto out;
+
+ if (scan_count == 2) {
+ buf[0] = be16_to_cpu(buf[0]);
+@@ -677,8 +681,7 @@ static void ad5933_work(struct work_struct *work)
+ } else {
+ /* no data available - try again later */
+ schedule_delayed_work(&st->work, st->poll_time_jiffies);
+- mutex_unlock(&indio_dev->mlock);
+- return;
++ goto out;
+ }
+
+ if (status & AD5933_STAT_SWEEP_DONE) {
+@@ -690,7 +693,7 @@ static void ad5933_work(struct work_struct *work)
+ ad5933_cmd(st, AD5933_CTRL_INC_FREQ);
+ schedule_delayed_work(&st->work, st->poll_time_jiffies);
+ }
+-
++out:
+ mutex_unlock(&indio_dev->mlock);
+ }
+
+diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
+index 1afe192bef6a..b5cbe12e2815 100644
+--- a/drivers/tty/tty_ldisc.c
++++ b/drivers/tty/tty_ldisc.c
+@@ -400,6 +400,10 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
+ * they are not on hot paths so a little discipline won't do
+ * any harm.
+ *
++ * The line discipline-related tty_struct fields are reset to
++ * prevent the ldisc driver from re-using stale information for
++ * the new ldisc instance. 
++
+ * Locking: takes termios_mutex
+ */
+
+@@ -408,6 +412,9 @@ static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
+ {
+ mutex_lock(&tty->termios_mutex);
+ tty->termios.c_line = num;
+ mutex_unlock(&tty->termios_mutex);
++
++ tty->disc_data = NULL;
++ tty->receive_room = 0;
+ }
+
+ /**
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 6dff194751f1..010ec70d59fb 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -863,10 +863,15 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
+ if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
+ return 0;
+
++ if (new_screen_size > (4 << 20))
++ return -EINVAL;
+ newscreen = kmalloc(new_screen_size, GFP_USER);
+ if (!newscreen)
+ return -ENOMEM;
+
++ if (vc == sel_cons)
++ clear_selection();
++
+ old_rows = vc->vc_rows;
+ old_row_size = vc->vc_size_row;
+
+@@ -1164,7 +1169,7 @@ static void csi_J(struct vc_data *vc, int vpar)
+ break;
+ case 3: /* erase scroll-back buffer (and whole display) */
+ scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
+- vc->vc_screenbuf_size >> 1);
++ vc->vc_screenbuf_size);
+ set_origin(vc);
+ if (CON_IS_VISIBLE(vc))
+ update_screen(vc);
+diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
+index 252434c9ea9d..2290b1f4b41f 100644
+--- a/drivers/uio/uio_dmem_genirq.c
++++ b/drivers/uio/uio_dmem_genirq.c
+@@ -229,7 +229,7 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
+ ++uiomem;
+ }
+
+- priv->dmem_region_start = i;
++ priv->dmem_region_start = uiomem - &uioinfo->mem[0];
+ priv->num_dmem_regions = pdata->num_dynamic_regions;
+
+ for (i = 0; i < pdata->num_dynamic_regions; ++i) {
+diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
+index 475c9c114689..b77badb68890 100644
+--- a/drivers/usb/chipidea/core.c
++++ b/drivers/usb/chipidea/core.c
+@@ -381,6 +381,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
+ return -ENOMEM;
+ }
+
++ spin_lock_init(&ci->lock);
+ ci->dev = dev;
+ ci->platdata = dev->platform_data;
+ if (ci->platdata->phy)
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index f1cab425163f..45c8ffa798b8 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -1647,8 +1647,6 @@ static int udc_start(struct ci13xxx *ci)
+ struct device *dev = ci->dev;
+ int retval = 0;
+
+- spin_lock_init(&ci->lock);
+-
+ ci->gadget.ops = &usb_gadget_ops;
+ ci->gadget.speed = USB_SPEED_UNKNOWN;
+ ci->gadget.max_speed = USB_SPEED_HIGH;
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index e7436ebbf04c..b364845de5ad 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1213,7 +1213,6 @@ made_compressed_probe:
+ spin_lock_init(&acm->write_lock);
+ spin_lock_init(&acm->read_lock);
+ mutex_init(&acm->mutex);
+- acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
+ acm->is_int_ep = usb_endpoint_xfer_int(epread);
+ if (acm->is_int_ep)
+ acm->bInterval = epread->bInterval;
+@@ -1262,14 +1261,14 @@ made_compressed_probe:
+ urb->transfer_dma = rb->dma;
+ if (acm->is_int_ep) {
+ usb_fill_int_urb(urb, acm->dev,
+- acm->rx_endpoint,
++ usb_rcvintpipe(usb_dev, epread->bEndpointAddress),
+ rb->base,
+ acm->readsize,
+ acm_read_bulk_callback, rb,
+ acm->bInterval);
+ } else {
+ usb_fill_bulk_urb(urb, acm->dev,
+- acm->rx_endpoint,
++ usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress),
+ rb->base,
+ acm->readsize,
+ acm_read_bulk_callback, rb);
+diff --git a/drivers/usb/class/cdc-acm.h 
b/drivers/usb/class/cdc-acm.h
+index 1683ac161cf6..bf4e1bb4fb27 100644
+--- a/drivers/usb/class/cdc-acm.h
++++ b/drivers/usb/class/cdc-acm.h
+@@ -95,7 +95,6 @@ struct acm {
+ struct urb *read_urbs[ACM_NR];
+ struct acm_rb read_buffers[ACM_NR];
+ int rx_buflimit;
+- int rx_endpoint;
+ spinlock_t read_lock;
+ int write_used; /* number of non-empty write buffers */
+ int transmitting;
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 9b05e88d6220..3252bb2dcb80 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -144,6 +144,31 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ }
+ }
+
++static const unsigned short low_speed_maxpacket_maxes[4] = {
++ [USB_ENDPOINT_XFER_CONTROL] = 8,
++ [USB_ENDPOINT_XFER_ISOC] = 0,
++ [USB_ENDPOINT_XFER_BULK] = 0,
++ [USB_ENDPOINT_XFER_INT] = 8,
++};
++static const unsigned short full_speed_maxpacket_maxes[4] = {
++ [USB_ENDPOINT_XFER_CONTROL] = 64,
++ [USB_ENDPOINT_XFER_ISOC] = 1023,
++ [USB_ENDPOINT_XFER_BULK] = 64,
++ [USB_ENDPOINT_XFER_INT] = 64,
++};
++static const unsigned short high_speed_maxpacket_maxes[4] = {
++ [USB_ENDPOINT_XFER_CONTROL] = 64,
++ [USB_ENDPOINT_XFER_ISOC] = 1024,
++ [USB_ENDPOINT_XFER_BULK] = 512,
++ [USB_ENDPOINT_XFER_INT] = 1024,
++};
++static const unsigned short super_speed_maxpacket_maxes[4] = {
++ [USB_ENDPOINT_XFER_CONTROL] = 512,
++ [USB_ENDPOINT_XFER_ISOC] = 1024,
++ [USB_ENDPOINT_XFER_BULK] = 1024,
++ [USB_ENDPOINT_XFER_INT] = 1024,
++};
++
+ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ int asnum, struct usb_host_interface *ifp, int num_ep,
+ unsigned char *buffer, int size)
+@@ -152,6 +177,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ struct usb_endpoint_descriptor *d;
+ struct usb_host_endpoint *endpoint;
+ int n, i, j, retval;
++ unsigned int maxp;
++ const unsigned short *maxpacket_maxes;
+
+ d = (struct usb_endpoint_descriptor *) buffer;
+ buffer += d->bLength;
+@@ -186,8 +213,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ memcpy(&endpoint->desc, d, n);
+ INIT_LIST_HEAD(&endpoint->urb_list);
+
+- /* Fix up bInterval values outside the legal range. Use 32 ms if no
+- * proper value can be guessed. */
++ /*
++ * Fix up bInterval values outside the legal range.
++ * Use 10 or 8 ms if no proper value can be guessed.
++ */
+ i = 0; /* i = min, j = max, n = default */
+ j = 255;
+ if (usb_endpoint_xfer_int(d)) {
+@@ -195,20 +224,24 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ switch (to_usb_device(ddev)->speed) {
+ case USB_SPEED_SUPER:
+ case USB_SPEED_HIGH:
+- /* Many device manufacturers are using full-speed
++ /*
++ * Many device manufacturers are using full-speed
+ * bInterval values in high-speed interrupt endpoint
+- * descriptors. Try to fix those and fall back to a
+- * 32 ms default value otherwise. */
++ * descriptors. Try to fix those and fall back to an
++ * 8-ms default value otherwise.
++ */
+ n = fls(d->bInterval*8);
+ if (n == 0)
+- n = 9; /* 32 ms = 2^(9-1) uframes */
++ n = 7; /* 8 ms = 2^(7-1) uframes */
+ j = 16;
+ break;
+ default: /* USB_SPEED_FULL or _LOW */
+- /* For low-speed, 10 ms is the official minimum.
++ /*
++ * For low-speed, 10 ms is the official minimum.
+ * But some "overclocked" devices might want faster
+- * polling so we'll allow it. */
+- n = 32;
++ * polling so we'll allow it. 
++ */
++ n = 10;
+ break;
+ }
+ } else if (usb_endpoint_xfer_isoc(d)) {
+@@ -216,10 +249,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ j = 16;
+ switch (to_usb_device(ddev)->speed) {
+ case USB_SPEED_HIGH:
+- n = 9; /* 32 ms = 2^(9-1) uframes */
++ n = 7; /* 8 ms = 2^(7-1) uframes */
+ break;
+ default: /* USB_SPEED_FULL */
+- n = 6; /* 32 ms = 2^(6-1) frames */
++ n = 4; /* 8 ms = 2^(4-1) frames */
+ break;
+ }
+ }
+@@ -247,6 +280,41 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
+ }
+
++ /* Validate the wMaxPacketSize field */
++ maxp = usb_endpoint_maxp(&endpoint->desc);
++
++ /* Find the highest legal maxpacket size for this endpoint */
++ i = 0; /* additional transactions per microframe */
++ switch (to_usb_device(ddev)->speed) {
++ case USB_SPEED_LOW:
++ maxpacket_maxes = low_speed_maxpacket_maxes;
++ break;
++ case USB_SPEED_FULL:
++ maxpacket_maxes = full_speed_maxpacket_maxes;
++ break;
++ case USB_SPEED_HIGH:
++ /* Bits 12..11 are allowed only for HS periodic endpoints */
++ if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
++ i = maxp & (BIT(12) | BIT(11));
++ maxp &= ~i;
++ }
++ /* fallthrough */
++ default:
++ maxpacket_maxes = high_speed_maxpacket_maxes;
++ break;
++ case USB_SPEED_SUPER:
++ maxpacket_maxes = super_speed_maxpacket_maxes;
++ break;
++ }
++ j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)];
++
++ if (maxp > j) {
++ dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
++ cfgno, inum, asnum, d->bEndpointAddress, maxp, j);
++ maxp = j;
++ endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp);
++ }
++
+ /*
+ * Some buggy high speed devices have bulk endpoints using
+ * maxpacket sizes other than 512. High speed HCDs may not
+@@ -254,9 +322,6 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ */
+ if (to_usb_device(ddev)->speed == USB_SPEED_HIGH
+ && usb_endpoint_xfer_bulk(d)) {
+- unsigned maxp;
+-
+- maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff;
+ if (maxp != 512)
+ dev_warn(ddev, "config %d interface %d altsetting %d "
+ "bulk endpoint 0x%X has invalid maxpacket %d\n",
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 6e70c88b25fb..0dfee61f7878 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1802,14 +1802,6 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ s_pkt = 1;
+ }
+
+- /*
+- * We assume here we will always receive the entire data block
+- * which we should receive. Meaning, if we program RX to
+- * receive 4K but we receive only 2K, we assume that's all we
+- * should receive and we simply bounce the request back to the
+- * gadget driver for further processing. 
+- */
+- req->request.actual += req->request.length - count;
+ if (s_pkt)
+ return 1;
+ if ((event->status & DEPEVT_STATUS_LST) &&
+@@ -1829,6 +1821,7 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ struct dwc3_trb *trb;
+ unsigned int slot;
+ unsigned int i;
++ int count = 0;
+ int ret;
+
+ do {
+@@ -1845,6 +1838,8 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ slot++;
+ slot %= DWC3_TRB_NUM;
+ trb = &dep->trb_pool[slot];
++ count += trb->size & DWC3_TRB_SIZE_MASK;
++
+
+ ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
+ event, status);
+@@ -1852,6 +1847,14 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ break;
+ }while (++i < req->request.num_mapped_sgs);
+
++ /*
++ * We assume here we will always receive the entire data block
++ * which we should receive. Meaning, if we program RX to
++ * receive 4K but we receive only 2K, we assume that's all we
++ * should receive and we simply bounce the request back to the
++ * gadget driver for further processing.
++ */
++ req->request.actual += req->request.length - count;
+ dwc3_gadget_giveback(dep, req, status);
+
+ if (ret)
+diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
+index 9a7ee3347e4d..9fd233003769 100644
+--- a/drivers/usb/gadget/fsl_qe_udc.c
++++ b/drivers/usb/gadget/fsl_qe_udc.c
+@@ -1881,11 +1881,8 @@ static int qe_get_frame(struct usb_gadget *gadget)
+
+ tmp = in_be16(&udc->usb_param->frame_n);
+ if (tmp & 0x8000)
+- tmp = tmp & 0x07ff;
+- else
+- tmp = -EINVAL;
+-
+- return (int)tmp;
++ return tmp & 0x07ff;
++ return -EINVAL;
+ }
+
+ static int fsl_qe_start(struct usb_gadget *gadget,
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 0f71c3a22507..0f6edce536cb 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -275,6 +275,9 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
+
+ ret = 0;
+ virt_dev = xhci->devs[slot_id];
++ if (!virt_dev)
++ return -ENODEV;
++
+ cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
+ if (!cmd) {
+ xhci_dbg(xhci, "Couldn't allocate command structure.\n");
+diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
+index 80894791c020..c3e9cfc7c276 100644
+--- a/drivers/usb/misc/legousbtower.c
++++ b/drivers/usb/misc/legousbtower.c
+@@ -953,24 +953,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
+ dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
+ dev->interrupt_out_interval = interrupt_out_interval ? 
interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
+
+- /* we can register the device now, as it is ready */
+- usb_set_intfdata (interface, dev);
+-
+- retval = usb_register_dev (interface, &tower_class);
+-
+- if (retval) {
+- /* something prevented us from registering this driver */
+- dev_err(idev, "Not able to get a minor for this device.\n");
+- usb_set_intfdata (interface, NULL);
+- goto error;
+- }
+- dev->minor = interface->minor;
+-
+- /* let the user know what node this device is now attached to */
+- dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major "
+- "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE),
+- USB_MAJOR, dev->minor);
+-
+ /* get the firmware version and log it */
+ result = usb_control_msg (udev,
+ usb_rcvctrlpipe(udev, 0),
+@@ -991,6 +973,23 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
+ get_version_reply.minor,
+ le16_to_cpu(get_version_reply.build_no));
+
++ /* we can register the device now, as it is ready */
++ usb_set_intfdata (interface, dev);
++
++ retval = usb_register_dev (interface, &tower_class);
++
++ if (retval) {
++ /* something prevented us from registering this driver */
++ dev_err(idev, "Not able to get a minor for this device.\n");
++ usb_set_intfdata (interface, NULL);
++ goto error;
++ }
++ dev->minor = interface->minor;
++
++ /* let the user know what node this device is now attached to */
++ dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major "
++ "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE),
++ USB_MAJOR, dev->minor);
+
+ exit:
+ dbg(2, "%s: leave, return value 0x%.8lx (dev)", __func__, (long) dev);
+diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
+index 6a030b931a3b..254194d61915 100644
+--- a/drivers/usb/renesas_usbhs/mod.c
++++ b/drivers/usb/renesas_usbhs/mod.c
+@@ -272,9 +272,16 @@ static irqreturn_t usbhs_interrupt(int irq, void *data)
+ usbhs_write(priv, INTSTS0, ~irq_state.intsts0 & INTSTS0_MAGIC);
+ usbhs_write(priv, INTSTS1, ~irq_state.intsts1 & INTSTS1_MAGIC);
+
+- usbhs_write(priv, BRDYSTS, ~irq_state.brdysts);
++ /*
++ * The driver should not clear the xxxSTS after the line of
++ * "call irq callback functions" because each "if" statement is
++ * possible to call the callback function for avoiding any side effects. 
++ */
++ if (irq_state.intsts0 & BRDY)
++ usbhs_write(priv, BRDYSTS, ~irq_state.brdysts);
+ usbhs_write(priv, NRDYSTS, ~irq_state.nrdysts);
+- usbhs_write(priv, BEMPSTS, ~irq_state.bempsts);
++ if (irq_state.intsts0 & BEMP)
++ usbhs_write(priv, BEMPSTS, ~irq_state.bempsts);
+
+ /*
+ * call irq callback functions
+diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
+index 78b48c31abf5..efa75b4e51f2 100644
+--- a/drivers/usb/serial/kobil_sct.c
++++ b/drivers/usb/serial/kobil_sct.c
+@@ -336,7 +336,8 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
+ port->interrupt_out_urb->transfer_buffer_length = length;
+
+ priv->cur_pos = priv->cur_pos + length;
+- result = usb_submit_urb(port->interrupt_out_urb, GFP_NOIO);
++ result = usb_submit_urb(port->interrupt_out_urb,
++ GFP_ATOMIC);
+ dev_dbg(&port->dev, "%s - Send write URB returns: %i\n", __func__, result);
+ todo = priv->filled - priv->cur_pos;
+
+@@ -351,7 +352,7 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
+ if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
+ priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) {
+ result = usb_submit_urb(port->interrupt_in_urb,
+- GFP_NOIO);
++ GFP_ATOMIC);
+ dev_dbg(&port->dev, "%s - Send read URB returns: %i\n", __func__, result);
+ }
+ }
+diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
+index 0f16bf6ea71c..ddc71d706ac6 100644
+--- a/drivers/usb/serial/mos7720.c
++++ b/drivers/usb/serial/mos7720.c
+@@ -1250,7 +1250,7 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port,
+
+ if (urb->transfer_buffer == NULL) {
+ urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
+- GFP_KERNEL);
++ GFP_ATOMIC);
+ if (urb->transfer_buffer == NULL) {
+ dev_err_console(port, "%s no more kernel memory...\n",
+ __func__);
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index d06013033def..7df7df62e177 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -1438,8 +1438,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
+ }
+
+ if (urb->transfer_buffer == NULL) {
+- urb->transfer_buffer =
+- kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
++ urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
++ GFP_ATOMIC);
+
+ if (urb->transfer_buffer == NULL) {
+ dev_err_console(port, "%s no more kernel memory...\n",
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index 80d689f0fda9..faeb36d6958d 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -1444,7 +1444,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
+
+ rc = usb_register(udriver);
+ if (rc)
+- return rc;
++ goto failed_usb_register;
+
+ for (sd = serial_drivers; *sd; ++sd) {
+ (*sd)->usb_driver = udriver;
+@@ -1462,6 +1462,8 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
+ while (sd-- > serial_drivers)
+ usb_serial_deregister(*sd);
+ usb_deregister(udriver);
++failed_usb_register:
++ kfree(udriver);
+ return rc;
+ }
+ EXPORT_SYMBOL_GPL(usb_serial_register_drivers);
+diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
+index 5e67f63..2155831 100644
+--- a/drivers/usb/storage/transport.c
++++ b/drivers/usb/storage/transport.c
+@@ -919,10 +919,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
+
+ /* COMMAND STAGE */
+ /* let's send the command via the 
control pipe */
++ /*
++ * Command is sometime (f.e. after scsi_eh_prep_cmnd) on the stack.
++ * Stack may be vmallocated. So no DMA for us. Make a copy.
++ */
++ memcpy(us->iobuf, srb->cmnd, srb->cmd_len);
+ result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
+ US_CBI_ADSC,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0,
+- us->ifnum, srb->cmnd, srb->cmd_len);
++ us->ifnum, us->iobuf, srb->cmd_len);
+
+ /* check the return code for the command */
+ usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n",
+diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
+index 50fe668c6172..08dbe8ae0212 100644
+--- a/drivers/video/efifb.c
++++ b/drivers/video/efifb.c
+@@ -270,9 +270,9 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ return 1;
+
+ if (regno < 16) {
+- red >>= 8;
+- green >>= 8;
+- blue >>= 8;
++ red >>= 16 - info->var.red.length;
++ green >>= 16 - info->var.green.length;
++ blue >>= 16 - info->var.blue.length;
+ ((u32 *)(info->pseudo_palette))[regno] =
+ (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
+index ba3fac8318bb..47a4177b16d2 100644
+--- a/drivers/xen/xen-pciback/conf_space.c
++++ b/drivers/xen/xen-pciback/conf_space.c
+@@ -16,8 +16,8 @@
+ #include "conf_space.h"
+ #include "conf_space_quirks.h"
+
+-bool permissive;
+-module_param(permissive, bool, 0644);
++bool xen_pcibk_permissive;
++module_param_named(permissive, xen_pcibk_permissive, bool, 0644);
+
+ /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
+ * xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */
+@@ -260,7 +260,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
+ * This means that some fields may still be read-only because
+ * they have entries in the config_field list that intercept
+ * the write and do nothing. */
+- if (dev_data->permissive || permissive) {
++ if (dev_data->permissive || xen_pcibk_permissive) {
+ switch (size) {
+ case 1:
+ err = pci_write_config_byte(dev, offset,
+diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
+index 2e1d73d1d5d0..62461a8ba1d6 100644
+--- a/drivers/xen/xen-pciback/conf_space.h
++++ b/drivers/xen/xen-pciback/conf_space.h
+@@ -64,7 +64,7 @@ struct config_field_entry {
+ void *data;
+ };
+
+-extern bool permissive;
++extern bool xen_pcibk_permissive;
+
+ #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
+
+diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
+index a5bb81a600f7..1667a9089a4a 100644
+--- a/drivers/xen/xen-pciback/conf_space_header.c
++++ b/drivers/xen/xen-pciback/conf_space_header.c
+@@ -105,7 +105,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
+
+ cmd->val = value;
+
+- if (!permissive && (!dev_data || !dev_data->permissive))
++ if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive))
+ return 0;
+
+ /* Only allow the guest to control certain bits. 
*/
+diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
+index f72af87640e0..560b3ecbcba8 100644
+--- a/drivers/xen/xen-pciback/pciback.h
++++ b/drivers/xen/xen-pciback/pciback.h
+@@ -37,6 +37,7 @@ struct xen_pcibk_device {
+ struct xen_pci_sharedinfo *sh_info;
+ unsigned long flags;
+ struct work_struct op_work;
++ struct xen_pci_op op;
+ };
+
+ struct xen_pcibk_dev_data {
+diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
+index b98cf0c35725..6c17f92341f5 100644
+--- a/drivers/xen/xen-pciback/pciback_ops.c
++++ b/drivers/xen/xen-pciback/pciback_ops.c
+@@ -67,6 +67,13 @@ static void xen_pcibk_control_isr(struct pci_dev *dev, int reset)
+ enable ? "enable" : "disable");
+
+ if (enable) {
++ /*
++ * The MSI or MSI-X should not have an IRQ handler. Otherwise
++ * if the guest terminates we BUG_ON in free_msi_irqs.
++ */
++ if (dev->msi_enabled || dev->msix_enabled)
++ goto out;
++
+ rc = request_irq(dev_data->irq,
+ xen_pcibk_guest_interrupt, IRQF_SHARED,
+ dev_data->irq_name, dev);
+@@ -141,7 +148,12 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev));
+
+- status = pci_enable_msi(dev);
++ if (dev->msi_enabled)
++ status = -EALREADY;
++ else if (dev->msix_enabled)
++ status = -ENXIO;
++ else
++ status = pci_enable_msi(dev);
+
+ if (status) {
+ pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI for guest %u: err %d\n",
+@@ -170,20 +182,23 @@ static
+ int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev, struct xen_pci_op *op)
+ {
+- struct xen_pcibk_dev_data *dev_data;
+-
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n",
+ pci_name(dev));
+- pci_disable_msi(dev);
+
++ if (dev->msi_enabled) {
++ struct xen_pcibk_dev_data *dev_data;
++
++ pci_disable_msi(dev);
++
++ dev_data = pci_get_drvdata(dev);
++ if (dev_data)
++ dev_data->ack_intr = 1;
++ }
+ op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
+ op->value);
+- dev_data = pci_get_drvdata(dev);
+- if (dev_data)
+- dev_data->ack_intr = 1;
+ return 0;
+ }
+
+@@ -194,13 +209,26 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
+ struct xen_pcibk_dev_data *dev_data;
+ int i, result;
+ struct msix_entry *entries;
++ u16 cmd;
+
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
+ pci_name(dev));
+
+ if (op->value > SH_INFO_MAX_VEC)
+ return -EINVAL;
+
++ if (dev->msix_enabled)
++ return -EALREADY;
++
++ /*
++ * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
++ * to access the BARs where the MSI-X entries reside. 
++ */
++ pci_read_config_word(dev, PCI_COMMAND, &cmd);
++ if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
++ return -ENXIO;
++
+ entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
+ if (entries == NULL)
+ return -ENOMEM;
+@@ -242,23 +270,27 @@ static
+ int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev, struct xen_pci_op *op)
+ {
+- struct xen_pcibk_dev_data *dev_data;
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n",
+ pci_name(dev));
+- pci_disable_msix(dev);
+
++ if (dev->msix_enabled) {
++ struct xen_pcibk_dev_data *dev_data;
++
++ pci_disable_msix(dev);
++
++ dev_data = pci_get_drvdata(dev);
++ if (dev_data)
++ dev_data->ack_intr = 1;
++ }
+ /*
+ * SR-IOV devices (which don't have any legacy IRQ) have
+ * an undefined IRQ value of zero.
+ */
+ op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
+ if (unlikely(verbose_request))
+- printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev),
+- op->value);
+- dev_data = pci_get_drvdata(dev);
+- if (dev_data)
+- dev_data->ack_intr = 1;
++ printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n",
++ pci_name(dev), op->value);
+ return 0;
+ }
+ #endif
+@@ -295,9 +327,14 @@ void xen_pcibk_do_op(struct work_struct *data)
+ container_of(data, struct xen_pcibk_device, op_work);
+ struct pci_dev *dev;
+ struct xen_pcibk_dev_data *dev_data = NULL;
+- struct xen_pci_op *op = &pdev->sh_info->op;
++ struct xen_pci_op *op = &pdev->op;
+ int test_intx = 0;
++#ifdef CONFIG_PCI_MSI
++ unsigned int nr = 0;
++#endif
+
++ *op = pdev->sh_info->op;
++ barrier();
+ dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
+
+ if (dev == NULL)
+@@ -323,6 +360,7 @@ void xen_pcibk_do_op(struct work_struct *data)
+ op->err = xen_pcibk_disable_msi(pdev, dev, op);
+ break;
+ case XEN_PCI_OP_enable_msix:
++ nr = op->value;
+ op->err = xen_pcibk_enable_msix(pdev, dev, op);
+ break;
+ case XEN_PCI_OP_disable_msix:
+@@ -339,6 +377,17 @@ void xen_pcibk_do_op(struct work_struct *data)
+ if ((dev_data->enable_intx != test_intx))
+ xen_pcibk_control_isr(dev, 0 /* no reset */);
+ }
++ pdev->sh_info->op.err = op->err;
++ pdev->sh_info->op.value = op->value;
++#ifdef CONFIG_PCI_MSI
++ if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
++ unsigned int i;
++
++ for (i = 0; i < nr; i++)
++ pdev->sh_info->op.msix_entries[i].vector =
++ op->msix_entries[i].vector;
++ }
++#endif
+ /* Tell the driver domain that we're done. 
*/
+ wmb();
+ clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index dbefa6c609f4..296cc1b49446 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1496,6 +1496,9 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
+ int namelen;
+ int ret = 0;
+
++ if (!S_ISDIR(file_inode(file)->i_mode))
++ return -ENOTDIR;
++
+ ret = mnt_want_write_file(file);
+ if (ret)
+ goto out;
+@@ -1553,6 +1556,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
+ struct btrfs_ioctl_vol_args *vol_args;
+ int ret;
+
++ if (!S_ISDIR(file_inode(file)->i_mode))
++ return -ENOTDIR;
++
+ vol_args = memdup_user(arg, sizeof(*vol_args));
+ if (IS_ERR(vol_args))
+ return PTR_ERR(vol_args);
+@@ -1576,6 +1582,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
+ bool readonly = false;
+ struct btrfs_qgroup_inherit *inherit = NULL;
+
++ if (!S_ISDIR(file_inode(file)->i_mode))
++ return -ENOTDIR;
++
+ vol_args = memdup_user(arg, sizeof(*vol_args));
+ if (IS_ERR(vol_args))
+ return PTR_ERR(vol_args);
+@@ -2081,6 +2090,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
+ int ret;
+ int err = 0;
+
++ if (!S_ISDIR(dir->i_mode))
++ return -ENOTDIR;
++
+ vol_args = memdup_user(arg, sizeof(*vol_args));
+ if (IS_ERR(vol_args))
+ return PTR_ERR(vol_args);
+diff --git a/fs/coredump.c b/fs/coredump.c
+index 4f03b2b50375..a94f94d4f1a1 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -1,6 +1,7 @@
+ #include
+ #include
+ #include
++#include
+ #include
+ #include
+ #include
+@@ -375,7 +376,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
+ if (core_waiters > 0) {
+ struct core_thread *ptr;
+
++ freezer_do_not_count();
+ wait_for_completion(&core_state->startup);
++ freezer_count();
+ /*
+ * Wait for all the threads to become inactive, so that
+ * all the thread context (extended register state, like
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 046e3e93783e..f9c938e21e65 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -246,6 +246,7 @@ struct ext4_io_submit {
+ #define EXT4_MAX_BLOCK_SIZE 65536
+ #define EXT4_MIN_BLOCK_LOG_SIZE 10
+ #define EXT4_MAX_BLOCK_LOG_SIZE 16
++#define EXT4_MAX_CLUSTER_LOG_SIZE 30
+ #ifdef __KERNEL__
+ # define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize)
+ #else
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 221b58298847..31179ba2072c 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -53,25 +53,31 @@ static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
+ struct ext4_inode_info *ei)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+- __u16 csum_lo;
+- __u16 csum_hi = 0;
+ __u32 csum;
++ __u16 dummy_csum = 0;
++ int offset = offsetof(struct ext4_inode, i_checksum_lo);
++ unsigned int csum_size = sizeof(dummy_csum);
+
+- csum_lo = le16_to_cpu(raw->i_checksum_lo);
+- raw->i_checksum_lo = 0;
+- if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
+- EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
+- csum_hi = le16_to_cpu(raw->i_checksum_hi);
+- raw->i_checksum_hi = 0;
+- }
++ csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
++ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
++ offset += csum_size;
++ csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
++ EXT4_GOOD_OLD_INODE_SIZE - offset);
+
+- csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
+- EXT4_INODE_SIZE(inode->i_sb));
+-
+- raw->i_checksum_lo = cpu_to_le16(csum_lo);
+- if 
(EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
+- EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
+- raw->i_checksum_hi = cpu_to_le16(csum_hi);
++ if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
++ offset = offsetof(struct ext4_inode, i_checksum_hi);
++ csum = ext4_chksum(sbi, csum, (__u8 *)raw +
++ EXT4_GOOD_OLD_INODE_SIZE,
++ offset - EXT4_GOOD_OLD_INODE_SIZE);
++ if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
++ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
++ csum_size);
++ offset += csum_size;
++ csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
++ EXT4_INODE_SIZE(inode->i_sb) -
++ offset);
++ }
++ }
+
+ return csum;
+ }
+@@ -3604,7 +3610,7 @@ int ext4_can_truncate(struct inode *inode)
+ }
+
+ /*
+- * ext4_punch_hole: punches a hole in a file by releaseing the blocks
++ * ext4_punch_hole: punches a hole in a file by releasing the blocks
+ * associated with the given offset and length
+ *
+ * @inode: File inode
+@@ -3640,7 +3646,7 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ * Write out all dirty pages to avoid race conditions
+ * Then release them.
+ */
+- if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
++ if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+ ret = filemap_write_and_wait_range(mapping, offset,
+ offset + length - 1);
+ if (ret)
+@@ -4474,14 +4480,14 @@ static int ext4_do_update_inode(handle_t *handle,
+ * Fix up interoperability with old kernels. Otherwise, old inodes get
+ * re-used with the upper 16 bits of the uid/gid intact
+ */
+- if (!ei->i_dtime) {
++ if (ei->i_dtime && list_empty(&ei->i_orphan)) {
++ raw_inode->i_uid_high = 0;
++ raw_inode->i_gid_high = 0;
++ } else {
+ raw_inode->i_uid_high =
+ cpu_to_le16(high_16_bits(i_uid));
+ raw_inode->i_gid_high =
+ cpu_to_le16(high_16_bits(i_gid));
+- } else {
+- raw_inode->i_uid_high = 0;
+- raw_inode->i_gid_high = 0;
+ }
+ } else {
+ raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 08b4495c1b12..cb9eec025ba8 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -808,7 +808,7 @@ static void mb_regenerate_buddy(struct ext4_buddy *e4b)
+ * for this page; do not hold this lock when calling this routine!
+ */
+
+-static int ext4_mb_init_cache(struct page *page, char *incore)
++static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
+ {
+ ext4_group_t ngroups;
+ int blocksize;
+@@ -841,7 +841,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
+ /* allocate buffer_heads to read bitmaps */
+ if (groups_per_page > 1) {
+ i = sizeof(struct buffer_head *) * groups_per_page;
+- bh = kzalloc(i, GFP_NOFS);
++ bh = kzalloc(i, gfp);
+ if (bh == NULL) {
+ err = -ENOMEM;
+ goto out;
+@@ -966,7 +966,7 @@ out:
+ * are on the same page e4b->bd_buddy_page is NULL and return value is 0. 
+ */
+ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+- ext4_group_t group, struct ext4_buddy *e4b)
++ ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
+ {
+ struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
+ int block, pnum, poff;
+@@ -985,7 +985,7 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+ block = group * 2;
+ pnum = block / blocks_per_page;
+ poff = block % blocks_per_page;
+- page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
++ page = find_or_create_page(inode->i_mapping, pnum, gfp);
+ if (!page)
+ return -EIO;
+ BUG_ON(page->mapping != inode->i_mapping);
+@@ -999,7 +999,7 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+
+ block++;
+ pnum = block / blocks_per_page;
+- page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
++ page = find_or_create_page(inode->i_mapping, pnum, gfp);
+ if (!page)
+ return -EIO;
+ BUG_ON(page->mapping != inode->i_mapping);
+@@ -1025,7 +1025,7 @@ static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
+ * calling this routine!
+ */
+ static noinline_for_stack
+-int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
++int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
+ {
+
+ struct ext4_group_info *this_grp;
+@@ -1043,7 +1043,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
+ * have taken a reference using ext4_mb_load_buddy and that
+ * would have pinned buddy page to page cache.
+ */
+- ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
++ ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
+ if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
+ /*
+ * somebody initialized the group
+@@ -1053,7 +1053,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
+ }
+
+ page = e4b.bd_bitmap_page;
+- ret = ext4_mb_init_cache(page, NULL);
++ ret = ext4_mb_init_cache(page, NULL, gfp);
+ if (ret)
+ goto err;
+ if (!PageUptodate(page)) {
+@@ -1073,7 +1073,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
+ }
+ /* init buddy cache */
+ page = e4b.bd_buddy_page;
+- ret = ext4_mb_init_cache(page, e4b.bd_bitmap);
++ ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
+ if (ret)
+ goto err;
+ if (!PageUptodate(page)) {
+@@ -1092,8 +1092,8 @@ err:
+ * calling this routine!
+ */
+ static noinline_for_stack int
+-ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
+- struct ext4_buddy *e4b)
++ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
++ struct ext4_buddy *e4b, gfp_t gfp)
+ {
+ int blocks_per_page;
+ int block;
+@@ -1123,7 +1123,7 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
+ * we need full data about the group
+ * to make a good selection
+ */
+- ret = ext4_mb_init_group(sb, group);
++ ret = ext4_mb_init_group(sb, group, gfp);
+ if (ret)
+ return ret;
+ }
+@@ -1151,11 +1151,11 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
+ * wait for it to initialize. 
+ */ + page_cache_release(page); +- page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); ++ page = find_or_create_page(inode->i_mapping, pnum, gfp); + if (page) { + BUG_ON(page->mapping != inode->i_mapping); + if (!PageUptodate(page)) { +- ret = ext4_mb_init_cache(page, NULL); ++ ret = ext4_mb_init_cache(page, NULL, gfp); + if (ret) { + unlock_page(page); + goto err; +@@ -1182,11 +1182,12 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, + if (page == NULL || !PageUptodate(page)) { + if (page) + page_cache_release(page); +- page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); ++ page = find_or_create_page(inode->i_mapping, pnum, gfp); + if (page) { + BUG_ON(page->mapping != inode->i_mapping); + if (!PageUptodate(page)) { +- ret = ext4_mb_init_cache(page, e4b->bd_bitmap); ++ ret = ext4_mb_init_cache(page, e4b->bd_bitmap, ++ gfp); + if (ret) { + unlock_page(page); + goto err; +@@ -1220,6 +1221,12 @@ err: + return ret; + } + ++static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, ++ struct ext4_buddy *e4b) ++{ ++ return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS); ++} ++ + static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) + { + if (e4b->bd_bitmap_page) +@@ -1993,7 +2000,7 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac, + + /* We only do this if the grp has never been initialized */ + if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { +- int ret = ext4_mb_init_group(ac->ac_sb, group); ++ int ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS); + if (ret) + return 0; + } +@@ -4748,7 +4755,9 @@ do_more: + #endif + trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); + +- err = ext4_mb_load_buddy(sb, block_group, &e4b); ++ /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. 
*/ ++ err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b, ++ GFP_NOFS|__GFP_NOFAIL); + if (err) + goto error_return; + +@@ -5159,7 +5168,7 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) + grp = ext4_get_group_info(sb, group); + /* We only do this if the grp has never been initialized */ + if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { +- ret = ext4_mb_init_group(sb, group); ++ ret = ext4_mb_init_group(sb, group, GFP_NOFS); + if (ret) + break; + } +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c +index facf8590b714..407bcf79aa31 100644 +--- a/fs/ext4/namei.c ++++ b/fs/ext4/namei.c +@@ -417,15 +417,14 @@ static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent, + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + struct ext4_inode_info *ei = EXT4_I(inode); + __u32 csum; +- __le32 save_csum; + int size; ++ __u32 dummy_csum = 0; ++ int offset = offsetof(struct dx_tail, dt_checksum); + + size = count_offset + (count * sizeof(struct dx_entry)); +- save_csum = t->dt_checksum; +- t->dt_checksum = 0; + csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size); +- csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail)); +- t->dt_checksum = save_csum; ++ csum = ext4_chksum(sbi, csum, (__u8 *)t, offset); ++ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum)); + + return cpu_to_le32(csum); + } +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 15a81897df4e..faa192087033 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -1936,23 +1936,25 @@ failed: + static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group, + struct ext4_group_desc *gdp) + { +- int offset; ++ int offset = offsetof(struct ext4_group_desc, bg_checksum); + __u16 crc = 0; + __le32 le_group = cpu_to_le32(block_group); + + if ((sbi->s_es->s_feature_ro_compat & + cpu_to_le32(EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))) { + /* Use new metadata_csum algorithm */ +- __le16 save_csum; + __u32 csum32; ++ __u16 dummy_csum = 0; + +- save_csum = gdp->bg_checksum; +- gdp->bg_checksum = 0; + csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group, + sizeof(le_group)); +- csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, +- sbi->s_desc_size); +- gdp->bg_checksum = save_csum; ++ csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset); ++ csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum, ++ sizeof(dummy_csum)); ++ offset += sizeof(dummy_csum); ++ if (offset < sbi->s_desc_size) ++ csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset, ++ sbi->s_desc_size - offset); + + crc = csum32 & 0xFFFF; + goto out; +@@ -1963,8 +1965,6 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group, + cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM))) + return 0; + +- offset = offsetof(struct ext4_group_desc, bg_checksum); +- + crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid)); + crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group)); + crc = crc16(crc, (__u8 *)gdp, offset); +@@ -2002,6 +2002,7 @@ void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group, + + /* Called at mount-time, super-block is locked */ + static int ext4_check_descriptors(struct super_block *sb, ++ ext4_fsblk_t sb_block, + ext4_group_t *first_not_zeroed) + { + struct ext4_sb_info *sbi = EXT4_SB(sb); +@@ -2032,6 +2033,11 @@ static int ext4_check_descriptors(struct super_block *sb, + grp = i; + + block_bitmap = ext4_block_bitmap(sb, gdp); ++ if (block_bitmap == sb_block) { ++ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " ++ "Block bitmap for group 
%u overlaps " ++ "superblock", i); ++ } + if (block_bitmap < first_block || block_bitmap > last_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " + "Block bitmap for group %u not in group " +@@ -2039,6 +2045,11 @@ static int ext4_check_descriptors(struct super_block *sb, + return 0; + } + inode_bitmap = ext4_inode_bitmap(sb, gdp); ++ if (inode_bitmap == sb_block) { ++ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " ++ "Inode bitmap for group %u overlaps " ++ "superblock", i); ++ } + if (inode_bitmap < first_block || inode_bitmap > last_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " + "Inode bitmap for group %u not in group " +@@ -2046,6 +2057,11 @@ static int ext4_check_descriptors(struct super_block *sb, + return 0; + } + inode_table = ext4_inode_table(sb, gdp); ++ if (inode_table == sb_block) { ++ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " ++ "Inode table for group %u overlaps " ++ "superblock", i); ++ } + if (inode_table < first_block || + inode_table + sbi->s_itb_per_group - 1 > last_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " +@@ -3521,7 +3537,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + if (blocksize < EXT4_MIN_BLOCK_SIZE || + blocksize > EXT4_MAX_BLOCK_SIZE) { + ext4_msg(sb, KERN_ERR, +- "Unsupported filesystem blocksize %d", blocksize); ++ "Unsupported filesystem blocksize %d (%d log_block_size)", ++ blocksize, le32_to_cpu(es->s_log_block_size)); ++ goto failed_mount; ++ } ++ if (le32_to_cpu(es->s_log_block_size) > ++ (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { ++ ext4_msg(sb, KERN_ERR, ++ "Invalid log block size: %u", ++ le32_to_cpu(es->s_log_block_size)); + goto failed_mount; + } + +@@ -3636,6 +3660,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + "block size (%d)", clustersize, blocksize); + goto failed_mount; + } ++ if (le32_to_cpu(es->s_log_cluster_size) > ++ (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { ++ ext4_msg(sb, KERN_ERR, ++ "Invalid log cluster size: %u", ++ le32_to_cpu(es->s_log_cluster_size)); ++ goto failed_mount; ++ } + sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - + le32_to_cpu(es->s_log_block_size); + sbi->s_clusters_per_group = +@@ -3766,7 +3797,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + goto failed_mount2; + } + } +- if (!ext4_check_descriptors(sb, &first_not_zeroed)) { ++ if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) { + ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); + goto failed_mount2; + } +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c +index a20816e7eb3a..92850bab4513 100644 +--- a/fs/ext4/xattr.c ++++ b/fs/ext4/xattr.c +@@ -123,17 +123,18 @@ static __le32 ext4_xattr_block_csum(struct inode *inode, + { + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + __u32 csum; +- __le32 save_csum; + __le64 dsk_block_nr = cpu_to_le64(block_nr); ++ __u32 dummy_csum = 0; ++ int offset = offsetof(struct ext4_xattr_header, h_checksum); + +- save_csum = hdr->h_checksum; +- hdr->h_checksum = 0; + csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr, + sizeof(dsk_block_nr)); +- csum = ext4_chksum(sbi, csum, (__u8 *)hdr, +- EXT4_BLOCK_SIZE(inode->i_sb)); ++ csum = ext4_chksum(sbi, csum, (__u8 *)hdr, offset); ++ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum)); ++ offset += sizeof(dummy_csum); ++ csum = ext4_chksum(sbi, csum, (__u8 *)hdr + offset, ++ EXT4_BLOCK_SIZE(inode->i_sb) - offset); + +- hdr->h_checksum = 
save_csum; + return cpu_to_le32(csum); + } + +diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c +index b58a9cbb9695..f0faa87e23d3 100644 +--- a/fs/hostfs/hostfs_kern.c ++++ b/fs/hostfs/hostfs_kern.c +@@ -942,10 +942,11 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent) + + if (S_ISLNK(root_inode->i_mode)) { + char *name = follow_link(host_root_path); +- if (IS_ERR(name)) ++ if (IS_ERR(name)) { + err = PTR_ERR(name); +- else +- err = read_name(root_inode, name); ++ goto out_put; ++ } ++ err = read_name(root_inode, name); + kfree(name); + if (err) + goto out_put; +diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c +index 10489bbd40fc..955fabf46a72 100644 +--- a/fs/isofs/inode.c ++++ b/fs/isofs/inode.c +@@ -726,6 +726,11 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent) + pri_bh = NULL; + + root_found: ++ /* We don't support read-write mounts */ ++ if (!(s->s_flags & MS_RDONLY)) { ++ error = -EACCES; ++ goto out_freebh; ++ } + + if (joliet_level && (pri == NULL || !opt.rock)) { + /* This is the case of Joliet with the norock mount flag. +@@ -1538,9 +1543,6 @@ struct inode *__isofs_iget(struct super_block *sb, + static struct dentry *isofs_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data) + { +- /* We don't support read-write mounts */ +- if (!(flags & MS_RDONLY)) +- return ERR_PTR(-EACCES); + return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super); + } + +diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c +index e05c96ebb27d..57d3b5ef22a0 100644 +--- a/fs/nfs/callback.c ++++ b/fs/nfs/callback.c +@@ -302,6 +302,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv, struct n + err_socks: + svc_rpcb_cleanup(serv, net); + err_bind: ++ nn->cb_users[minorversion]--; + dprintk("NFS: Couldn't create callback socket: err = %d; " + "net = %p\n", ret, net); + return ret; +diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c +index e98ecf8d2588..7f7a89a67c6d 100644 +--- a/fs/nfs/callback_xdr.c ++++ b/fs/nfs/callback_xdr.c +@@ -884,7 +884,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r + if (hdr_arg.minorversion == 0) { + cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident); + if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) +- return rpc_drop_reply; ++ goto out_invalidcred; + } + + hdr_res.taglen = hdr_arg.taglen; +@@ -911,6 +911,10 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r + nfs_put_client(cps.clp); + dprintk("%s: done, status = %u\n", __func__, ntohl(status)); + return rpc_success; ++ ++out_invalidcred: ++ pr_warn_ratelimited("NFS: NFSv4 callback contains invalid cred\n"); ++ return rpc_autherr_badcred; + } + + /* +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c +index 2bdaf57c82d0..7d45b38aeb08 100644 +--- a/fs/nfs/nfs4state.c ++++ b/fs/nfs/nfs4state.c +@@ -1464,6 +1464,9 @@ restart: + "Zeroing state\n", __func__, status); + case -ENOENT: + case -ENOMEM: ++ case -EACCES: ++ case -EROFS: ++ case -EIO: + case -ESTALE: + /* + * Open state on this file cannot be recovered +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c +index 4a58afa99654..b0878e1921be 100644 +--- a/fs/nfsd/nfs4state.c ++++ b/fs/nfsd/nfs4state.c +@@ -2193,7 +2193,8 @@ out: + if (!list_empty(&clp->cl_revoked)) + seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED; + out_no_session: +- kfree(conn); ++ if (conn) ++ free_conn(conn); + spin_unlock(&nn->client_lock); + return 
status; + out_put_session: +diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c +index f65bdcf61526..6d97883e2652 100644 +--- a/fs/ocfs2/dlm/dlmconvert.c ++++ b/fs/ocfs2/dlm/dlmconvert.c +@@ -265,7 +265,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, + struct dlm_lock *lock, int flags, int type) + { + enum dlm_status status; +- u8 old_owner = res->owner; + + mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type, + lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS); +@@ -332,7 +331,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, + + spin_lock(&res->spinlock); + res->state &= ~DLM_LOCK_RES_IN_PROGRESS; +- lock->convert_pending = 0; + /* if it failed, move it back to granted queue. + * if master returns DLM_NORMAL and then down before sending ast, + * it may have already been moved to granted queue, reset to +@@ -341,12 +339,14 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, + if (status != DLM_NOTQUEUED) + dlm_error(status); + dlm_revert_pending_convert(res, lock); +- } else if ((res->state & DLM_LOCK_RES_RECOVERING) || +- (old_owner != res->owner)) { +- mlog(0, "res %.*s is in recovering or has been recovered.\n", +- res->lockname.len, res->lockname.name); ++ } else if (!lock->convert_pending) { ++ mlog(0, "%s: res %.*s, owner died and lock has been moved back " ++ "to granted list, retry convert.\n", ++ dlm->name, res->lockname.len, res->lockname.name); + status = DLM_RECOVERING; + } ++ ++ lock->convert_pending = 0; + bail: + spin_unlock(&res->spinlock); + +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c +index d0e8c0b1767f..496af7fd87d5 100644 +--- a/fs/ocfs2/file.c ++++ b/fs/ocfs2/file.c +@@ -1499,7 +1499,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, + u64 start, u64 len) + { + int ret = 0; +- u64 tmpend, end = start + len; ++ u64 tmpend = 0; ++ u64 end = start + len; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + unsigned int csize = osb->s_clustersize; + handle_t *handle; +@@ -1531,18 +1532,31 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, + } + + /* +- * We want to get the byte offset of the end of the 1st cluster. ++ * If start is on a cluster boundary and end is somewhere in another ++ * cluster, we have not COWed the cluster starting at start, unless ++ * end is also within the same cluster. So, in this case, we skip this ++ * first call to ocfs2_zero_range_for_truncate() truncate and move on ++ * to the next one. + */ +- tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1)); +- if (tmpend > end) +- tmpend = end; ++ if ((start & (csize - 1)) != 0) { ++ /* ++ * We want to get the byte offset of the end of the 1st ++ * cluster. 
++ */ ++ tmpend = (u64)osb->s_clustersize + ++ (start & ~(osb->s_clustersize - 1)); ++ if (tmpend > end) ++ tmpend = end; + +- trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start, +- (unsigned long long)tmpend); ++ trace_ocfs2_zero_partial_clusters_range1( ++ (unsigned long long)start, ++ (unsigned long long)tmpend); + +- ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend); +- if (ret) +- mlog_errno(ret); ++ ret = ocfs2_zero_range_for_truncate(inode, handle, start, ++ tmpend); ++ if (ret) ++ mlog_errno(ret); ++ } + + if (tmpend < end) { + /* +diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c +old mode 100644 +new mode 100755 +index 76ab5d4..26c5779 +--- a/fs/pstore/ram_core.c ++++ b/fs/pstore/ram_core.c +@@ -91,43 +91,10 @@ static inline int atomic_cmpxchg__(atomic_t *v, int old, int new) + return ret; + } + +-/* increase and wrap the start pointer, returning the old value */ +-static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a) +-{ +- int old; +- int new; +- +- do { +- old = atomic_read(&prz->buffer->start); +- new = old + a; +- while (unlikely(new > prz->buffer_size)) +- new -= prz->buffer_size; +- } while (atomic_cmpxchg__(&prz->buffer->start, old, new) != old); +- +- return old; +-} +- +-/* increase the size counter until it hits the max size */ +-static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a) +-{ +- size_t old; +- size_t new; +- +- if (atomic_read(&prz->buffer->size) == prz->buffer_size) +- return; +- +- do { +- old = atomic_read(&prz->buffer->size); +- new = old + a; +- if (new > prz->buffer_size) +- new = prz->buffer_size; +- } while (atomic_cmpxchg__(&prz->buffer->size, old, new) != old); +-} +- + static DEFINE_RAW_SPINLOCK(buffer_lock); + + /* increase and wrap the start pointer, returning the old value */ +-static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a) ++static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a) + { + int old; + int new; +@@ -137,7 +104,7 @@ static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a) + + old = atomic_read(&prz->buffer->start); + new = old + a; +- while (unlikely(new > prz->buffer_size)) ++ while (unlikely(new >= prz->buffer_size)) + new -= prz->buffer_size; + atomic_set(&prz->buffer->start, new); + +@@ -147,7 +114,7 @@ static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a) + } + + /* increase the size counter until it hits the max size */ +-static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a) ++static void buffer_size_add(struct persistent_ram_zone *prz, size_t a) + { + size_t old; + size_t new; +@@ -168,9 +135,6 @@ exit: + raw_spin_unlock_irqrestore(&buffer_lock, flags); + } + +-static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic; +-static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic; +- + static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz, + uint8_t *data, size_t len, uint8_t *ecc) + { +@@ -345,7 +309,7 @@ static void notrace persistent_ram_update(struct persistent_ram_zone *prz, + const void *s, unsigned int start, unsigned int count) + { + struct persistent_ram_buffer *buffer = prz->buffer; +- memcpy(buffer->data + start, s, count); ++ memcpy_toio(buffer->data + start, s, count); + persistent_ram_update_ecc(prz, start, count); + } + +@@ -374,8 +338,8 @@ void persistent_ram_save_old(struct persistent_ram_zone *prz) + 
} + + prz->old_log_size = size; +- memcpy(prz->old_log, &buffer->data[start], size - start); +- memcpy(prz->old_log + size - start, &buffer->data[0], start); ++ memcpy_fromio(prz->old_log, &buffer->data[start], size - start); ++ memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start); + } + + int notrace persistent_ram_write(struct persistent_ram_zone *prz, +@@ -479,9 +443,6 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size, + return NULL; + } + +- buffer_start_add = buffer_start_add_locked; +- buffer_size_add = buffer_size_add_locked; +- + if (memtype) + va = ioremap(start, size); + else +diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c +index e1978fd895f5..58cce0c606f1 100644 +--- a/fs/reiserfs/ibalance.c ++++ b/fs/reiserfs/ibalance.c +@@ -1082,8 +1082,9 @@ int balance_internal(struct tree_balance *tb, /* tree_balance structure + insert_ptr); + } + +- memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE); + insert_ptr[0] = new_insert_ptr; ++ if (new_insert_ptr) ++ memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE); + + return order; + } +diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c +index e2e202a07b31..7ff27fa3a453 100644 +--- a/fs/reiserfs/super.c ++++ b/fs/reiserfs/super.c +@@ -184,7 +184,15 @@ static int remove_save_link_only(struct super_block *s, + static int reiserfs_quota_on_mount(struct super_block *, int); + #endif + +-/* look for uncompleted unlinks and truncates and complete them */ ++/* ++ * Look for uncompleted unlinks and truncates and complete them ++ * ++ * Called with superblock write locked. If quotas are enabled, we have to ++ * release/retake lest we call dquot_quota_on_mount(), proceed to ++ * schedule_on_each_cpu() in invalidate_bdev() and deadlock waiting for the per ++ * cpu worklets to complete flush_async_commits() that in turn wait for the ++ * superblock write lock. ++ */ + static int finish_unfinished(struct super_block *s) + { + INITIALIZE_PATH(path); +@@ -231,7 +239,9 @@ static int finish_unfinished(struct super_block *s) + quota_enabled[i] = 0; + continue; + } ++ reiserfs_write_unlock(s); + ret = reiserfs_quota_on_mount(s, i); ++ reiserfs_write_lock(s); + if (ret < 0) + reiserfs_warning(s, "reiserfs-2500", + "cannot turn on journaled " +diff --git a/fs/seq_file.c b/fs/seq_file.c +index 3dd44db1465e..c009e605c7c9 100644 +--- a/fs/seq_file.c ++++ b/fs/seq_file.c +@@ -206,8 +206,10 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) + size -= n; + buf += n; + copied += n; +- if (!m->count) ++ if (!m->count) { ++ m->from = 0; + m->index++; ++ } + if (!size) + goto Done; + } +diff --git a/fs/super.c b/fs/super.c +index 97280e76179c..fd3281d1ec45 100644 +--- a/fs/super.c ++++ b/fs/super.c +@@ -1327,8 +1327,8 @@ int freeze_super(struct super_block *sb) + } + } + /* +- * This is just for debugging purposes so that fs can warn if it +- * sees write activity when frozen is set to SB_FREEZE_COMPLETE. ++ * For debugging purposes so that fs can warn if it sees write activity ++ * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super(). 
+ */ + sb->s_writers.frozen = SB_FREEZE_COMPLETE; + up_write(&sb->s_umount); +@@ -1347,7 +1347,7 @@ int thaw_super(struct super_block *sb) + int error; + + down_write(&sb->s_umount); +- if (sb->s_writers.frozen == SB_UNFROZEN) { ++ if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) { + up_write(&sb->s_umount); + return -EINVAL; + } +diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c +index 605af512aec2..db364d4d0d18 100644 +--- a/fs/ubifs/dir.c ++++ b/fs/ubifs/dir.c +@@ -348,7 +348,8 @@ static unsigned int vfs_dent_type(uint8_t type) + */ + static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) + { +- int err, over = 0; ++ int err = 0; ++ int over = 0; + loff_t pos = file->f_pos; + struct qstr nm; + union ubifs_key key; +@@ -467,16 +468,23 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) + } + + out: +- if (err != -ENOENT) { +- ubifs_err("cannot find next direntry, error %d", err); +- return err; +- } +- + kfree(file->private_data); + file->private_data = NULL; ++ ++ if (err != -ENOENT) ++ ubifs_err("cannot find next direntry, error %d", err); ++ else ++ /* ++ * -ENOENT is a non-fatal error in this context, the TNC uses ++ * it to indicate that the cursor moved past the current directory ++ * and readdir() has to stop. ++ */ ++ err = 0; ++ ++ + /* 2 is a special value indicating that there are no more direntries */ + file->f_pos = 2; +- return 0; ++ return err; + } + + static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int whence) +diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c +index 52a6559275c4..3f620c0ba0a6 100644 +--- a/fs/ubifs/tnc_commit.c ++++ b/fs/ubifs/tnc_commit.c +@@ -370,7 +370,7 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt) + + p = c->gap_lebs; + do { +- ubifs_assert(p < c->gap_lebs + sizeof(int) * c->lst.idx_lebs); ++ ubifs_assert(p < c->gap_lebs + c->lst.idx_lebs); + written = layout_leb_in_gaps(c, p); + if (written < 0) { + err = written; +diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c +index 0f7139bdb2c2..69a42f36b421 100644 +--- a/fs/ubifs/xattr.c ++++ b/fs/ubifs/xattr.c +@@ -167,6 +167,7 @@ out_cancel: + host_ui->xattr_cnt -= 1; + host_ui->xattr_size -= CALC_DENT_SIZE(nm->len); + host_ui->xattr_size -= CALC_XATTR_BYTES(size); ++ host_ui->xattr_names -= nm->len; + mutex_unlock(&host_ui->ui_mutex); + out_free: + make_bad_inode(inode); +@@ -514,6 +515,7 @@ out_cancel: + host_ui->xattr_cnt += 1; + host_ui->xattr_size += CALC_DENT_SIZE(nm->len); + host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len); ++ host_ui->xattr_names += nm->len; + mutex_unlock(&host_ui->ui_mutex); + ubifs_release_budget(c, &req); + make_bad_inode(inode); +diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c +index bac3e1635b7d..e59f309efbee 100644 +--- a/fs/xfs/xfs_dquot.c ++++ b/fs/xfs/xfs_dquot.c +@@ -309,8 +309,7 @@ xfs_dquot_buf_verify_crc( + if (mp->m_quotainfo) + ndquots = mp->m_quotainfo->qi_dqperchunk; + else +- ndquots = xfs_qm_calc_dquots_per_chunk(mp, +- XFS_BB_TO_FSB(mp, bp->b_length)); ++ ndquots = xfs_qm_calc_dquots_per_chunk(mp, bp->b_length); + + for (i = 0; i < ndquots; i++, d++) { + if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk), +diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c +index e8e310c05097..363c4cc9bfd5 100644 +--- a/fs/xfs/xfs_mount.c ++++ b/fs/xfs/xfs_mount.c +@@ -689,7 +689,8 @@ xfs_sb_verify( + * Only check the in progress field for the primary superblock as + * mkfs.xfs doesn't clear it from secondary superblocks. 
+ */ +- return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR, ++ return xfs_mount_validate_sb(mp, &sb, ++ bp->b_maps[0].bm_bn == XFS_SB_DADDR, + check_version); + } + +diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h +index c184aa8ec8cd..a8203040f27a 100644 +--- a/include/asm-generic/uaccess.h ++++ b/include/asm-generic/uaccess.h +@@ -228,14 +228,18 @@ extern int __put_user_bad(void) __attribute__((noreturn)); + might_sleep(); \ + access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ? \ + __get_user(x, ptr) : \ +- -EFAULT; \ ++ ((x) = (__typeof__(*(ptr)))0,-EFAULT); \ + }) + + #ifndef __get_user_fn + static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) + { +- size = __copy_from_user(x, ptr, size); +- return size ? -EFAULT : size; ++ size_t n = __copy_from_user(x, ptr, size); ++ if (unlikely(n)) { ++ memset(x + (size - n), 0, n); ++ return -EFAULT; ++ } ++ return 0; + } + + #define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k) +@@ -255,11 +259,13 @@ extern int __get_user_bad(void) __attribute__((noreturn)); + static inline long copy_from_user(void *to, + const void __user * from, unsigned long n) + { ++ unsigned long res = n; + might_sleep(); +- if (access_ok(VERIFY_READ, from, n)) +- return __copy_from_user(to, from, n); +- else +- return n; ++ if (likely(access_ok(VERIFY_READ, from, n))) ++ res = __copy_from_user(to, from, n); ++ if (unlikely(res)) ++ memset(to + (n - res), 0, res); ++ return res; + } + + static inline long copy_to_user(void __user *to, +diff --git a/include/crypto/hash.h b/include/crypto/hash.h +index 26cb1eb16f4c..c8c79878c082 100644 +--- a/include/crypto/hash.h ++++ b/include/crypto/hash.h +@@ -94,6 +94,7 @@ struct crypto_ahash { + unsigned int keylen); + + unsigned int reqsize; ++ bool has_setkey; + struct crypto_tfm base; + }; + +@@ -181,6 +182,11 @@ static inline void *ahash_request_ctx(struct ahash_request *req) + + int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); ++static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm) ++{ ++ return tfm->has_setkey; ++} ++ + int crypto_ahash_finup(struct ahash_request *req); + int crypto_ahash_final(struct ahash_request *req); + int crypto_ahash_digest(struct ahash_request *req); +diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h +index d61c11170213..bfefd8139e18 100644 +--- a/include/crypto/if_alg.h ++++ b/include/crypto/if_alg.h +@@ -30,6 +30,9 @@ struct alg_sock { + + struct sock *parent; + ++ unsigned int refcnt; ++ unsigned int nokey_refcnt; ++ + const struct af_alg_type *type; + void *private; + }; +@@ -49,8 +52,10 @@ struct af_alg_type { + void (*release)(void *private); + int (*setkey)(void *private, const u8 *key, unsigned int keylen); + int (*accept)(void *private, struct sock *sk); ++ int (*accept_nokey)(void *private, struct sock *sk); + + struct proto_ops *ops; ++ struct proto_ops *ops_nokey; + struct module *owner; + char name[14]; + }; +@@ -64,6 +69,7 @@ int af_alg_register_type(const struct af_alg_type *type); + int af_alg_unregister_type(const struct af_alg_type *type); + + int af_alg_release(struct socket *sock); ++void af_alg_release_parent(struct sock *sk); + int af_alg_accept(struct sock *sk, struct socket *newsock); + + int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len, +@@ -80,11 +86,6 @@ static inline struct alg_sock *alg_sk(struct sock *sk) + return (struct alg_sock *)sk; + } + +-static inline void af_alg_release_parent(struct sock *sk) +-{ +- 
sock_put(alg_sk(sk)->parent); +-} +- + static inline void af_alg_init_completion(struct af_alg_completion *completion) + { + init_completion(&completion->completion); +diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h +index fb0ab651a041..fb9fbe2f63e7 100644 +--- a/include/linux/can/dev.h ++++ b/include/linux/can/dev.h +@@ -31,6 +31,7 @@ enum can_mode { + * CAN common private data + */ + struct can_priv { ++ struct net_device *dev; + struct can_device_stats can_stats; + + struct can_bittiming bittiming; +@@ -42,7 +43,7 @@ struct can_priv { + u32 ctrlmode_supported; + + int restart_ms; +- struct timer_list restart_timer; ++ struct delayed_work restart_work; + + int (*do_set_bittiming)(struct net_device *dev); + int (*do_set_mode)(struct net_device *dev, enum can_mode mode); +diff --git a/include/linux/crypto.h b/include/linux/crypto.h +index 2b00d92a6e6f..61dd0b15d21c 100644 +--- a/include/linux/crypto.h ++++ b/include/linux/crypto.h +@@ -354,6 +354,7 @@ struct ablkcipher_tfm { + + unsigned int ivsize; + unsigned int reqsize; ++ bool has_setkey; + }; + + struct aead_tfm { +@@ -664,6 +665,13 @@ static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, + return crt->setkey(crt->base, key, keylen); + } + ++static inline bool crypto_ablkcipher_has_setkey(struct crypto_ablkcipher *tfm) ++{ ++ struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm); ++ ++ return crt->has_setkey; ++} ++ + static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( + struct ablkcipher_request *req) + { +diff --git a/include/linux/filter.h b/include/linux/filter.h +index f65f5a69db8f..c2bea01d0466 100644 +--- a/include/linux/filter.h ++++ b/include/linux/filter.h +@@ -36,7 +36,11 @@ static inline unsigned int sk_filter_len(const struct sk_filter *fp) + return fp->len * sizeof(struct sock_filter) + sizeof(*fp); + } + +-extern int sk_filter(struct sock *sk, struct sk_buff *skb); ++int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); ++static inline int sk_filter(struct sock *sk, struct sk_buff *skb) ++{ ++ return sk_filter_trim_cap(sk, skb, 1); ++} + extern unsigned int sk_run_filter(const struct sk_buff *skb, + const struct sock_filter *filter); + extern int sk_unattached_filter_create(struct sk_filter **pfp, +diff --git a/include/linux/i8042.h b/include/linux/i8042.h +index a986ff588944..801c307f6fcc 100644 +--- a/include/linux/i8042.h ++++ b/include/linux/i8042.h +@@ -38,7 +38,6 @@ struct serio; + void i8042_lock_chip(void); + void i8042_unlock_chip(void); + int i8042_command(unsigned char *param, int command); +-bool i8042_check_port_owner(const struct serio *); + int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *serio)); + int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, +@@ -59,11 +58,6 @@ static inline int i8042_command(unsigned char *param, int command) + return -ENODEV; + } + +-static inline bool i8042_check_port_owner(const struct serio *serio) +-{ +- return false; +-} +- + static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *serio)) + { +diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h +index e94537befabd..b55c95dd8748 100644 +--- a/include/linux/mfd/88pm80x.h ++++ b/include/linux/mfd/88pm80x.h +@@ -345,7 +345,7 @@ static inline int pm80x_dev_suspend(struct device *dev) + int irq = platform_get_irq(pdev, 0); + + if (device_may_wakeup(dev)) +- set_bit((1 << irq), &chip->wu_flag); ++ set_bit(irq, 
&chip->wu_flag); + + return 0; + } +@@ -357,7 +357,7 @@ static inline int pm80x_dev_resume(struct device *dev) + int irq = platform_get_irq(pdev, 0); + + if (device_may_wakeup(dev)) +- clear_bit((1 << irq), &chip->wu_flag); ++ clear_bit(irq, &chip->wu_flag); + + return 0; + } +diff --git a/include/linux/mroute.h b/include/linux/mroute.h +index 79aaa9fc1a15..d5277fc3ce2e 100644 +--- a/include/linux/mroute.h ++++ b/include/linux/mroute.h +@@ -103,5 +103,5 @@ struct mfc_cache { + struct rtmsg; + extern int ipmr_get_route(struct net *net, struct sk_buff *skb, + __be32 saddr, __be32 daddr, +- struct rtmsg *rtm, int nowait); ++ struct rtmsg *rtm, int nowait, u32 portid); + #endif +diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h +index 66982e764051..f831155dc7d1 100644 +--- a/include/linux/mroute6.h ++++ b/include/linux/mroute6.h +@@ -115,7 +115,7 @@ struct mfc6_cache { + + struct rtmsg; + extern int ip6mr_get_route(struct net *net, struct sk_buff *skb, +- struct rtmsg *rtm, int nowait); ++ struct rtmsg *rtm, int nowait, u32 portid); + + #ifdef CONFIG_IPV6_MROUTE + extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb); +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index 4d2e0418ab5a..45a618b58864 100644 +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -2223,6 +2223,7 @@ static inline void napi_free_frags(struct napi_struct *napi) + napi->skb = NULL; + } + ++bool netdev_is_rx_handler_busy(struct net_device *dev); + extern int netdev_rx_handler_register(struct net_device *dev, + rx_handler_func_t *rx_handler, + void *rx_handler_data); +diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h +index e3dea75a078b..9497527daba3 100644 +--- a/include/linux/pagemap.h ++++ b/include/linux/pagemap.h +@@ -482,56 +482,56 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size) + */ + static inline int fault_in_multipages_writeable(char __user *uaddr, int size) + { +- int ret = 0; + char __user *end = uaddr + size - 1; + + if (unlikely(size == 0)) +- return ret; ++ return 0; + ++ if (unlikely(uaddr > end)) ++ return -EFAULT; + /* + * Writing zeroes into userspace here is OK, because we know that if + * the zero gets there, we'll be overwriting it. + */ +- while (uaddr <= end) { +- ret = __put_user(0, uaddr); +- if (ret != 0) +- return ret; ++ do { ++ if (unlikely(__put_user(0, uaddr) != 0)) ++ return -EFAULT; + uaddr += PAGE_SIZE; +- } ++ } while (uaddr <= end); + + /* Check whether the range spilled into the next page. */ + if (((unsigned long)uaddr & PAGE_MASK) == + ((unsigned long)end & PAGE_MASK)) +- ret = __put_user(0, end); ++ return __put_user(0, end); + +- return ret; ++ return 0; + } + + static inline int fault_in_multipages_readable(const char __user *uaddr, + int size) + { + volatile char c; +- int ret = 0; + const char __user *end = uaddr + size - 1; + + if (unlikely(size == 0)) +- return ret; ++ return 0; + +- while (uaddr <= end) { +- ret = __get_user(c, uaddr); +- if (ret != 0) +- return ret; ++ if (unlikely(uaddr > end)) ++ return -EFAULT; ++ ++ do { ++ if (unlikely(__get_user(c, uaddr) != 0)) ++ return -EFAULT; + uaddr += PAGE_SIZE; +- } ++ } while (uaddr <= end); + + /* Check whether the range spilled into the next page. 
*/ + if (((unsigned long)uaddr & PAGE_MASK) == + ((unsigned long)end & PAGE_MASK)) { +- ret = __get_user(c, end); +- (void)c; ++ return __get_user(c, end); + } + +- return ret; ++ return 0; + } + + int add_to_page_cache_locked(struct page *page, struct address_space *mapping, +diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h +index 229a757e1c13..3204422317e0 100644 +--- a/include/linux/perf_event.h ++++ b/include/linux/perf_event.h +@@ -430,11 +430,6 @@ struct perf_event { + #endif /* CONFIG_PERF_EVENTS */ + }; + +-enum perf_event_context_type { +- task_context, +- cpu_context, +-}; +- + /** + * struct perf_event_context - event context structure + * +@@ -442,7 +437,6 @@ enum perf_event_context_type { + */ + struct perf_event_context { + struct pmu *pmu; +- enum perf_event_context_type type; + /* + * Protect the states of the events in the list, + * nr_active, and the list: +diff --git a/include/linux/serio.h b/include/linux/serio.h +index 36aac733840a..deffa4746e16 100644 +--- a/include/linux/serio.h ++++ b/include/linux/serio.h +@@ -28,7 +28,8 @@ struct serio { + + struct serio_device_id id; + +- spinlock_t lock; /* protects critical sections from port's interrupt handler */ ++ /* Protects critical sections from port's interrupt handler */ ++ spinlock_t lock; + + int (*write)(struct serio *, unsigned char); + int (*open)(struct serio *); +@@ -37,16 +38,29 @@ struct serio { + void (*stop)(struct serio *); + + struct serio *parent; +- struct list_head child_node; /* Entry in parent->children list */ ++ /* Entry in parent->children list */ ++ struct list_head child_node; + struct list_head children; +- unsigned int depth; /* level of nesting in serio hierarchy */ ++ /* Level of nesting in serio hierarchy */ ++ unsigned int depth; + +- struct serio_driver *drv; /* accessed from interrupt, must be protected by serio->lock and serio->sem */ +- struct mutex drv_mutex; /* protects serio->drv so attributes can pin driver */ ++ /* ++ * serio->drv is accessed from interrupt handlers; when modifying ++ * caller should acquire serio->drv_mutex and serio->lock. ++ */ ++ struct serio_driver *drv; ++ /* Protects serio->drv so attributes can pin current driver */ ++ struct mutex drv_mutex; + + struct device dev; + + struct list_head node; ++ ++ /* ++ * For use by PS/2 layer when several ports share hardware and ++ * may get indigestion when exposed to concurrent access (i8042). 
++ */ ++ struct mutex *ps2_cmd_mutex; + }; + #define to_serio_port(d) container_of(d, struct serio, dev) + +diff --git a/include/linux/stddef.h b/include/linux/stddef.h +index f4aec0e75c3a..9c61c7cda936 100644 +--- a/include/linux/stddef.h ++++ b/include/linux/stddef.h +@@ -3,7 +3,6 @@ + + #include + +- + #undef NULL + #define NULL ((void *)0) + +@@ -14,8 +13,18 @@ enum { + + #undef offsetof + #ifdef __compiler_offsetof +-#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER) ++#define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE, MEMBER) + #else +-#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) ++#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER) + #endif ++ ++/** ++ * offsetofend(TYPE, MEMBER) ++ * ++ * @TYPE: The type of the structure ++ * @MEMBER: The member within the structure to get the end offset of ++ */ ++#define offsetofend(TYPE, MEMBER) \ ++ (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)) ++ + #endif +diff --git a/include/linux/vfio.h b/include/linux/vfio.h +index ac8d488e4372..ef4f73739a76 100644 +--- a/include/linux/vfio.h ++++ b/include/linux/vfio.h +@@ -76,18 +76,4 @@ extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops); + extern void vfio_unregister_iommu_driver( + const struct vfio_iommu_driver_ops *ops); + +-/** +- * offsetofend(TYPE, MEMBER) +- * +- * @TYPE: The type of the structure +- * @MEMBER: The member within the structure to get the end offset of +- * +- * Simple helper macro for dealing with variable sized structures passed +- * from user space. This allows us to easily determine if the provided +- * structure is sized to include various fields. +- */ +-#define offsetofend(TYPE, MEMBER) ({ \ +- TYPE tmp; \ +- offsetof(TYPE, MEMBER) + sizeof(tmp.MEMBER); }) \ +- + #endif /* VFIO_H */ +diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h +index 100fb8cec17c..a49b65029164 100644 +--- a/include/net/if_inet6.h ++++ b/include/net/if_inet6.h +@@ -31,8 +31,10 @@ + #define IF_PREFIX_AUTOCONF 0x02 + + enum { ++ INET6_IFADDR_STATE_PREDAD, + INET6_IFADDR_STATE_DAD, + INET6_IFADDR_STATE_POSTDAD, ++ INET6_IFADDR_STATE_ERRDAD, + INET6_IFADDR_STATE_UP, + INET6_IFADDR_STATE_DEAD, + }; +@@ -50,7 +52,7 @@ struct inet6_ifaddr { + + int state; + +- __u8 probes; ++ __u8 dad_probes; + __u8 flags; + + __u16 scope; +@@ -58,7 +60,7 @@ struct inet6_ifaddr { + unsigned long cstamp; /* created timestamp */ + unsigned long tstamp; /* updated timestamp */ + +- struct timer_list timer; ++ struct delayed_work dad_work; + + struct inet6_dev *idev; + struct rt6_info *rt; +@@ -195,6 +197,10 @@ struct inet6_dev { + struct inet6_dev *next; + struct ipv6_devconf cnf; + struct ipv6_devstat stats; ++ ++ struct timer_list rs_timer; ++ __u8 rs_probes; ++ + unsigned long tstamp; /* ipv6InterfaceTable update timestamp */ + struct rcu_head rcu; + }; +diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h +index 4da5de10d1d4..b140c6079e34 100644 +--- a/include/net/ip6_tunnel.h ++++ b/include/net/ip6_tunnel.h +@@ -75,6 +75,7 @@ static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev) + int pkt_len, err; + + nf_reset(skb); ++ memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); + pkt_len = skb->len; + err = ip6_local_out(skb); + +diff --git a/include/net/ndisc.h b/include/net/ndisc.h +index 5043f8b08053..4b12d99a13cf 100644 +--- a/include/net/ndisc.h ++++ b/include/net/ndisc.h +@@ -190,7 +190,9 @@ static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, cons + } + + extern int 
ndisc_init(void); ++extern int ndisc_late_init(void); + ++extern void ndisc_late_cleanup(void); + extern void ndisc_cleanup(void); + + extern int ndisc_rcv(struct sk_buff *skb); +diff --git a/include/net/sock.h b/include/net/sock.h +index 2317d122874e..a46dd30ea58b 100644 +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -1358,7 +1358,7 @@ static inline struct inode *SOCK_INODE(struct socket *socket) + * Functions for memory accounting + */ + extern int __sk_mem_schedule(struct sock *sk, int size, int kind); +-extern void __sk_mem_reclaim(struct sock *sk); ++void __sk_mem_reclaim(struct sock *sk, int amount); + + #define SK_MEM_QUANTUM ((int)PAGE_SIZE) + #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM) +@@ -1399,7 +1399,7 @@ static inline void sk_mem_reclaim(struct sock *sk) + if (!sk_has_account(sk)) + return; + if (sk->sk_forward_alloc >= SK_MEM_QUANTUM) +- __sk_mem_reclaim(sk); ++ __sk_mem_reclaim(sk, sk->sk_forward_alloc); + } + + static inline void sk_mem_reclaim_partial(struct sock *sk) +@@ -1407,7 +1407,7 @@ static inline void sk_mem_reclaim_partial(struct sock *sk) + if (!sk_has_account(sk)) + return; + if (sk->sk_forward_alloc > SK_MEM_QUANTUM) +- __sk_mem_reclaim(sk); ++ __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1); + } + + static inline void sk_mem_charge(struct sock *sk, int size) +@@ -1422,6 +1422,16 @@ static inline void sk_mem_uncharge(struct sock *sk, int size) + if (!sk_has_account(sk)) + return; + sk->sk_forward_alloc += size; ++ ++ /* Avoid a possible overflow. ++ * TCP send queues can make this happen, if sk_mem_reclaim() ++ * is not called and more than 2 GBytes are released at once. ++ * ++ * If we reach 2 MBytes, reclaim 1 MBytes right now, there is ++ * no need to hold that much forward allocation anyway. ++ */ ++ if (unlikely(sk->sk_forward_alloc >= 1 << 21)) ++ __sk_mem_reclaim(sk, 1 << 20); + } + + static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) +diff --git a/include/net/tcp.h b/include/net/tcp.h +index 29a1a63cd303..79cd118d5994 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -1029,6 +1029,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp) + } + + extern bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); ++int tcp_filter(struct sock *sk, struct sk_buff *skb); + + #undef STATE_TRACE + +@@ -1392,6 +1393,8 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli + { + if (sk->sk_send_head == skb_unlinked) + sk->sk_send_head = NULL; ++ if (tcp_sk(sk)->highest_sack == skb_unlinked) ++ tcp_sk(sk)->highest_sack = NULL; + } + + static inline void tcp_init_send_head(struct sock *sk) +diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h +index 75271b9a8f61..50983a61eba3 100644 +--- a/include/xen/interface/io/ring.h ++++ b/include/xen/interface/io/ring.h +@@ -181,6 +181,20 @@ struct __name##_back_ring { \ + #define RING_GET_REQUEST(_r, _idx) \ + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) + ++/* ++ * Get a local copy of a request. ++ * ++ * Use this in preference to RING_GET_REQUEST() so all processing is ++ * done on a local copy that cannot be modified by the other end. ++ * ++ * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this ++ * to be ineffective where _req is a struct which consists of only bitfields. ++ */ ++#define RING_COPY_REQUEST(_r, _idx, _req) do { \ ++ /* Use volatile to force the copy into _req. 
*/ \ ++ *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \ ++} while (0) ++ + #define RING_GET_RESPONSE(_r, _idx) \ + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) + +diff --git a/ipc/sem.c b/ipc/sem.c +index 47a15192b8b8..3b968a028ccf 100644 +--- a/ipc/sem.c ++++ b/ipc/sem.c +@@ -267,20 +267,12 @@ static void sem_rcu_free(struct rcu_head *head) + * Caller must own sem_perm.lock. + * New simple ops cannot start, because simple ops first check + * that sem_perm.lock is free. +- * that a) sem_perm.lock is free and b) complex_count is 0. + */ + static void sem_wait_array(struct sem_array *sma) + { + int i; + struct sem *sem; + +- if (sma->complex_count) { +- /* The thread that increased sma->complex_count waited on +- * all sem->lock locks. Thus we don't need to wait again. +- */ +- return; +- } +- + for (i = 0; i < sma->sem_nsems; i++) { + sem = sma->sem_base + i; + spin_unlock_wait(&sem->lock); +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 0f5207839673..76e26b8e4e41 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -6249,7 +6249,6 @@ skip_type: + __perf_event_init_context(&cpuctx->ctx); + lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); + lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); +- cpuctx->ctx.type = cpu_context; + cpuctx->ctx.pmu = pmu; + cpuctx->jiffies_interval = 1; + INIT_LIST_HEAD(&cpuctx->rotation_list); +@@ -6856,7 +6855,19 @@ SYSCALL_DEFINE5(perf_event_open, + * task or CPU context: + */ + if (move_group) { +- if (group_leader->ctx->type != ctx->type) ++ /* ++ * Make sure we're both on the same task, or both ++ * per-cpu events. ++ */ ++ if (group_leader->ctx->task != ctx->task) ++ goto err_context; ++ ++ /* ++ * Make sure we're both events for the same CPU; ++ * grouping events for different CPUs is broken; since ++ * you can never concurrently schedule them anyhow. ++ */ ++ if (group_leader->cpu != event->cpu) + goto err_context; + } else { + if (group_leader->ctx != ctx) +diff --git a/kernel/fork.c b/kernel/fork.c +index 2358bd4c8757..612e78d82194 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -775,14 +775,12 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm) + deactivate_mm(tsk, mm); + + /* +- * If we're exiting normally, clear a user-space tid field if +- * requested. We leave this alone when dying by signal, to leave +- * the value intact in a core dump, and to save the unnecessary +- * trouble, say, a killed vfork parent shouldn't touch this mm. +- * Userland only wants this done for a sys_exit. ++ * Signal userspace if we're not exiting with a core dump ++ * because we want to leave the value intact for debugging ++ * purposes. + */ + if (tsk->clear_child_tid) { +- if (!(tsk->flags & PF_SIGNALED) && ++ if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) && + atomic_read(&mm->mm_users) > 1) { + /* + * We don't check the error code - if userspace has +diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c +index 269b097e78ea..743615bfdcec 100644 +--- a/kernel/power/suspend_test.c ++++ b/kernel/power/suspend_test.c +@@ -169,8 +169,10 @@ static int __init test_suspend(void) + + /* RTCs have initialized by now too ... can we use one? 
*/ + dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm); +- if (dev) ++ if (dev) { + rtc = rtc_class_open(dev_name(dev)); ++ put_device(dev); ++ } + if (!rtc) { + printk(warn_no_rtc); + goto done; +diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h +index 3db5a375d8dd..468786bee4e3 100644 +--- a/kernel/rcutree_plugin.h ++++ b/kernel/rcutree_plugin.h +@@ -2243,6 +2243,7 @@ static int rcu_nocb_kthread(void *arg) + cl++; + c++; + local_bh_enable(); ++ cond_resched(); + list = next; + } + trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1); +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 655d6110a6e1..6a366f9d08db 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -1501,11 +1501,52 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) + success = 1; /* we're going to change ->state */ + cpu = task_cpu(p); + ++ /* ++ * Ensure we load p->on_rq _after_ p->state, otherwise it would ++ * be possible to, falsely, observe p->on_rq == 0 and get stuck ++ * in smp_cond_load_acquire() below. ++ * ++ * sched_ttwu_pending() try_to_wake_up() ++ * [S] p->on_rq = 1; [L] P->state ++ * UNLOCK rq->lock -----. ++ * \ ++ * +--- RMB ++ * schedule() / ++ * LOCK rq->lock -----' ++ * UNLOCK rq->lock ++ * ++ * [task p] ++ * [S] p->state = UNINTERRUPTIBLE [L] p->on_rq ++ * ++ * Pairs with the UNLOCK+LOCK on rq->lock from the ++ * last wakeup of our task and the schedule that got our task ++ * current. ++ */ ++ smp_rmb(); + if (p->on_rq && ttwu_remote(p, wake_flags)) + goto stat; + + #ifdef CONFIG_SMP + /* ++ * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be ++ * possible to, falsely, observe p->on_cpu == 0. ++ * ++ * One must be running (->on_cpu == 1) in order to remove oneself ++ * from the runqueue. ++ * ++ * [S] ->on_cpu = 1; [L] ->on_rq ++ * UNLOCK rq->lock ++ * RMB ++ * LOCK rq->lock ++ * [S] ->on_rq = 0; [L] ->on_cpu ++ * ++ * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock ++ * from the consecutive calls to schedule(); the first switching to our ++ * task, the second putting it to sleep. ++ */ ++ smp_rmb(); ++ ++ /* + * If the owning (remote) cpu is still in the middle of schedule() with + * this task as prev, wait until its done referencing the task. + */ +diff --git a/kernel/timer.c b/kernel/timer.c +index 20f45ea6f5a4..be22e45dc36f 100644 +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -923,13 +923,26 @@ EXPORT_SYMBOL(add_timer); + */ + void add_timer_on(struct timer_list *timer, int cpu) + { +- struct tvec_base *base = per_cpu(tvec_bases, cpu); ++ struct tvec_base *new_base = per_cpu(tvec_bases, cpu); ++ struct tvec_base *base; + unsigned long flags; + + timer_stats_timer_set_start_info(timer); + BUG_ON(timer_pending(timer) || !timer->function); +- spin_lock_irqsave(&base->lock, flags); +- timer_set_base(timer, base); ++ ++ /* ++ * If @timer was on a different CPU, it should be migrated with the ++ * old base locked to prevent other operations proceeding with the ++ * wrong base locked. See lock_timer_base(). 
++ */ ++ base = lock_timer_base(timer, &flags); ++ if (base != new_base) { ++ timer_set_base(timer, NULL); ++ spin_unlock(&base->lock); ++ base = new_base; ++ spin_lock(&base->lock); ++ timer_set_base(timer, base); ++ } + debug_activate(timer, timer->expires); + internal_add_timer(base, timer); + /* +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index eff26a976f02..d6e72522fc4e 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -4121,13 +4121,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, + struct trace_array *tr = iter->tr; + ssize_t sret; + +- /* return any leftover data */ +- sret = trace_seq_to_user(&iter->seq, ubuf, cnt); +- if (sret != -EBUSY) +- return sret; +- +- trace_seq_init(&iter->seq); +- + /* copy the tracer to avoid using a global lock all around */ + mutex_lock(&trace_types_lock); + if (unlikely(iter->trace->name != tr->current_trace->name)) +@@ -4140,6 +4133,14 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, + * is protected. + */ + mutex_lock(&iter->mutex); ++ ++ /* return any leftover data */ ++ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); ++ if (sret != -EBUSY) ++ goto out; ++ ++ trace_seq_init(&iter->seq); ++ + if (iter->trace->read) { + sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); + if (sret) +@@ -5168,11 +5169,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, + } + #endif + +- if (splice_grow_spd(pipe, &spd)) { +- ret = -ENOMEM; +- goto out; +- } +- + if (*ppos & (PAGE_SIZE - 1)) { + ret = -EINVAL; + goto out; +@@ -5186,6 +5182,11 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, + len &= PAGE_MASK; + } + ++ if (splice_grow_spd(pipe, &spd)) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ + again: + trace_access_lock(iter->cpu_file); + entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); +@@ -5241,21 +5242,22 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, + if (!spd.nr_pages) { + if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) { + ret = -EAGAIN; +- goto out; ++ goto out_shrink; + } + mutex_unlock(&trace_types_lock); + ret = iter->trace->wait_pipe(iter); + mutex_lock(&trace_types_lock); + if (ret) +- goto out; ++ goto out_shrink; + if (signal_pending(current)) { + ret = -EINTR; +- goto out; ++ goto out_shrink; + } + goto again; + } + + ret = splice_to_pipe(pipe, &spd); ++out_shrink: + splice_shrink_spd(&spd); + out: + mutex_unlock(&trace_types_lock); +diff --git a/lib/genalloc.c b/lib/genalloc.c +index 2a39bf62d8c1..ac5fba950eb1 100644 +--- a/lib/genalloc.c ++++ b/lib/genalloc.c +@@ -273,7 +273,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) + struct gen_pool_chunk *chunk; + unsigned long addr = 0; + int order = pool->min_alloc_order; +- int nbits, start_bit = 0, end_bit, remain; ++ int nbits, start_bit, end_bit, remain; + + #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG + BUG_ON(in_nmi()); +@@ -288,6 +288,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) + if (size > atomic_read(&chunk->avail)) + continue; + ++ start_bit = 0; + end_bit = chunk_size(chunk) >> order; + retry: + start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits, +diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c +index 5464c8744ea9..e24388a863a7 100644 +--- a/lib/mpi/mpi-pow.c ++++ b/lib/mpi/mpi-pow.c +@@ -64,8 +64,13 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) + if (!esize) { + /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 + * depending on if MOD equals 1. 
*/ +- rp[0] = 1; + res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1; ++ if (res->nlimbs) { ++ if (mpi_resize(res, 1) < 0) ++ goto enomem; ++ rp = res->d; ++ rp[0] = 1; ++ } + res->sign = 0; + goto leave; + } +diff --git a/lib/ratelimit.c b/lib/ratelimit.c +index 40e03ea2a967..2c5de86460c5 100644 +--- a/lib/ratelimit.c ++++ b/lib/ratelimit.c +@@ -49,7 +49,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) + if (rs->missed) + printk(KERN_WARNING "%s: %d callbacks suppressed\n", + func, rs->missed); +- rs->begin = 0; ++ rs->begin = jiffies; + rs->printed = 0; + rs->missed = 0; + } +diff --git a/mm/ksm.c b/mm/ksm.c +index 7bf748f30aab..d1b19b9e888e 100644 +--- a/mm/ksm.c ++++ b/mm/ksm.c +@@ -283,7 +283,8 @@ static inline struct rmap_item *alloc_rmap_item(void) + { + struct rmap_item *rmap_item; + +- rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL); ++ rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL | ++ __GFP_NORETRY | __GFP_NOWARN); + if (rmap_item) + ksm_rmap_items++; + return rmap_item; +diff --git a/mm/swapfile.c b/mm/swapfile.c +index 746af55b8455..d0a89838b99a 100644 +--- a/mm/swapfile.c ++++ b/mm/swapfile.c +@@ -1922,6 +1922,8 @@ static unsigned long read_swap_header(struct swap_info_struct *p, + swab32s(&swap_header->info.version); + swab32s(&swap_header->info.last_page); + swab32s(&swap_header->info.nr_badpages); ++ if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) ++ return 0; + for (i = 0; i < swap_header->info.nr_badpages; i++) + swab32s(&swap_header->info.badpages[i]); + } +diff --git a/net/can/bcm.c b/net/can/bcm.c +index 35cf02d92766..dd0781c49ebb 100644 +--- a/net/can/bcm.c ++++ b/net/can/bcm.c +@@ -1500,24 +1500,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, + struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; + struct sock *sk = sock->sk; + struct bcm_sock *bo = bcm_sk(sk); ++ int ret = 0; + + if (len < sizeof(*addr)) + return -EINVAL; + +- if (bo->bound) +- return -EISCONN; ++ lock_sock(sk); ++ ++ if (bo->bound) { ++ ret = -EISCONN; ++ goto fail; ++ } + + /* bind a device to this socket */ + if (addr->can_ifindex) { + struct net_device *dev; + + dev = dev_get_by_index(&init_net, addr->can_ifindex); +- if (!dev) +- return -ENODEV; +- ++ if (!dev) { ++ ret = -ENODEV; ++ goto fail; ++ } + if (dev->type != ARPHRD_CAN) { + dev_put(dev); +- return -ENODEV; ++ ret = -ENODEV; ++ goto fail; + } + + bo->ifindex = dev->ifindex; +@@ -1528,17 +1535,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, + bo->ifindex = 0; + } + +- bo->bound = 1; +- + if (proc_dir) { + /* unique socket address as filename */ + sprintf(bo->procname, "%lu", sock_i_ino(sk)); + bo->bcm_proc_read = proc_create_data(bo->procname, 0644, + proc_dir, + &bcm_proc_fops, sk); ++ if (!bo->bcm_proc_read) { ++ ret = -ENOMEM; ++ goto fail; ++ } + } + +- return 0; ++ bo->bound = 1; ++ ++fail: ++ release_sock(sk); ++ ++ return ret; + } + + static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock, +diff --git a/net/core/dev.c b/net/core/dev.c +index 1ccfc49683b3..6494918b3eaa 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -2234,7 +2234,7 @@ int skb_checksum_help(struct sk_buff *skb) + goto out; + } + +- *(__sum16 *)(skb->data + offset) = csum_fold(csum); ++ *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; + out_set_summed: + skb->ip_summed = CHECKSUM_NONE; + out: +@@ -3346,6 +3346,22 @@ out: + #endif + + /** ++ * netdev_is_rx_handler_busy - check if receive handler is 
registered ++ * @dev: device to check ++ * ++ * Check if a receive handler is already registered for a given device. ++ * Return true if there one. ++ * ++ * The caller must hold the rtnl_mutex. ++ */ ++bool netdev_is_rx_handler_busy(struct net_device *dev) ++{ ++ ASSERT_RTNL(); ++ return dev && rtnl_dereference(dev->rx_handler); ++} ++EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy); ++ ++/** + * netdev_rx_handler_register - register receive handler + * @dev: device to register a handler for + * @rx_handler: receive handler to register +diff --git a/net/core/dst.c b/net/core/dst.c +index 1bf6842b89b8..582b861aeba6 100644 +--- a/net/core/dst.c ++++ b/net/core/dst.c +@@ -283,7 +283,9 @@ void dst_release(struct dst_entry *dst) + unsigned short nocache = dst->flags & DST_NOCACHE; + + newrefcnt = atomic_dec_return(&dst->__refcnt); +- WARN_ON(newrefcnt < 0); ++ if (unlikely(newrefcnt < 0)) ++ net_warn_ratelimited("%s: dst:%p refcnt:%d\n", ++ __func__, dst, newrefcnt); + if (!newrefcnt && unlikely(nocache)) + call_rcu(&dst->rcu_head, dst_destroy_rcu); + } +diff --git a/net/core/filter.c b/net/core/filter.c +index c6c18d8a2d88..65f2a65b5333 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -67,9 +67,10 @@ static inline void *load_pointer(const struct sk_buff *skb, int k, + } + + /** +- * sk_filter - run a packet through a socket filter ++ * sk_filter_trim_cap - run a packet through a socket filter + * @sk: sock associated with &sk_buff + * @skb: buffer to filter ++ * @cap: limit on how short the eBPF program may trim the packet + * + * Run the filter code and then cut skb->data to correct size returned by + * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller +@@ -78,7 +79,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k, + * be accepted or -EPERM if the packet should be tossed. + * + */ +-int sk_filter(struct sock *sk, struct sk_buff *skb) ++int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap) + { + int err; + struct sk_filter *filter; +@@ -99,14 +100,13 @@ int sk_filter(struct sock *sk, struct sk_buff *skb) + filter = rcu_dereference(sk->sk_filter); + if (filter) { + unsigned int pkt_len = SK_RUN_FILTER(filter, skb); +- +- err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; ++ err = pkt_len ? 
pskb_trim(skb, max(cap, pkt_len)) : -EPERM; + } + rcu_read_unlock(); + + return err; + } +-EXPORT_SYMBOL(sk_filter); ++EXPORT_SYMBOL(sk_filter_trim_cap); + + /** + * sk_run_filter - run a filter on a socket +diff --git a/net/core/sock.c b/net/core/sock.c +index 5a954fccc7d3..e3cb45411f34 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -1515,6 +1515,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) + } + + newsk->sk_err = 0; ++ newsk->sk_err_soft = 0; + newsk->sk_priority = 0; + /* + * Before updating sk_refcnt, we must commit prior changes to memory +@@ -2048,12 +2049,13 @@ EXPORT_SYMBOL(__sk_mem_schedule); + /** + * __sk_reclaim - reclaim memory_allocated + * @sk: socket ++ * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple) + */ +-void __sk_mem_reclaim(struct sock *sk) ++void __sk_mem_reclaim(struct sock *sk, int amount) + { +- sk_memory_allocated_sub(sk, +- sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT); +- sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1; ++ amount >>= SK_MEM_QUANTUM_SHIFT; ++ sk_memory_allocated_sub(sk, amount); ++ sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT; + + if (sk_under_memory_pressure(sk) && + (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) +diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c +index ebc54fef85a5..294c642fbebb 100644 +--- a/net/dccp/ipv4.c ++++ b/net/dccp/ipv4.c +@@ -212,7 +212,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) + { + const struct iphdr *iph = (struct iphdr *)skb->data; + const u8 offset = iph->ihl << 2; +- const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); ++ const struct dccp_hdr *dh; + struct dccp_sock *dp; + struct inet_sock *inet; + const int type = icmp_hdr(skb)->type; +@@ -222,11 +222,13 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) + int err; + struct net *net = dev_net(skb->dev); + +- if (skb->len < offset + sizeof(*dh) || +- skb->len < offset + __dccp_basic_hdr_len(dh)) { +- ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); +- return; +- } ++ /* Only need dccph_dport & dccph_sport which are the first ++ * 4 bytes in dccp header. ++ * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us. ++ */ ++ BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8); ++ BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8); ++ dh = (struct dccp_hdr *)(skb->data + offset); + + sk = inet_lookup(net, &dccp_hashinfo, + iph->daddr, dh->dccph_dport, +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c +index 6cf9f7782ad4..94f8224d543e 100644 +--- a/net/dccp/ipv6.c ++++ b/net/dccp/ipv6.c +@@ -83,7 +83,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, + u8 type, u8 code, int offset, __be32 info) + { + const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; +- const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); ++ const struct dccp_hdr *dh; + struct dccp_sock *dp; + struct ipv6_pinfo *np; + struct sock *sk; +@@ -91,12 +91,13 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, + __u64 seq; + struct net *net = dev_net(skb->dev); + +- if (skb->len < offset + sizeof(*dh) || +- skb->len < offset + __dccp_basic_hdr_len(dh)) { +- ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), +- ICMP6_MIB_INERRORS); +- return; +- } ++ /* Only need dccph_dport & dccph_sport which are the first ++ * 4 bytes in dccp header. ++ * Our caller (icmpv6_notify()) already pulled 8 bytes for us. 
++ */ ++ BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8); ++ BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8); ++ dh = (struct dccp_hdr *)(skb->data + offset); + + sk = inet6_lookup(net, &dccp_hashinfo, + &hdr->daddr, dh->dccph_dport, +@@ -1013,6 +1014,7 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = { + .getsockopt = ipv6_getsockopt, + .addr2sockaddr = inet6_csk_addr2sockaddr, + .sockaddr_len = sizeof(struct sockaddr_in6), ++ .bind_conflict = inet6_csk_bind_conflict, + #ifdef CONFIG_COMPAT + .compat_setsockopt = compat_ipv6_setsockopt, + .compat_getsockopt = compat_ipv6_getsockopt, +diff --git a/net/dccp/proto.c b/net/dccp/proto.c +index 6c7c78b83940..cb55fb912401 100644 +--- a/net/dccp/proto.c ++++ b/net/dccp/proto.c +@@ -1012,6 +1012,10 @@ void dccp_close(struct sock *sk, long timeout) + __kfree_skb(skb); + } + ++ /* If socket has been already reset kill it. */ ++ if (sk->sk_state == DCCP_CLOSED) ++ goto adjudge_to_death; ++ + if (data_was_unread) { + /* Unread data was tossed, send an appropriate Reset Code */ + DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread); +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c +index 4d98a6b80b04..04c7e4618008 100644 +--- a/net/ipv4/ip_fragment.c ++++ b/net/ipv4/ip_fragment.c +@@ -656,6 +656,9 @@ int ip_defrag(struct sk_buff *skb, u32 user) + net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev); + IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); + ++ if (!net->ipv4.frags.high_thresh) ++ goto fail; ++ + /* Start by cleaning up the memory. */ + ip_evictor(net); + +@@ -672,6 +675,7 @@ int ip_defrag(struct sk_buff *skb, u32 user) + return ret; + } + ++fail: + IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); + kfree_skb(skb); + return -ENOMEM; +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c +index 57e745086302..5f077efad29d 100644 +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -97,6 +97,9 @@ int __ip_local_out(struct sk_buff *skb) + + iph->tot_len = htons(skb->len); + ip_send_check(iph); ++ ++ skb->protocol = htons(ETH_P_IP); ++ + return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL, + skb_dst(skb)->dev, dst_output); + } +diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c +index 89570f070e0e..a429ac69af78 100644 +--- a/net/ipv4/ipmr.c ++++ b/net/ipv4/ipmr.c +@@ -2190,7 +2190,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, + + int ipmr_get_route(struct net *net, struct sk_buff *skb, + __be32 saddr, __be32 daddr, +- struct rtmsg *rtm, int nowait) ++ struct rtmsg *rtm, int nowait, u32 portid) + { + struct mfc_cache *cache; + struct mr_table *mrt; +@@ -2235,6 +2235,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb, + return -ENOMEM; + } + ++ NETLINK_CB(skb2).portid = portid; + skb_push(skb2, sizeof(struct iphdr)); + skb_reset_network_header(skb2); + iph = ip_hdr(skb2); +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 624ca8ed350c..e59d6332458b 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -713,8 +713,10 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow + goto reject_redirect; + } + +- n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw); +- if (n) { ++ n = __ipv4_neigh_lookup(rt->dst.dev, new_gw); ++ if (!n) ++ n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev); ++ if (!IS_ERR(n)) { + if (!(n->nud_state & NUD_VALID)) { + neigh_event_send(n, NULL); + } else { +@@ -2325,7 +2327,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, + IPV4_DEVCONF_ALL(net, 
MC_FORWARDING)) { + int err = ipmr_get_route(net, skb, + fl4->saddr, fl4->daddr, +- r, nowait); ++ r, nowait, portid); ++ + if (err <= 0) { + if (!nowait) { + if (err == 0) +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index 11f27a45b8ef..6504a085ca60 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -824,7 +824,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, + */ + tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ? + tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, +- tcp_rsk(req)->rcv_nxt, req->rcv_wnd, ++ tcp_rsk(req)->rcv_nxt, ++ req->rcv_wnd >> inet_rsk(req)->rcv_wscale, + tcp_time_stamp, + req->ts_recent, + 0, +@@ -1958,6 +1959,21 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) + } + EXPORT_SYMBOL(tcp_prequeue); + ++int tcp_filter(struct sock *sk, struct sk_buff *skb) ++{ ++ struct tcphdr *th = (struct tcphdr *)skb->data; ++ unsigned int eaten = skb->len; ++ int err; ++ ++ err = sk_filter_trim_cap(sk, skb, th->doff * 4); ++ if (!err) { ++ eaten -= skb->len; ++ TCP_SKB_CB(skb)->end_seq -= eaten; ++ } ++ return err; ++} ++EXPORT_SYMBOL(tcp_filter); ++ + /* + * From tcp_input.c + */ +@@ -2020,8 +2036,10 @@ process: + goto discard_and_relse; + nf_reset(skb); + +- if (sk_filter(sk, skb)) ++ if (tcp_filter(sk, skb)) + goto discard_and_relse; ++ th = (const struct tcphdr *)skb->data; ++ iph = ip_hdr(skb); + + skb->dev = NULL; + +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index 276b28301a6b..1f2f6b5406ee 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -1753,12 +1753,14 @@ static int tcp_mtu_probe(struct sock *sk) + len = 0; + tcp_for_write_queue_from_safe(skb, next, sk) { + copy = min_t(int, skb->len, probe_size - len); +- if (nskb->ip_summed) ++ if (nskb->ip_summed) { + skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); +- else +- nskb->csum = skb_copy_and_csum_bits(skb, 0, +- skb_put(nskb, copy), +- copy, nskb->csum); ++ } else { ++ __wsum csum = skb_copy_and_csum_bits(skb, 0, ++ skb_put(nskb, copy), ++ copy, 0); ++ nskb->csum = csum_block_add(nskb->csum, csum, len); ++ } + + if (skb->len <= copy) { + /* We've eaten all the data from this skb. +@@ -2327,7 +2329,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) + * copying overhead: fragmentation, tunneling, mangling etc. 
+ */ + if (atomic_read(&sk->sk_wmem_alloc) > +- min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) ++ min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), ++ sk->sk_sndbuf)) + return -EAGAIN; + + if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index d0912acd9522..a3e2c34d5b7a 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -139,10 +139,12 @@ static int ipv6_count_addresses(struct inet6_dev *idev); + static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE]; + static DEFINE_SPINLOCK(addrconf_hash_lock); + +-static void addrconf_verify(unsigned long); ++static void addrconf_verify(void); ++static void addrconf_verify_rtnl(void); ++static void addrconf_verify_work(struct work_struct *); + +-static DEFINE_TIMER(addr_chk_timer, addrconf_verify, 0, 0); +-static DEFINE_SPINLOCK(addrconf_verify_lock); ++static struct workqueue_struct *addrconf_wq; ++static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work); + + static void addrconf_join_anycast(struct inet6_ifaddr *ifp); + static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); +@@ -157,7 +159,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, + u32 flags, u32 noflags); + + static void addrconf_dad_start(struct inet6_ifaddr *ifp); +-static void addrconf_dad_timer(unsigned long data); ++static void addrconf_dad_work(struct work_struct *w); + static void addrconf_dad_completed(struct inet6_ifaddr *ifp); + static void addrconf_dad_run(struct inet6_dev *idev); + static void addrconf_rs_timer(unsigned long data); +@@ -253,37 +255,32 @@ static inline bool addrconf_qdisc_ok(const struct net_device *dev) + return !qdisc_tx_is_noop(dev); + } + +-static void addrconf_del_timer(struct inet6_ifaddr *ifp) ++static void addrconf_del_rs_timer(struct inet6_dev *idev) + { +- if (del_timer(&ifp->timer)) ++ if (del_timer(&idev->rs_timer)) ++ __in6_dev_put(idev); ++} ++ ++static void addrconf_del_dad_work(struct inet6_ifaddr *ifp) ++{ ++ if (cancel_delayed_work(&ifp->dad_work)) + __in6_ifa_put(ifp); + } + +-enum addrconf_timer_t { +- AC_NONE, +- AC_DAD, +- AC_RS, +-}; ++static void addrconf_mod_rs_timer(struct inet6_dev *idev, ++ unsigned long when) ++{ ++ if (!timer_pending(&idev->rs_timer)) ++ in6_dev_hold(idev); ++ mod_timer(&idev->rs_timer, jiffies + when); ++} + +-static void addrconf_mod_timer(struct inet6_ifaddr *ifp, +- enum addrconf_timer_t what, +- unsigned long when) ++static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp, ++ unsigned long delay) + { +- if (!del_timer(&ifp->timer)) ++ if (!delayed_work_pending(&ifp->dad_work)) + in6_ifa_hold(ifp); +- +- switch (what) { +- case AC_DAD: +- ifp->timer.function = addrconf_dad_timer; +- break; +- case AC_RS: +- ifp->timer.function = addrconf_rs_timer; +- break; +- default: +- break; +- } +- ifp->timer.expires = jiffies + when; +- add_timer(&ifp->timer); ++ mod_delayed_work(addrconf_wq, &ifp->dad_work, delay); + } + + static int snmp6_alloc_dev(struct inet6_dev *idev) +@@ -326,6 +323,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev) + + WARN_ON(!list_empty(&idev->addr_list)); + WARN_ON(idev->mc_list != NULL); ++ WARN_ON(timer_pending(&idev->rs_timer)); + + #ifdef NET_REFCNT_DEBUG + pr_debug("%s: %s\n", __func__, dev ? 
dev->name : "NIL"); +@@ -357,7 +355,8 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) + rwlock_init(&ndev->lock); + ndev->dev = dev; + INIT_LIST_HEAD(&ndev->addr_list); +- ++ setup_timer(&ndev->rs_timer, addrconf_rs_timer, ++ (unsigned long)ndev); + memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); + ndev->cnf.mtu6 = dev->mtu; + ndev->cnf.sysctl = NULL; +@@ -776,8 +775,9 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) + + in6_dev_put(ifp->idev); + +- if (del_timer(&ifp->timer)) +- pr_notice("Timer is still running, when freeing ifa=%p\n", ifp); ++ if (cancel_delayed_work(&ifp->dad_work)) ++ pr_notice("delayed DAD work was pending while freeing ifa=%p\n", ++ ifp); + + if (ifp->state != INET6_IFADDR_STATE_DEAD) { + pr_warn("Freeing alive inet6 address %p\n", ifp); +@@ -869,9 +869,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, + + spin_lock_init(&ifa->lock); + spin_lock_init(&ifa->state_lock); +- init_timer(&ifa->timer); ++ INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work); + INIT_HLIST_NODE(&ifa->addr_lst); +- ifa->timer.data = (unsigned long) ifa; + ifa->scope = scope; + ifa->prefix_len = pfxlen; + ifa->flags = flags | IFA_F_TENTATIVE; +@@ -930,6 +929,8 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) + int deleted = 0, onlink = 0; + unsigned long expires = jiffies; + ++ ASSERT_RTNL(); ++ + spin_lock_bh(&ifp->state_lock); + state = ifp->state; + ifp->state = INET6_IFADDR_STATE_DEAD; +@@ -994,7 +995,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) + } + write_unlock_bh(&idev->lock); + +- addrconf_del_timer(ifp); ++ addrconf_del_dad_work(ifp); + + ipv6_ifa_notify(RTM_DELADDR, ifp); + +@@ -1617,7 +1618,7 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) + { + if (ifp->flags&IFA_F_PERMANENT) { + spin_lock_bh(&ifp->lock); +- addrconf_del_timer(ifp); ++ addrconf_del_dad_work(ifp); + ifp->flags |= IFA_F_TENTATIVE; + if (dad_failed) + ifp->flags |= IFA_F_DADFAILED; +@@ -1640,20 +1641,21 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) + } + ipv6_del_addr(ifp); + #endif +- } else ++ } else { + ipv6_del_addr(ifp); ++ } + } + + static int addrconf_dad_end(struct inet6_ifaddr *ifp) + { + int err = -ENOENT; + +- spin_lock(&ifp->state_lock); ++ spin_lock_bh(&ifp->state_lock); + if (ifp->state == INET6_IFADDR_STATE_DAD) { + ifp->state = INET6_IFADDR_STATE_POSTDAD; + err = 0; + } +- spin_unlock(&ifp->state_lock); ++ spin_unlock_bh(&ifp->state_lock); + + return err; + } +@@ -1686,11 +1688,17 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp) + } + } + +- addrconf_dad_stop(ifp, 1); +-} ++ spin_lock_bh(&ifp->state_lock); ++ /* transition from _POSTDAD to _ERRDAD */ ++ ifp->state = INET6_IFADDR_STATE_ERRDAD; ++ spin_unlock_bh(&ifp->state_lock); + +-/* Join to solicited addr multicast group. */ ++ addrconf_mod_dad_work(ifp, 0); ++ in6_ifa_put(ifp); ++} + ++/* Join to solicited addr multicast group. 
++ * caller must hold RTNL */ + void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr) + { + struct in6_addr maddr; +@@ -1702,6 +1710,7 @@ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr) + ipv6_dev_mc_inc(dev, &maddr); + } + ++/* caller must hold RTNL */ + void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr) + { + struct in6_addr maddr; +@@ -1713,9 +1722,11 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr) + __ipv6_dev_mc_dec(idev, &maddr); + } + ++/* caller must hold RTNL */ + static void addrconf_join_anycast(struct inet6_ifaddr *ifp) + { + struct in6_addr addr; ++ + if (ifp->prefix_len == 127) /* RFC 6164 */ + return; + ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); +@@ -1724,9 +1735,11 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp) + ipv6_dev_ac_inc(ifp->idev->dev, &addr); + } + ++/* caller must hold RTNL */ + static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) + { + struct in6_addr addr; ++ + if (ifp->prefix_len == 127) /* RFC 6164 */ + return; + ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); +@@ -2361,7 +2374,7 @@ ok: + } + #endif + in6_ifa_put(ifp); +- addrconf_verify(0); ++ addrconf_verify(); + } + } + inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo); +@@ -2504,7 +2517,7 @@ static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *p + */ + addrconf_dad_start(ifp); + in6_ifa_put(ifp); +- addrconf_verify(0); ++ addrconf_verify_rtnl(); + return 0; + } + +@@ -2696,7 +2709,7 @@ static void init_loopback(struct net_device *dev) + * lo device down, release this obsolete dst and + * reallocate a new router for ifa. + */ +- if (sp_ifa->rt->dst.obsolete > 0) { ++ if (!atomic_read(&sp_ifa->rt->rt6i_ref)) { + ip6_rt_put(sp_ifa->rt); + sp_ifa->rt = NULL; + } else { +@@ -3085,7 +3098,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) + hlist_for_each_entry_rcu(ifa, h, addr_lst) { + if (ifa->idev == idev) { + hlist_del_init_rcu(&ifa->addr_lst); +- addrconf_del_timer(ifa); ++ addrconf_del_dad_work(ifa); + goto restart; + } + } +@@ -3094,6 +3107,8 @@ static int addrconf_ifdown(struct net_device *dev, int how) + + write_lock_bh(&idev->lock); + ++ addrconf_del_rs_timer(idev); ++ + /* Step 2: clear flags for stateless addrconf */ + if (!how) + idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); +@@ -3123,7 +3138,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) + while (!list_empty(&idev->addr_list)) { + ifa = list_first_entry(&idev->addr_list, + struct inet6_ifaddr, if_list); +- addrconf_del_timer(ifa); ++ addrconf_del_dad_work(ifa); + + list_del(&ifa->if_list); + +@@ -3165,10 +3180,10 @@ static int addrconf_ifdown(struct net_device *dev, int how) + + static void addrconf_rs_timer(unsigned long data) + { +- struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data; +- struct inet6_dev *idev = ifp->idev; ++ struct inet6_dev *idev = (struct inet6_dev *)data; ++ struct in6_addr lladdr; + +- read_lock(&idev->lock); ++ write_lock(&idev->lock); + if (idev->dead || !(idev->if_flags & IF_READY)) + goto out; + +@@ -3179,18 +3194,19 @@ static void addrconf_rs_timer(unsigned long data) + if (idev->if_flags & IF_RA_RCVD) + goto out; + +- spin_lock(&ifp->lock); +- if (ifp->probes++ < idev->cnf.rtr_solicits) { +- /* The wait after the last probe can be shorter */ +- addrconf_mod_timer(ifp, AC_RS, +- (ifp->probes == idev->cnf.rtr_solicits) ? 
+- idev->cnf.rtr_solicit_delay : +- idev->cnf.rtr_solicit_interval); +- spin_unlock(&ifp->lock); ++ if (idev->rs_probes++ < idev->cnf.rtr_solicits) { ++ if (!__ipv6_get_lladdr(idev, &lladdr, IFA_F_TENTATIVE)) ++ ndisc_send_rs(idev->dev, &lladdr, ++ &in6addr_linklocal_allrouters); ++ else ++ goto out; + +- ndisc_send_rs(idev->dev, &ifp->addr, &in6addr_linklocal_allrouters); ++ /* The wait after the last probe can be shorter */ ++ addrconf_mod_rs_timer(idev, (idev->rs_probes == ++ idev->cnf.rtr_solicits) ? ++ idev->cnf.rtr_solicit_delay : ++ idev->cnf.rtr_solicit_interval); + } else { +- spin_unlock(&ifp->lock); + /* + * Note: we do not support deprecated "all on-link" + * assumption any longer. +@@ -3199,8 +3215,8 @@ static void addrconf_rs_timer(unsigned long data) + } + + out: +- read_unlock(&idev->lock); +- in6_ifa_put(ifp); ++ write_unlock(&idev->lock); ++ in6_dev_put(idev); + } + + /* +@@ -3216,11 +3232,11 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp) + else + rand_num = net_random() % (idev->cnf.rtr_solicit_delay ? : 1); + +- ifp->probes = idev->cnf.dad_transmits; +- addrconf_mod_timer(ifp, AC_DAD, rand_num); ++ ifp->dad_probes = idev->cnf.dad_transmits; ++ addrconf_mod_dad_work(ifp, rand_num); + } + +-static void addrconf_dad_start(struct inet6_ifaddr *ifp) ++static void addrconf_dad_begin(struct inet6_ifaddr *ifp) + { + struct inet6_dev *idev = ifp->idev; + struct net_device *dev = idev->dev; +@@ -3272,57 +3288,105 @@ out: + read_unlock_bh(&idev->lock); + } + +-static void addrconf_dad_timer(unsigned long data) ++static void addrconf_dad_start(struct inet6_ifaddr *ifp) + { +- struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data; ++ bool begin_dad = false; ++ ++ spin_lock_bh(&ifp->state_lock); ++ if (ifp->state != INET6_IFADDR_STATE_DEAD) { ++ ifp->state = INET6_IFADDR_STATE_PREDAD; ++ begin_dad = true; ++ } ++ spin_unlock_bh(&ifp->state_lock); ++ ++ if (begin_dad) ++ addrconf_mod_dad_work(ifp, 0); ++} ++ ++static void addrconf_dad_work(struct work_struct *w) ++{ ++ struct inet6_ifaddr *ifp = container_of(to_delayed_work(w), ++ struct inet6_ifaddr, ++ dad_work); + struct inet6_dev *idev = ifp->idev; + struct in6_addr mcaddr; + +- if (!ifp->probes && addrconf_dad_end(ifp)) ++ enum { ++ DAD_PROCESS, ++ DAD_BEGIN, ++ DAD_ABORT, ++ } action = DAD_PROCESS; ++ ++ rtnl_lock(); ++ ++ spin_lock_bh(&ifp->state_lock); ++ if (ifp->state == INET6_IFADDR_STATE_PREDAD) { ++ action = DAD_BEGIN; ++ ifp->state = INET6_IFADDR_STATE_DAD; ++ } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) { ++ action = DAD_ABORT; ++ ifp->state = INET6_IFADDR_STATE_POSTDAD; ++ } ++ spin_unlock_bh(&ifp->state_lock); ++ ++ if (action == DAD_BEGIN) { ++ addrconf_dad_begin(ifp); ++ goto out; ++ } else if (action == DAD_ABORT) { ++ in6_ifa_hold(ifp); ++ addrconf_dad_stop(ifp, 1); + goto out; ++ } + +- read_lock(&idev->lock); ++ if (!ifp->dad_probes && addrconf_dad_end(ifp)) ++ goto out; ++ ++ write_lock_bh(&idev->lock); + if (idev->dead || !(idev->if_flags & IF_READY)) { +- read_unlock(&idev->lock); ++ write_unlock_bh(&idev->lock); + goto out; + } + + spin_lock(&ifp->lock); + if (ifp->state == INET6_IFADDR_STATE_DEAD) { + spin_unlock(&ifp->lock); +- read_unlock(&idev->lock); ++ write_unlock_bh(&idev->lock); + goto out; + } + +- if (ifp->probes == 0) { ++ if (ifp->dad_probes == 0) { + /* + * DAD was successful + */ + + ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); + spin_unlock(&ifp->lock); +- read_unlock(&idev->lock); ++ write_unlock_bh(&idev->lock); + + addrconf_dad_completed(ifp); + + 
goto out; + } + +- ifp->probes--; +- addrconf_mod_timer(ifp, AC_DAD, ifp->idev->nd_parms->retrans_time); ++ ifp->dad_probes--; ++ addrconf_mod_dad_work(ifp, ifp->idev->nd_parms->retrans_time); + spin_unlock(&ifp->lock); +- read_unlock(&idev->lock); ++ write_unlock_bh(&idev->lock); + + /* send a neighbour solicitation for our addr */ + addrconf_addr_solict_mult(&ifp->addr, &mcaddr); + ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any); + out: + in6_ifa_put(ifp); ++ rtnl_unlock(); + } + + static void addrconf_dad_completed(struct inet6_ifaddr *ifp) + { + struct net_device *dev = ifp->idev->dev; ++ struct in6_addr lladdr; ++ ++ addrconf_del_dad_work(ifp); + + /* + * Configure the address for reception. Now it is valid. +@@ -3343,13 +3407,20 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp) + * [...] as part of DAD [...] there is no need + * to delay again before sending the first RS + */ +- ndisc_send_rs(ifp->idev->dev, &ifp->addr, &in6addr_linklocal_allrouters); ++ if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE)) ++ ndisc_send_rs(dev, &lladdr, ++ &in6addr_linklocal_allrouters); ++ else ++ return; + +- spin_lock_bh(&ifp->lock); +- ifp->probes = 1; ++ write_lock_bh(&ifp->idev->lock); ++ spin_lock(&ifp->lock); ++ ifp->idev->rs_probes = 1; + ifp->idev->if_flags |= IF_RS_SENT; +- addrconf_mod_timer(ifp, AC_RS, ifp->idev->cnf.rtr_solicit_interval); +- spin_unlock_bh(&ifp->lock); ++ addrconf_mod_rs_timer(ifp->idev, ++ ifp->idev->cnf.rtr_solicit_interval); ++ spin_unlock(&ifp->lock); ++ write_unlock_bh(&ifp->idev->lock); + } + } + +@@ -3547,23 +3618,23 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr) + * Periodic address status verification + */ + +-static void addrconf_verify(unsigned long foo) ++static void addrconf_verify_rtnl(void) + { + unsigned long now, next, next_sec, next_sched; + struct inet6_ifaddr *ifp; + int i; + ++ ASSERT_RTNL(); ++ + rcu_read_lock_bh(); +- spin_lock(&addrconf_verify_lock); + now = jiffies; + next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); + +- del_timer(&addr_chk_timer); ++ cancel_delayed_work(&addr_chk_work); + + for (i = 0; i < IN6_ADDR_HSIZE; i++) { + restart: +- hlist_for_each_entry_rcu_bh(ifp, +- &inet6_addr_lst[i], addr_lst) { ++ hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) { + unsigned long age; + + if (ifp->flags & IFA_F_PERMANENT) +@@ -3654,13 +3725,22 @@ restart: + + ADBG((KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n", + now, next, next_sec, next_sched)); +- +- addr_chk_timer.expires = next_sched; +- add_timer(&addr_chk_timer); +- spin_unlock(&addrconf_verify_lock); ++ mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now); + rcu_read_unlock_bh(); + } + ++static void addrconf_verify_work(struct work_struct *w) ++{ ++ rtnl_lock(); ++ addrconf_verify_rtnl(); ++ rtnl_unlock(); ++} ++ ++static void addrconf_verify(void) ++{ ++ mod_delayed_work(addrconf_wq, &addr_chk_work, 0); ++} ++ + static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local) + { + struct in6_addr *pfx = NULL; +@@ -3712,6 +3792,8 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags, + clock_t expires; + unsigned long timeout; + ++ ASSERT_RTNL(); ++ + if (!valid_lft || (prefered_lft > valid_lft)) + return -EINVAL; + +@@ -3745,7 +3827,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags, + + addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev, + expires, flags); +- addrconf_verify(0); ++ 
addrconf_verify_rtnl(); + + return 0; + } +@@ -4354,6 +4436,8 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token) + bool update_rs = false; + struct in6_addr ll_addr; + ++ ASSERT_RTNL(); ++ + if (token == NULL) + return -EINVAL; + if (ipv6_addr_any(token)) +@@ -4399,6 +4483,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token) + } + + write_unlock_bh(&idev->lock); ++ addrconf_verify_rtnl(); + return 0; + } + +@@ -4600,6 +4685,9 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) + { + struct net *net = dev_net(ifp->idev->dev); + ++ if (event) ++ ASSERT_RTNL(); ++ + inet6_ifa_notify(event ? : RTM_NEWADDR, ifp); + + switch (event) { +@@ -5128,6 +5216,12 @@ int __init addrconf_init(void) + if (err < 0) + goto out_addrlabel; + ++ addrconf_wq = create_workqueue("ipv6_addrconf"); ++ if (!addrconf_wq) { ++ err = -ENOMEM; ++ goto out_nowq; ++ } ++ + /* The addrconf netdev notifier requires that loopback_dev + * has it's ipv6 private information allocated and setup + * before it can bring up and give link-local addresses +@@ -5158,7 +5252,7 @@ int __init addrconf_init(void) + + register_netdevice_notifier(&ipv6_dev_notf); + +- addrconf_verify(0); ++ addrconf_verify(); + + err = rtnl_af_register(&inet6_ops); + if (err < 0) +@@ -5189,6 +5283,8 @@ errout: + errout_af: + unregister_netdevice_notifier(&ipv6_dev_notf); + errlo: ++ destroy_workqueue(addrconf_wq); ++out_nowq: + unregister_pernet_subsys(&addrconf_ops); + out_addrlabel: + ipv6_addr_label_cleanup(); +@@ -5224,7 +5320,8 @@ void addrconf_cleanup(void) + for (i = 0; i < IN6_ADDR_HSIZE; i++) + WARN_ON(!hlist_empty(&inet6_addr_lst[i])); + spin_unlock_bh(&addrconf_hash_lock); +- +- del_timer(&addr_chk_timer); ++ cancel_delayed_work(&addr_chk_work); + rtnl_unlock(); ++ ++ destroy_workqueue(addrconf_wq); + } +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c +index a944f1313c5f..9443af7d7ecb 100644 +--- a/net/ipv6/af_inet6.c ++++ b/net/ipv6/af_inet6.c +@@ -900,6 +900,9 @@ static int __init inet6_init(void) + err = ip6_route_init(); + if (err) + goto ip6_route_fail; ++ err = ndisc_late_init(); ++ if (err) ++ goto ndisc_late_fail; + err = ip6_flowlabel_init(); + if (err) + goto ip6_flowlabel_fail; +@@ -960,6 +963,8 @@ ipv6_exthdrs_fail: + addrconf_fail: + ip6_flowlabel_cleanup(); + ip6_flowlabel_fail: ++ ndisc_late_cleanup(); ++ndisc_late_fail: + ip6_route_cleanup(); + ip6_route_fail: + #ifdef CONFIG_PROC_FS +@@ -1020,6 +1025,7 @@ static void __exit inet6_exit(void) + ipv6_exthdrs_exit(); + addrconf_cleanup(); + ip6_flowlabel_cleanup(); ++ ndisc_late_cleanup(); + ip6_route_cleanup(); + #ifdef CONFIG_PROC_FS + +diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c +index 5a80f15a9de2..c59083c2a656 100644 +--- a/net/ipv6/anycast.c ++++ b/net/ipv6/anycast.c +@@ -77,6 +77,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) + pac->acl_next = NULL; + pac->acl_addr = *addr; + ++ rtnl_lock(); + rcu_read_lock(); + if (ifindex == 0) { + struct rt6_info *rt; +@@ -137,6 +138,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) + + error: + rcu_read_unlock(); ++ rtnl_unlock(); + if (pac) + sock_kfree_s(sk, pac, sizeof(*pac)); + return err; +@@ -171,13 +173,17 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) + + spin_unlock_bh(&ipv6_sk_ac_lock); + ++ rtnl_lock(); + rcu_read_lock(); + dev = dev_get_by_index_rcu(net, pac->acl_ifindex); + if (dev) + ipv6_dev_ac_dec(dev, &pac->acl_addr); + 
rcu_read_unlock(); ++ rtnl_unlock(); + + sock_kfree_s(sk, pac, sizeof(*pac)); ++ if (!dev) ++ return -ENODEV; + return 0; + } + +@@ -198,6 +204,7 @@ void ipv6_sock_ac_close(struct sock *sk) + spin_unlock_bh(&ipv6_sk_ac_lock); + + prev_index = 0; ++ rtnl_lock(); + rcu_read_lock(); + while (pac) { + struct ipv6_ac_socklist *next = pac->acl_next; +@@ -212,6 +219,7 @@ void ipv6_sock_ac_close(struct sock *sk) + pac = next; + } + rcu_read_unlock(); ++ rtnl_unlock(); + } + + static void aca_put(struct ifacaddr6 *ac) +@@ -233,6 +241,8 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr) + struct rt6_info *rt; + int err; + ++ ASSERT_RTNL(); ++ + idev = in6_dev_get(dev); + + if (idev == NULL) +@@ -302,6 +312,8 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr) + { + struct ifacaddr6 *aca, *prev_aca; + ++ ASSERT_RTNL(); ++ + write_lock_bh(&idev->lock); + prev_aca = NULL; + for (aca = idev->ac_list; aca; aca = aca->aca_next) { +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c +index 7eb7267861ac..603f251b6ca2 100644 +--- a/net/ipv6/ip6_gre.c ++++ b/net/ipv6/ip6_gre.c +@@ -890,7 +890,6 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev) + encap_limit = t->parms.encap_limit; + + memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); +- fl6.flowi6_proto = skb->protocol; + + err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu); + +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c +index 31bab1ab007c..12984e6794b9 100644 +--- a/net/ipv6/ip6_tunnel.c ++++ b/net/ipv6/ip6_tunnel.c +@@ -950,12 +950,21 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, + struct ipv6_tel_txoption opt; + struct dst_entry *dst = NULL, *ndst = NULL; + struct net_device *tdev; ++ bool use_cache = false; + int mtu; + unsigned int max_headroom = sizeof(struct ipv6hdr); + u8 proto; + int err = -1; + +- if (!fl6->flowi6_mark) ++ if (!(t->parms.flags & ++ (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { ++ /* enable the cache only only if the routing decision does ++ * not depend on the current inner header value ++ */ ++ use_cache = true; ++ } ++ ++ if (use_cache) + dst = ip6_tnl_dst_check(t); + if (!dst) { + ndst = ip6_route_output(net, NULL, fl6); +@@ -1012,7 +1021,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, + skb = new_skb; + } + skb_dst_drop(skb); +- if (fl6->flowi6_mark) { ++ if (!use_cache) { + skb_dst_set(skb, dst); + ndst = NULL; + } else { +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c +index 107f75283b1b..8344f686335d 100644 +--- a/net/ipv6/ip6mr.c ++++ b/net/ipv6/ip6mr.c +@@ -2275,8 +2275,8 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, + return 1; + } + +-int ip6mr_get_route(struct net *net, +- struct sk_buff *skb, struct rtmsg *rtm, int nowait) ++int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, ++ int nowait, u32 portid) + { + int err; + struct mr6_table *mrt; +@@ -2321,6 +2321,7 @@ int ip6mr_get_route(struct net *net, + return -ENOMEM; + } + ++ NETLINK_CB(skb2).portid = portid; + skb_reset_transport_header(skb2); + + skb_put(skb2, sizeof(struct ipv6hdr)); +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c +index 7ba6180ff8bd..cf16eb484cfe 100644 +--- a/net/ipv6/mcast.c ++++ b/net/ipv6/mcast.c +@@ -157,6 +157,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) + mc_lst->next = NULL; + mc_lst->addr = *addr; + ++ rtnl_lock(); + rcu_read_lock(); + if (ifindex == 0) { + struct rt6_info *rt; +@@ -170,6 +171,7 @@ int ipv6_sock_mc_join(struct 
sock *sk, int ifindex, const struct in6_addr *addr) + + if (dev == NULL) { + rcu_read_unlock(); ++ rtnl_unlock(); + sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); + return -ENODEV; + } +@@ -187,6 +189,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) + + if (err) { + rcu_read_unlock(); ++ rtnl_unlock(); + sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); + return err; + } +@@ -197,6 +200,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) + spin_unlock(&ipv6_sk_mc_lock); + + rcu_read_unlock(); ++ rtnl_unlock(); + + return 0; + } +@@ -214,6 +218,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) + if (!ipv6_addr_is_multicast(addr)) + return -EINVAL; + ++ rtnl_lock(); + spin_lock(&ipv6_sk_mc_lock); + for (lnk = &np->ipv6_mc_list; + (mc_lst = rcu_dereference_protected(*lnk, +@@ -237,12 +242,15 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) + } else + (void) ip6_mc_leave_src(sk, mc_lst, NULL); + rcu_read_unlock(); ++ rtnl_unlock(); ++ + atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); + kfree_rcu(mc_lst, rcu); + return 0; + } + } + spin_unlock(&ipv6_sk_mc_lock); ++ rtnl_unlock(); + + return -EADDRNOTAVAIL; + } +@@ -287,6 +295,7 @@ void ipv6_sock_mc_close(struct sock *sk) + if (!rcu_access_pointer(np->ipv6_mc_list)) + return; + ++ rtnl_lock(); + spin_lock(&ipv6_sk_mc_lock); + while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list, + lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) { +@@ -313,6 +322,7 @@ void ipv6_sock_mc_close(struct sock *sk) + spin_lock(&ipv6_sk_mc_lock); + } + spin_unlock(&ipv6_sk_mc_lock); ++ rtnl_unlock(); + } + + int ip6_mc_source(int add, int omode, struct sock *sk, +@@ -830,6 +840,8 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) + struct ifmcaddr6 *mc; + struct inet6_dev *idev; + ++ ASSERT_RTNL(); ++ + /* we need to take a reference on idev */ + idev = in6_dev_get(dev); + +@@ -901,6 +913,8 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr) + { + struct ifmcaddr6 *ma, **map; + ++ ASSERT_RTNL(); ++ + write_lock_bh(&idev->lock); + for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) { + if (ipv6_addr_equal(&ma->mca_addr, addr)) { +diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c +index deedf7ddbc6e..de10ccfe7f7e 100644 +--- a/net/ipv6/ndisc.c ++++ b/net/ipv6/ndisc.c +@@ -1716,24 +1716,28 @@ int __init ndisc_init(void) + if (err) + goto out_unregister_pernet; + #endif +- err = register_netdevice_notifier(&ndisc_netdev_notifier); +- if (err) +- goto out_unregister_sysctl; + out: + return err; + +-out_unregister_sysctl: + #ifdef CONFIG_SYSCTL +- neigh_sysctl_unregister(&nd_tbl.parms); + out_unregister_pernet: +-#endif + unregister_pernet_subsys(&ndisc_net_ops); + goto out; ++#endif + } + +-void ndisc_cleanup(void) ++int __init ndisc_late_init(void) ++{ ++ return register_netdevice_notifier(&ndisc_netdev_notifier); ++} ++ ++void ndisc_late_cleanup(void) + { + unregister_netdevice_notifier(&ndisc_netdev_notifier); ++} ++ ++void ndisc_cleanup(void) ++{ + #ifdef CONFIG_SYSCTL + neigh_sysctl_unregister(&nd_tbl.parms); + #endif +diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c +index 7cd623588532..c11a40caf5b6 100644 +--- a/net/ipv6/netfilter/nf_conntrack_reasm.c ++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c +@@ -569,6 +569,9 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user) + if (find_prev_fhdr(skb, &prevhdr, &nhoff, 
&fhoff) < 0) + return skb; + ++ if (!net->nf_frag.frags.high_thresh) ++ return skb; ++ + clone = skb_clone(skb, GFP_ATOMIC); + if (clone == NULL) { + pr_debug("Can't clone skb\n"); +diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c +index a1fb511da3b5..1a5318efa31c 100644 +--- a/net/ipv6/reassembly.c ++++ b/net/ipv6/reassembly.c +@@ -556,6 +556,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb) + return 1; + } + ++ if (!net->ipv6.frags.high_thresh) ++ goto fail_mem; ++ + evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags, false); + if (evicted) + IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), +@@ -575,6 +578,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb) + return ret; + } + ++fail_mem: + IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS); + kfree_skb(skb); + return -1; +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index 6ebefd46f718..fb5010c27a22 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -2536,7 +2536,9 @@ static int rt6_fill_node(struct net *net, + if (iif) { + #ifdef CONFIG_IPV6_MROUTE + if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) { +- int err = ip6mr_get_route(net, skb, rtm, nowait); ++ int err = ip6mr_get_route(net, skb, rtm, nowait, ++ portid); ++ + if (err <= 0) { + if (!nowait) { + if (err == 0) +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index 41c026f11edc..70b10ed169ae 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -902,8 +902,14 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) + static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, + struct request_sock *req) + { ++ /* RFC 7323 2.3 ++ * The window field (SEG.WND) of every outgoing segment, with the ++ * exception of segments, MUST be right-shifted by ++ * Rcv.Wind.Shift bits: ++ */ + tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, +- req->rcv_wnd, tcp_time_stamp, req->ts_recent, ++ req->rcv_wnd >> inet_rsk(req)->rcv_wscale, ++ tcp_time_stamp, req->ts_recent, + tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0); + } + +@@ -1324,7 +1330,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) + goto discard; + #endif + +- if (sk_filter(sk, skb)) ++ if (tcp_filter(sk, skb)) + goto discard; + + /* +@@ -1495,8 +1501,10 @@ process: + if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) + goto discard_and_relse; + +- if (sk_filter(sk, skb)) ++ if (tcp_filter(sk, skb)) + goto discard_and_relse; ++ th = (const struct tcphdr *)skb->data; ++ hdr = ipv6_hdr(skb); + + skb->dev = NULL; + +diff --git a/net/irda/iriap.c b/net/irda/iriap.c +index e1b37f5a2691..bd42516e268b 100644 +--- a/net/irda/iriap.c ++++ b/net/irda/iriap.c +@@ -191,8 +191,12 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv, + + self->magic = IAS_MAGIC; + self->mode = mode; +- if (mode == IAS_CLIENT) +- iriap_register_lsap(self, slsap_sel, mode); ++ if (mode == IAS_CLIENT) { ++ if (iriap_register_lsap(self, slsap_sel, mode)) { ++ kfree(self); ++ return NULL; ++ } ++ } + + self->confirm = callback; + self->priv = priv; +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c +index e922bf3f422c..11a10d580d9e 100644 +--- a/net/mac80211/cfg.c ++++ b/net/mac80211/cfg.c +@@ -1072,7 +1072,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) + + /* free all potentially still buffered bcast frames */ + local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf); +- skb_queue_purge(&sdata->u.ap.ps.bc_buf); ++ ieee80211_purge_tx_queue(&local->hw, 
&sdata->u.ap.ps.bc_buf); + + ieee80211_vif_copy_chanctx_to_vlans(sdata, true); + ieee80211_vif_release_channel(sdata); +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c +index cd60be8d9aba..f8c7f46008ee 100644 +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -1952,16 +1952,22 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) + if (!(status->rx_flags & IEEE80211_RX_AMSDU)) + return RX_CONTINUE; + +- if (ieee80211_has_a4(hdr->frame_control) && +- rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && +- !rx->sdata->u.vlan.sta) +- return RX_DROP_UNUSABLE; ++ if (unlikely(ieee80211_has_a4(hdr->frame_control))) { ++ switch (rx->sdata->vif.type) { ++ case NL80211_IFTYPE_AP_VLAN: ++ if (!rx->sdata->u.vlan.sta) ++ return RX_DROP_UNUSABLE; ++ break; ++ case NL80211_IFTYPE_STATION: ++ if (!rx->sdata->u.mgd.use_4addr) ++ return RX_DROP_UNUSABLE; ++ break; ++ default: ++ return RX_DROP_UNUSABLE; ++ } ++ } + +- if (is_multicast_ether_addr(hdr->addr1) && +- ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && +- rx->sdata->u.vlan.sta) || +- (rx->sdata->vif.type == NL80211_IFTYPE_STATION && +- rx->sdata->u.mgd.use_4addr))) ++ if (is_multicast_ether_addr(hdr->addr1)) + return RX_DROP_UNUSABLE; + + skb->dev = dev; +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c +index e960fbe9e271..129905342fc3 100644 +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -335,7 +335,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local) + skb = skb_dequeue(&ps->bc_buf); + if (skb) { + purged++; +- dev_kfree_skb(skb); ++ ieee80211_free_txskb(&local->hw, skb); + } + total += skb_queue_len(&ps->bc_buf); + } +@@ -417,7 +417,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) + if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) { + ps_dbg(tx->sdata, + "BC TX buffer full - dropping the oldest frame\n"); +- dev_kfree_skb(skb_dequeue(&ps->bc_buf)); ++ ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf)); + } else + tx->local->total_ps_buffered++; + +@@ -2711,7 +2711,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, + sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); + if (!ieee80211_tx_prepare(sdata, &tx, skb)) + break; +- dev_kfree_skb_any(skb); ++ ieee80211_free_txskb(hw, skb); + } + + info = IEEE80211_SKB_CB(skb); +diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c +index 50a15944c6c1..3032ede74e48 100644 +--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c ++++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c +@@ -373,6 +373,20 @@ static const char *const tcp_state_name_table[IP_VS_TCP_S_LAST+1] = { + [IP_VS_TCP_S_LAST] = "BUG!", + }; + ++static const bool tcp_state_active_table[IP_VS_TCP_S_LAST] = { ++ [IP_VS_TCP_S_NONE] = false, ++ [IP_VS_TCP_S_ESTABLISHED] = true, ++ [IP_VS_TCP_S_SYN_SENT] = true, ++ [IP_VS_TCP_S_SYN_RECV] = true, ++ [IP_VS_TCP_S_FIN_WAIT] = false, ++ [IP_VS_TCP_S_TIME_WAIT] = false, ++ [IP_VS_TCP_S_CLOSE] = false, ++ [IP_VS_TCP_S_CLOSE_WAIT] = false, ++ [IP_VS_TCP_S_LAST_ACK] = false, ++ [IP_VS_TCP_S_LISTEN] = false, ++ [IP_VS_TCP_S_SYNACK] = true, ++}; ++ + #define sNO IP_VS_TCP_S_NONE + #define sES IP_VS_TCP_S_ESTABLISHED + #define sSS IP_VS_TCP_S_SYN_SENT +@@ -396,6 +410,13 @@ static const char * tcp_state_name(int state) + return tcp_state_name_table[state] ? 
tcp_state_name_table[state] : "?"; + } + ++static bool tcp_state_active(int state) ++{ ++ if (state >= IP_VS_TCP_S_LAST) ++ return false; ++ return tcp_state_active_table[state]; ++} ++ + static struct tcp_states_t tcp_states [] = { + /* INPUT */ + /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ +@@ -518,12 +539,12 @@ set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, + + if (dest) { + if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && +- (new_state != IP_VS_TCP_S_ESTABLISHED)) { ++ !tcp_state_active(new_state)) { + atomic_dec(&dest->activeconns); + atomic_inc(&dest->inactconns); + cp->flags |= IP_VS_CONN_F_INACTIVE; + } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) && +- (new_state == IP_VS_TCP_S_ESTABLISHED)) { ++ tcp_state_active(new_state)) { + atomic_inc(&dest->activeconns); + atomic_dec(&dest->inactconns); + cp->flags &= ~IP_VS_CONN_F_INACTIVE; +diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c +index 3b18dd1be7d9..07ed65af05a6 100644 +--- a/net/netfilter/nf_log.c ++++ b/net/netfilter/nf_log.c +@@ -253,7 +253,7 @@ static int nf_log_proc_dostring(ctl_table *table, int write, + size_t size = *lenp; + int r = 0; + int tindex = (unsigned long)table->extra1; +- struct net *net = current->nsproxy->net_ns; ++ struct net *net = table->extra2; + + if (write) { + if (size > sizeof(buf)) +@@ -306,7 +306,6 @@ static int netfilter_log_sysctl_init(struct net *net) + 3, "%d", i); + nf_log_sysctl_table[i].procname = + nf_log_sysctl_fnames[i]; +- nf_log_sysctl_table[i].data = NULL; + nf_log_sysctl_table[i].maxlen = + NFLOGGER_NAME_LEN * sizeof(char); + nf_log_sysctl_table[i].mode = 0644; +@@ -317,6 +316,9 @@ static int netfilter_log_sysctl_init(struct net *net) + } + } + ++ for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) ++ table[i].extra2 = net; ++ + net->nf.nf_log_dir_header = register_net_sysctl(net, + "net/netfilter/nf_log", + table); +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index 2d454a235e84..24f006623f7c 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -3384,6 +3384,7 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void + } + if (msg == NETDEV_UNREGISTER) { + packet_cached_dev_reset(po); ++ fanout_release(sk); + po->ifindex = -1; + if (po->prot_hook.dev) + dev_put(po->prot_hook.dev); +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c +index d9cbecb62aca..df938b2ab848 100644 +--- a/net/sctp/sm_statefuns.c ++++ b/net/sctp/sm_statefuns.c +@@ -3428,6 +3428,12 @@ sctp_disposition_t sctp_sf_ootb(struct net *net, + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + ++ /* Report violation if chunk len overflows */ ++ ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); ++ if (ch_end > skb_tail_pointer(skb)) ++ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, ++ commands); ++ + /* Now that we know we at least have a chunk header, + * do things that are type appropriate. 
+ */ +@@ -3459,12 +3465,6 @@ sctp_disposition_t sctp_sf_ootb(struct net *net, + } + } + +- /* Report violation if chunk len overflows */ +- ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); +- if (ch_end > skb_tail_pointer(skb)) +- return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, +- commands); +- + ch = (sctp_chunkhdr_t *) ch_end; + } while (ch_end < skb_tail_pointer(skb)); + +diff --git a/net/sctp/socket.c b/net/sctp/socket.c +index bdc3fb66717d..ede7c540ea24 100644 +--- a/net/sctp/socket.c ++++ b/net/sctp/socket.c +@@ -1231,9 +1231,12 @@ static int __sctp_connect(struct sock* sk, + + timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); + +- err = sctp_wait_for_connect(asoc, &timeo); +- if ((err == 0 || err == -EINPROGRESS) && assoc_id) ++ if (assoc_id) + *assoc_id = asoc->assoc_id; ++ err = sctp_wait_for_connect(asoc, &timeo); ++ /* Note: the asoc may be freed after the return of ++ * sctp_wait_for_connect. ++ */ + + /* Don't free association on exit. */ + asoc = NULL; +@@ -4259,7 +4262,7 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, + static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, + int __user *optlen) + { +- if (len <= 0) ++ if (len == 0) + return -EINVAL; + if (len > sizeof(struct sctp_event_subscribe)) + len = sizeof(struct sctp_event_subscribe); +@@ -5770,6 +5773,9 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, + if (get_user(len, optlen)) + return -EFAULT; + ++ if (len < 0) ++ return -EINVAL; ++ + sctp_lock_sock(sk); + + switch (optname) { +diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c +index 89a588b4478b..c996a71fc9f1 100644 +--- a/net/sunrpc/svc.c ++++ b/net/sunrpc/svc.c +@@ -1182,11 +1182,17 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) + *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); + + /* Encode reply */ +- if (rqstp->rq_dropme) { ++ if (*statp == rpc_drop_reply || ++ rqstp->rq_dropme) { + if (procp->pc_release) + procp->pc_release(rqstp, NULL, rqstp->rq_resp); + goto dropit; + } ++ if (*statp == rpc_autherr_badcred) { ++ if (procp->pc_release) ++ procp->pc_release(rqstp, NULL, rqstp->rq_resp); ++ goto err_bad_auth; ++ } + if (*statp == rpc_success && + (xdr = procp->pc_encode) && + !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) { +diff --git a/net/wireless/core.h b/net/wireless/core.h +index fd35dae547c4..d06da43e265f 100644 +--- a/net/wireless/core.h ++++ b/net/wireless/core.h +@@ -69,6 +69,7 @@ struct cfg80211_registered_device { + struct list_head bss_list; + struct rb_root bss_tree; + u32 bss_generation; ++ u32 bss_entries; + struct cfg80211_scan_request *scan_req; /* protected by RTNL */ + struct cfg80211_sched_scan_request *sched_scan_req; + unsigned long suspend_at; +diff --git a/net/wireless/scan.c b/net/wireless/scan.c +index 81019ee3ddc8..15ef12732d28 100644 +--- a/net/wireless/scan.c ++++ b/net/wireless/scan.c +@@ -55,7 +55,19 @@ + * also linked into the probe response struct. + */ + +-#define IEEE80211_SCAN_RESULT_EXPIRE (3 * HZ) ++ /** ++ * Limit the number of BSS entries stored in mac80211. Each one is ++ * a bit over 4k at most, so this limits to roughly 4-5M of memory. ++ * If somebody wants to really attack this though, they'd likely ++ * use small beacons, and only one type of frame, limiting each of ++ * the entries to a much smaller size (in order to generate more ++ * entries in total, so overhead is bigger.) 
++ */ ++static int bss_entries_limit = 1000; ++module_param(bss_entries_limit, int, 0644); ++MODULE_PARM_DESC(bss_entries_limit,"limit to number of scan BSS entries (per wiphy, default 1000)"); ++ ++#define IEEE80211_SCAN_RESULT_EXPIRE (7 * HZ) + + static void bss_free(struct cfg80211_internal_bss *bss) + { +@@ -135,6 +148,10 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *dev, + + list_del_init(&bss->list); + rb_erase(&bss->rbn, &dev->bss_tree); ++ dev->bss_entries--; ++ WARN_ONCE((dev->bss_entries == 0) ^ list_empty(&dev->bss_list), ++ "rdev bss entries[%d]/list[empty:%d] corruption\n", ++ dev->bss_entries, list_empty(&dev->bss_list)); + bss_ref_put(dev, bss); + return true; + } +@@ -338,6 +355,40 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev) + __cfg80211_bss_expire(dev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE); + } + ++static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev) ++{ ++ struct cfg80211_internal_bss *bss, *oldest = NULL; ++ bool ret; ++ ++ lockdep_assert_held(&rdev->bss_lock); ++ ++ list_for_each_entry(bss, &rdev->bss_list, list) { ++ if (atomic_read(&bss->hold)) ++ continue; ++ ++ if (!list_empty(&bss->hidden_list) && ++ !bss->pub.hidden_beacon_bss) ++ continue; ++ ++ if (oldest && time_before(oldest->ts, bss->ts)) ++ continue; ++ oldest = bss; ++ } ++ ++ if (WARN_ON(!oldest)) ++ return false; ++ ++ /* ++ * The callers make sure to increase rdev->bss_generation if anything ++ * gets removed (and a new entry added), so there's no need to also do ++ * it here. ++ */ ++ ++ ret = __cfg80211_unlink_bss(rdev, oldest); ++ WARN_ON(!ret); ++ return ret; ++} ++ + const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len) + { + while (len > 2 && ies[0] != eid) { +@@ -622,6 +673,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev, + const u8 *ie; + int i, ssidlen; + u8 fold = 0; ++ u32 n_entries = 0; + + ies = rcu_access_pointer(new->pub.beacon_ies); + if (WARN_ON(!ies)) +@@ -645,6 +697,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev, + /* This is the bad part ... 
*/ + + list_for_each_entry(bss, &dev->bss_list, list) { ++ /* ++ * we're iterating all the entries anyway, so take the ++ * opportunity to validate the list length accounting ++ */ ++ n_entries++; ++ + if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid)) + continue; + if (bss->pub.channel != new->pub.channel) +@@ -674,6 +732,10 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev, + new->pub.beacon_ies); + } + ++ WARN_ONCE(n_entries != dev->bss_entries, ++ "rdev bss entries[%d]/list[len:%d] corruption\n", ++ dev->bss_entries, n_entries); ++ + return true; + } + +@@ -818,7 +880,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, + } + } + ++ if (dev->bss_entries >= bss_entries_limit && ++ !cfg80211_bss_expire_oldest(dev)) { ++ kfree(new); ++ goto drop; ++ } ++ + list_add_tail(&new->list, &dev->bss_list); ++ dev->bss_entries++; + rb_insert_bss(dev, new); + found = new; + } +diff --git a/security/keys/proc.c b/security/keys/proc.c +index 217b6855e815..374c3301b802 100644 +--- a/security/keys/proc.c ++++ b/security/keys/proc.c +@@ -188,7 +188,7 @@ static int proc_keys_show(struct seq_file *m, void *v) + struct timespec now; + unsigned long timo; + key_ref_t key_ref, skey_ref; +- char xbuf[12]; ++ char xbuf[16]; + int rc; + + key_ref = make_key_ref(key, 0); +diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c +index 8eddece217bb..dfed3ef02475 100644 +--- a/sound/core/pcm_lib.c ++++ b/sound/core/pcm_lib.c +@@ -1856,10 +1856,10 @@ void snd_pcm_period_elapsed(struct snd_pcm_substream *substream) + if (substream->timer_running) + snd_timer_interrupt(substream->timer, 1); + _end: ++ kill_fasync(&runtime->fasync, SIGIO, POLL_IN); + snd_pcm_stream_unlock_irqrestore(substream, flags); + if (runtime->transfer_ack_end) + runtime->transfer_ack_end(substream); +- kill_fasync(&runtime->fasync, SIGIO, POLL_IN); + } + + EXPORT_SYMBOL(snd_pcm_period_elapsed); +diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c +index 500765f20843..3e9761685c8c 100644 +--- a/sound/core/rawmidi.c ++++ b/sound/core/rawmidi.c +@@ -1564,10 +1564,12 @@ static int snd_rawmidi_dev_register(struct snd_device *device) + } + list_add_tail(&rmidi->list, &snd_rawmidi_devices); + sprintf(name, "midiC%iD%i", rmidi->card->number, rmidi->device); ++ mutex_unlock(®ister_mutex); + if ((err = snd_register_device(SNDRV_DEVICE_TYPE_RAWMIDI, + rmidi->card, rmidi->device, + &snd_rawmidi_f_ops, rmidi, name)) < 0) { + snd_printk(KERN_ERR "unable to register rawmidi device %i:%i\n", rmidi->card->number, rmidi->device); ++ mutex_lock(®ister_mutex); + list_del(&rmidi->list); + mutex_unlock(®ister_mutex); + return err; +@@ -1575,6 +1577,7 @@ static int snd_rawmidi_dev_register(struct snd_device *device) + if (rmidi->ops && rmidi->ops->dev_register && + (err = rmidi->ops->dev_register(rmidi)) < 0) { + snd_unregister_device(SNDRV_DEVICE_TYPE_RAWMIDI, rmidi->card, rmidi->device); ++ mutex_lock(®ister_mutex); + list_del(&rmidi->list); + mutex_unlock(®ister_mutex); + return err; +@@ -1603,7 +1606,6 @@ static int snd_rawmidi_dev_register(struct snd_device *device) + } + } + #endif /* CONFIG_SND_OSSEMUL */ +- mutex_unlock(®ister_mutex); + sprintf(name, "midi%d", rmidi->device); + entry = snd_info_create_card_entry(rmidi->card, name, rmidi->card->proc_root); + if (entry) { +diff --git a/sound/core/timer.c b/sound/core/timer.c +index 3476895ee1fb..749857a889e6 100644 +--- a/sound/core/timer.c ++++ b/sound/core/timer.c +@@ -291,8 +291,19 @@ int snd_timer_open(struct snd_timer_instance **ti, + } + timeri->slave_class 
= tid->dev_sclass; + timeri->slave_id = slave_id; +- if (list_empty(&timer->open_list_head) && timer->hw.open) +- timer->hw.open(timer); ++ ++ if (list_empty(&timer->open_list_head) && timer->hw.open) { ++ int err = timer->hw.open(timer); ++ if (err) { ++ kfree(timeri->owner); ++ kfree(timeri); ++ ++ module_put(timer->module); ++ mutex_unlock(®ister_mutex); ++ return err; ++ } ++ } ++ + list_add_tail(&timeri->open_list, &timer->open_list_head); + snd_timer_check_master(timeri); + mutex_unlock(®ister_mutex); +@@ -817,6 +828,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid, + timer->tmr_subdevice = tid->subdevice; + if (id) + strlcpy(timer->id, id, sizeof(timer->id)); ++ timer->sticks = 1; + INIT_LIST_HEAD(&timer->device_list); + INIT_LIST_HEAD(&timer->open_list_head); + INIT_LIST_HEAD(&timer->active_list_head); +@@ -1922,19 +1934,23 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, + if (err < 0) + goto _error; + ++ mutex_lock(&tu->ioctl_lock); + if (tu->tread) { + if (copy_to_user(buffer, &tu->tqueue[tu->qhead++], + sizeof(struct snd_timer_tread))) { ++ mutex_unlock(&tu->ioctl_lock); + err = -EFAULT; + goto _error; + } + } else { + if (copy_to_user(buffer, &tu->queue[tu->qhead++], + sizeof(struct snd_timer_read))) { ++ mutex_unlock(&tu->ioctl_lock); + err = -EFAULT; + goto _error; + } + } ++ mutex_unlock(&tu->ioctl_lock); + + tu->qhead %= tu->queue_size; + +diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c +index 53754f5edeb1..097c8c4daaea 100644 +--- a/sound/pci/ali5451/ali5451.c ++++ b/sound/pci/ali5451/ali5451.c +@@ -1422,6 +1422,7 @@ snd_ali_playback_pointer(struct snd_pcm_substream *substream) + spin_unlock(&codec->reg_lock); + snd_ali_printk("playback pointer returned cso=%xh.\n", cso); + ++ cso %= runtime->buffer_size; + return cso; + } + +@@ -1442,6 +1443,7 @@ static snd_pcm_uframes_t snd_ali_pointer(struct snd_pcm_substream *substream) + cso = inw(ALI_REG(codec, ALI_CSO_ALPHA_FMS + 2)); + spin_unlock(&codec->reg_lock); + ++ cso %= runtime->buffer_size; + return cso; + } + +diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c +index eb05c7ed6d05..5dc6b23b634e 100644 +--- a/sound/soc/omap/omap-mcpdm.c ++++ b/sound/soc/omap/omap-mcpdm.c +@@ -393,8 +393,8 @@ static int omap_mcpdm_probe(struct snd_soc_dai *dai) + pm_runtime_get_sync(mcpdm->dev); + omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, 0x00); + +- ret = devm_request_irq(mcpdm->dev, mcpdm->irq, omap_mcpdm_irq_handler, +- 0, "McPDM", (void *)mcpdm); ++ ret = request_irq(mcpdm->irq, omap_mcpdm_irq_handler, 0, "McPDM", ++ (void *)mcpdm); + + pm_runtime_put_sync(mcpdm->dev); + +@@ -414,6 +414,7 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai) + { + struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai); + ++ free_irq(mcpdm->irq, (void *)mcpdm); + pm_runtime_disable(mcpdm->dev); + + return 0; +diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c +index 4b12bf850325..f7718c8fc93e 100644 +--- a/tools/perf/util/symbol-elf.c ++++ b/tools/perf/util/symbol-elf.c +@@ -831,8 +831,8 @@ new_symbol: + * For misannotated, zeroed, ASM function sizes. 
+ */ + if (nr > 0) { +- symbols__fixup_duplicate(&dso->symbols[map->type]); + symbols__fixup_end(&dso->symbols[map->type]); ++ symbols__fixup_duplicate(&dso->symbols[map->type]); + if (kmap) { + /* + * We need to fixup this here too because we create new +diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c +index 8cf3b5426a9a..a2fe760605e1 100644 +--- a/tools/perf/util/symbol.c ++++ b/tools/perf/util/symbol.c +@@ -673,8 +673,8 @@ int dso__load_kallsyms(struct dso *dso, const char *filename, + if (dso__load_all_kallsyms(dso, filename, map) < 0) + return -1; + +- symbols__fixup_duplicate(&dso->symbols[map->type]); + symbols__fixup_end(&dso->symbols[map->type]); ++ symbols__fixup_duplicate(&dso->symbols[map->type]); + + if (dso->kernel == DSO_TYPE_GUEST_KERNEL) + dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; +diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c +index 808d5a9d5dcf..bcc6125657e5 100644 +--- a/tools/vm/slabinfo.c ++++ b/tools/vm/slabinfo.c +@@ -493,10 +493,11 @@ static void slab_stats(struct slabinfo *s) + s->alloc_node_mismatch, (s->alloc_node_mismatch * 100) / total); + } + +- if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail) ++ if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail) { + printf("\nCmpxchg_double Looping\n------------------------\n"); + printf("Locked Cmpxchg Double redos %lu\nUnlocked Cmpxchg Double redos %lu\n", + s->cmpxchg_double_fail, s->cmpxchg_double_cpu_fail); ++ } + } + + static void report(struct slabinfo *s) diff --git a/patch/kernel/odroidc1-default/patch-3.10.105-106.patch b/patch/kernel/odroidc1-default/patch-3.10.105-106.patch new file mode 100644 index 000000000..bfe911d2d --- /dev/null +++ b/patch/kernel/odroidc1-default/patch-3.10.105-106.patch @@ -0,0 +1,8745 @@ +diff --git a/Documentation/devicetree/bindings/clock/imx31-clock.txt b/Documentation/devicetree/bindings/clock/imx31-clock.txt +index 19df842c694f..8163d565f697 100644 +--- a/Documentation/devicetree/bindings/clock/imx31-clock.txt ++++ b/Documentation/devicetree/bindings/clock/imx31-clock.txt +@@ -77,7 +77,7 @@ Examples: + clks: ccm@53f80000{ + compatible = "fsl,imx31-ccm"; + reg = <0x53f80000 0x4000>; +- interrupts = <0 31 0x04 0 53 0x04>; ++ interrupts = <31>, <53>; + #clock-cells = <1>; + }; + +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt +index 98da831a14ba..daf83824fda5 100644 +--- a/Documentation/kernel-parameters.txt ++++ b/Documentation/kernel-parameters.txt +@@ -955,6 +955,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. + When zero, profiling data is discarded and associated + debugfs files are removed at module unload time. + ++ goldfish [X86] Enable the goldfish android emulator platform. ++ Don't use this when you are not running on the ++ android emulator ++ + gpt [EFI] Forces disk with valid GPT signature but + invalid Protective MBR to be treated as GPT. 
+ +diff --git a/Makefile b/Makefile +index 80e180e1e4a2..2f87f67fb9f7 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 10 +-SUBLEVEL = 105 ++SUBLEVEL = 106 + EXTRAVERSION = + NAME = TOSSUG Baby Fish + +diff --git a/arch/arm/boot/dts/imx31.dtsi b/arch/arm/boot/dts/imx31.dtsi +index c5449257ad9a..b73190d08baa 100644 +--- a/arch/arm/boot/dts/imx31.dtsi ++++ b/arch/arm/boot/dts/imx31.dtsi +@@ -20,11 +20,11 @@ + serial4 = &uart5; + }; + +- avic: avic-interrupt-controller@60000000 { ++ avic: interrupt-controller@68000000 { + compatible = "fsl,imx31-avic", "fsl,avic"; + interrupt-controller; + #interrupt-cells = <1>; +- reg = <0x60000000 0x100000>; ++ reg = <0x68000000 0x100000>; + }; + + soc { +@@ -93,13 +93,6 @@ + clock-names = "ipg", "per"; + status = "disabled"; + }; +- +- clks: ccm@53f80000{ +- compatible = "fsl,imx31-ccm"; +- reg = <0x53f80000 0x4000>; +- interrupts = <0 31 0x04 0 53 0x04>; +- #clock-cells = <1>; +- }; + }; + + aips@53f00000 { /* AIPS2 */ +@@ -109,6 +102,13 @@ + reg = <0x53f00000 0x100000>; + ranges; + ++ clks: ccm@53f80000{ ++ compatible = "fsl,imx31-ccm"; ++ reg = <0x53f80000 0x4000>; ++ interrupts = <31>, <53>; ++ #clock-cells = <1>; ++ }; ++ + gpt: timer@53f90000 { + compatible = "fsl,imx31-gpt"; + reg = <0x53f90000 0x4000>; +diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c +index 4e2110d48c41..dfdd683d373e 100644 +--- a/arch/arm/kernel/ptrace.c ++++ b/arch/arm/kernel/ptrace.c +@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target, + const void *kbuf, const void __user *ubuf) + { + int ret; +- struct pt_regs newregs; ++ struct pt_regs newregs = *task_pt_regs(target); + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &newregs, +diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c +index 1a468f0fd22e..9d532568b8b3 100644 +--- a/arch/arm/mach-ux500/pm.c ++++ b/arch/arm/mach-ux500/pm.c +@@ -128,8 +128,8 @@ bool prcmu_pending_irq(void) + */ + bool prcmu_is_cpu_in_wfi(int cpu) + { +- return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : +- PRCM_ARM_WFI_STANDBY_WFI0; ++ return readl(PRCM_ARM_WFI_STANDBY) & ++ (cpu ? 
PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0); + } + + /* +diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h +index 6913643bbe54..c136fd53c847 100644 +--- a/arch/arm64/include/uapi/asm/ptrace.h ++++ b/arch/arm64/include/uapi/asm/ptrace.h +@@ -75,6 +75,7 @@ struct user_fpsimd_state { + __uint128_t vregs[32]; + __u32 fpsr; + __u32 fpcr; ++ __u32 __reserved[2]; + }; + + struct user_hwdebug_state { +diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c +index dfad98fda4f8..015775ad7604 100644 +--- a/arch/arm64/kernel/ptrace.c ++++ b/arch/arm64/kernel/ptrace.c +@@ -464,6 +464,8 @@ static int hw_break_set(struct task_struct *target, + /* (address, ctrl) registers */ + limit = regset->n * regset->size; + while (count && offset < limit) { ++ if (count < PTRACE_HBP_ADDR_SZ) ++ return -EINVAL; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr, + offset, offset + PTRACE_HBP_ADDR_SZ); + if (ret) +@@ -473,6 +475,8 @@ static int hw_break_set(struct task_struct *target, + return ret; + offset += PTRACE_HBP_ADDR_SZ; + ++ if (!count) ++ break; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, + offset, offset + PTRACE_HBP_CTRL_SZ); + if (ret) +@@ -509,7 +513,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset, + const void *kbuf, const void __user *ubuf) + { + int ret; +- struct user_pt_regs newregs; ++ struct user_pt_regs newregs = task_pt_regs(target)->user_regs; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1); + if (ret) +@@ -539,7 +543,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, + const void *kbuf, const void __user *ubuf) + { + int ret; +- struct user_fpsimd_state newstate; ++ struct user_fpsimd_state newstate = ++ target->thread.fpsimd_state.user_fpsimd; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1); + if (ret) +@@ -562,7 +567,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset, + const void *kbuf, const void __user *ubuf) + { + int ret; +- unsigned long tls; ++ unsigned long tls = target->thread.tp_value; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); + if (ret) +diff --git a/arch/cris/boot/rescue/Makefile b/arch/cris/boot/rescue/Makefile +index 52bd0bd1dd22..d98edbb30a18 100644 +--- a/arch/cris/boot/rescue/Makefile ++++ b/arch/cris/boot/rescue/Makefile +@@ -10,6 +10,9 @@ + + asflags-y += $(LINUXINCLUDE) + ccflags-y += -O2 $(LINUXINCLUDE) ++ ++ifdef CONFIG_ETRAX_AXISFLASHMAP ++ + arch-$(CONFIG_ETRAX_ARCH_V10) = v10 + arch-$(CONFIG_ETRAX_ARCH_V32) = v32 + +@@ -28,6 +31,11 @@ $(obj)/rescue.bin: $(obj)/rescue.o FORCE + $(call if_changed,objcopy) + cp -p $(obj)/rescue.bin $(objtree) + ++else ++$(obj)/rescue.bin: ++ ++endif ++ + $(obj)/testrescue.bin: $(obj)/testrescue.o + $(OBJCOPY) $(OBJCOPYFLAGS) $(obj)/testrescue.o tr.bin + # Pad it to 784 bytes +diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h +index d28fa8fe26fe..c598d847d56b 100644 +--- a/arch/m68k/include/asm/delay.h ++++ b/arch/m68k/include/asm/delay.h +@@ -114,6 +114,6 @@ static inline void __udelay(unsigned long usecs) + */ + #define HZSCALE (268435456 / (1000000 / HZ)) + +-#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000)); ++#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000)) + + #endif /* defined(_M68K_DELAY_H) */ +diff --git 
a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c +index b3ebfe9c8e88..dfe77b26beaa 100644 +--- a/arch/metag/lib/usercopy.c ++++ b/arch/metag/lib/usercopy.c +@@ -260,27 +260,31 @@ + "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ + "22:\n" \ + "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ +- "SUB %3, %3, #32\n" \ + "23:\n" \ +- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ ++ "SUB %3, %3, #32\n" \ + "24:\n" \ ++ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ ++ "25:\n" \ + "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "26:\n" \ + "SUB %3, %3, #32\n" \ + "DCACHE [%1+#-64], D0Ar6\n" \ + "BR $Lloop"id"\n" \ + \ + "MOV RAPF, %1\n" \ +- "25:\n" \ ++ "27:\n" \ + "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ +- "26:\n" \ ++ "28:\n" \ + "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "29:\n" \ + "SUB %3, %3, #32\n" \ +- "27:\n" \ ++ "30:\n" \ + "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ +- "28:\n" \ ++ "31:\n" \ + "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "32:\n" \ + "SUB %0, %0, #8\n" \ +- "29:\n" \ ++ "33:\n" \ + "SETL [%0++], D0.7, D1.7\n" \ + "SUB %3, %3, #32\n" \ + "1:" \ +@@ -312,11 +316,15 @@ + " .long 26b,3b\n" \ + " .long 27b,3b\n" \ + " .long 28b,3b\n" \ +- " .long 29b,4b\n" \ ++ " .long 29b,3b\n" \ ++ " .long 30b,3b\n" \ ++ " .long 31b,3b\n" \ ++ " .long 32b,3b\n" \ ++ " .long 33b,4b\n" \ + " .previous\n" \ + : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ + : "0" (to), "1" (from), "2" (ret), "3" (n) \ +- : "D1Ar1", "D0Ar2", "memory") ++ : "D1Ar1", "D0Ar2", "cc", "memory") + + /* rewind 'to' and 'from' pointers when a fault occurs + * +@@ -342,7 +350,7 @@ + #define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\ + __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ + "LSR D0Ar2, D0Ar2, #8\n" \ +- "AND D0Ar2, D0Ar2, #0x7\n" \ ++ "ANDS D0Ar2, D0Ar2, #0x7\n" \ + "ADDZ D0Ar2, D0Ar2, #4\n" \ + "SUB D0Ar2, D0Ar2, #1\n" \ + "MOV D1Ar1, #4\n" \ +@@ -403,47 +411,55 @@ + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ + "22:\n" \ + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ +- "SUB %3, %3, #16\n" \ + "23:\n" \ +- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ +- "24:\n" \ +- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "SUB %3, %3, #16\n" \ +- "25:\n" \ ++ "24:\n" \ + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ +- "26:\n" \ ++ "25:\n" \ + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "26:\n" \ + "SUB %3, %3, #16\n" \ + "27:\n" \ + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ + "28:\n" \ + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "29:\n" \ ++ "SUB %3, %3, #16\n" \ ++ "30:\n" \ ++ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ ++ "31:\n" \ ++ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "32:\n" \ + "SUB %3, %3, #16\n" \ + "DCACHE [%1+#-64], D0Ar6\n" \ + "BR $Lloop"id"\n" \ + \ + "MOV RAPF, %1\n" \ +- "29:\n" \ ++ "33:\n" \ + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ +- "30:\n" \ ++ "34:\n" \ + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "35:\n" \ + "SUB %3, %3, #16\n" \ +- "31:\n" \ ++ "36:\n" \ + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ +- "32:\n" \ ++ "37:\n" \ + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "38:\n" \ + "SUB %3, %3, #16\n" \ +- "33:\n" \ ++ "39:\n" \ + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ +- "34:\n" \ ++ "40:\n" \ + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "41:\n" \ + "SUB %3, %3, #16\n" \ +- "35:\n" \ ++ "42:\n" \ + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ +- "36:\n" \ ++ "43:\n" \ + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ ++ "44:\n" \ + "SUB %0, %0, #4\n" \ +- "37:\n" \ ++ "45:\n" \ + "SETD [%0++], D0.7\n" \ + "SUB %3, %3, #16\n" \ + 
"1:" \ +@@ -483,11 +499,19 @@ + " .long 34b,3b\n" \ + " .long 35b,3b\n" \ + " .long 36b,3b\n" \ +- " .long 37b,4b\n" \ ++ " .long 37b,3b\n" \ ++ " .long 38b,3b\n" \ ++ " .long 39b,3b\n" \ ++ " .long 40b,3b\n" \ ++ " .long 41b,3b\n" \ ++ " .long 42b,3b\n" \ ++ " .long 43b,3b\n" \ ++ " .long 44b,3b\n" \ ++ " .long 45b,4b\n" \ + " .previous\n" \ + : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ + : "0" (to), "1" (from), "2" (ret), "3" (n) \ +- : "D1Ar1", "D0Ar2", "memory") ++ : "D1Ar1", "D0Ar2", "cc", "memory") + + /* rewind 'to' and 'from' pointers when a fault occurs + * +@@ -513,7 +537,7 @@ + #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\ + __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ + "LSR D0Ar2, D0Ar2, #8\n" \ +- "AND D0Ar2, D0Ar2, #0x7\n" \ ++ "ANDS D0Ar2, D0Ar2, #0x7\n" \ + "ADDZ D0Ar2, D0Ar2, #4\n" \ + "SUB D0Ar2, D0Ar2, #1\n" \ + "MOV D1Ar1, #4\n" \ +@@ -538,23 +562,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, + if ((unsigned long) src & 1) { + __asm_copy_to_user_1(dst, src, retn); + n--; ++ if (retn) ++ return retn + n; + } + if ((unsigned long) dst & 1) { + /* Worst case - byte copy */ + while (n > 0) { + __asm_copy_to_user_1(dst, src, retn); + n--; ++ if (retn) ++ return retn + n; + } + } + if (((unsigned long) src & 2) && n >= 2) { + __asm_copy_to_user_2(dst, src, retn); + n -= 2; ++ if (retn) ++ return retn + n; + } + if ((unsigned long) dst & 2) { + /* Second worst case - word copy */ + while (n >= 2) { + __asm_copy_to_user_2(dst, src, retn); + n -= 2; ++ if (retn) ++ return retn + n; + } + } + +@@ -569,6 +601,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, + while (n >= 8) { + __asm_copy_to_user_8x64(dst, src, retn); + n -= 8; ++ if (retn) ++ return retn + n; + } + } + if (n >= RAPF_MIN_BUF_SIZE) { +@@ -581,6 +615,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, + while (n >= 8) { + __asm_copy_to_user_8x64(dst, src, retn); + n -= 8; ++ if (retn) ++ return retn + n; + } + } + #endif +@@ -588,11 +624,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, + while (n >= 16) { + __asm_copy_to_user_16(dst, src, retn); + n -= 16; ++ if (retn) ++ return retn + n; + } + + while (n >= 4) { + __asm_copy_to_user_4(dst, src, retn); + n -= 4; ++ if (retn) ++ return retn + n; + } + + switch (n) { +@@ -609,6 +649,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, + break; + } + ++ /* ++ * If we get here, retn correctly reflects the number of failing ++ * bytes. ++ */ + return retn; + } + EXPORT_SYMBOL(__copy_user); +@@ -789,29 +833,49 @@ EXPORT_SYMBOL(__copy_user); + * + * Rationale: + * A fault occurs while reading from user buffer, which is the +- * source. Since the fault is at a single address, we only +- * need to rewind by 8 bytes. ++ * source. + * Since we don't write to kernel buffer until we read first, + * the kernel buffer is at the right state and needn't be +- * corrected. ++ * corrected, but the source must be rewound to the beginning of ++ * the block, which is LSM_STEP*8 bytes. ++ * LSM_STEP is bits 10:8 in TXSTATUS which is already read ++ * and stored in D0Ar2 ++ * ++ * NOTE: If a fault occurs at the last operation in M{G,S}ETL ++ * LSM_STEP will be 0. ie: we do 4 writes in our case, if ++ * a fault happens at the 4th write, LSM_STEP will be 0 ++ * instead of 4. The code copes with that. 
+ */ + #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \ + __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ +- "SUB %1, %1, #8\n") ++ "LSR D0Ar2, D0Ar2, #5\n" \ ++ "ANDS D0Ar2, D0Ar2, #0x38\n" \ ++ "ADDZ D0Ar2, D0Ar2, #32\n" \ ++ "SUB %1, %1, D0Ar2\n") + + /* rewind 'from' pointer when a fault occurs + * + * Rationale: + * A fault occurs while reading from user buffer, which is the +- * source. Since the fault is at a single address, we only +- * need to rewind by 4 bytes. ++ * source. + * Since we don't write to kernel buffer until we read first, + * the kernel buffer is at the right state and needn't be +- * corrected. ++ * corrected, but the source must be rewound to the beginning of ++ * the block, which is LSM_STEP*4 bytes. ++ * LSM_STEP is bits 10:8 in TXSTATUS which is already read ++ * and stored in D0Ar2 ++ * ++ * NOTE: If a fault occurs at the last operation in M{G,S}ETL ++ * LSM_STEP will be 0. ie: we do 4 writes in our case, if ++ * a fault happens at the 4th write, LSM_STEP will be 0 ++ * instead of 4. The code copes with that. + */ + #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \ + __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ +- "SUB %1, %1, #4\n") ++ "LSR D0Ar2, D0Ar2, #6\n" \ ++ "ANDS D0Ar2, D0Ar2, #0x1c\n" \ ++ "ADDZ D0Ar2, D0Ar2, #16\n" \ ++ "SUB %1, %1, D0Ar2\n") + + + /* Copy from user to kernel, zeroing the bytes that were inaccessible in +@@ -830,6 +894,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, + if ((unsigned long) src & 1) { + __asm_copy_from_user_1(dst, src, retn); + n--; ++ if (retn) ++ goto copy_exception_bytes; + } + if ((unsigned long) dst & 1) { + /* Worst case - byte copy */ +@@ -843,6 +909,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, + if (((unsigned long) src & 2) && n >= 2) { + __asm_copy_from_user_2(dst, src, retn); + n -= 2; ++ if (retn) ++ goto copy_exception_bytes; + } + if ((unsigned long) dst & 2) { + /* Second worst case - word copy */ +@@ -854,12 +922,6 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, + } + } + +- /* We only need one check after the unalignment-adjustments, +- because if both adjustments were done, either both or +- neither reference had an exception. 
*/ +- if (retn != 0) +- goto copy_exception_bytes; +- + #ifdef USE_RAPF + /* 64 bit copy loop */ + if (!(((unsigned long) src | (unsigned long) dst) & 7)) { +diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S +index 64e08df51d65..8b7004132491 100644 +--- a/arch/mips/cavium-octeon/octeon-memcpy.S ++++ b/arch/mips/cavium-octeon/octeon-memcpy.S +@@ -208,18 +208,18 @@ EXC( STORE t2, UNIT(6)(dst), s_exc_p10u) + ADD src, src, 16*NBYTES + EXC( STORE t3, UNIT(7)(dst), s_exc_p9u) + ADD dst, dst, 16*NBYTES +-EXC( LOAD t0, UNIT(-8)(src), l_exc_copy) +-EXC( LOAD t1, UNIT(-7)(src), l_exc_copy) +-EXC( LOAD t2, UNIT(-6)(src), l_exc_copy) +-EXC( LOAD t3, UNIT(-5)(src), l_exc_copy) ++EXC( LOAD t0, UNIT(-8)(src), l_exc_copy_rewind16) ++EXC( LOAD t1, UNIT(-7)(src), l_exc_copy_rewind16) ++EXC( LOAD t2, UNIT(-6)(src), l_exc_copy_rewind16) ++EXC( LOAD t3, UNIT(-5)(src), l_exc_copy_rewind16) + EXC( STORE t0, UNIT(-8)(dst), s_exc_p8u) + EXC( STORE t1, UNIT(-7)(dst), s_exc_p7u) + EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u) + EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u) +-EXC( LOAD t0, UNIT(-4)(src), l_exc_copy) +-EXC( LOAD t1, UNIT(-3)(src), l_exc_copy) +-EXC( LOAD t2, UNIT(-2)(src), l_exc_copy) +-EXC( LOAD t3, UNIT(-1)(src), l_exc_copy) ++EXC( LOAD t0, UNIT(-4)(src), l_exc_copy_rewind16) ++EXC( LOAD t1, UNIT(-3)(src), l_exc_copy_rewind16) ++EXC( LOAD t2, UNIT(-2)(src), l_exc_copy_rewind16) ++EXC( LOAD t3, UNIT(-1)(src), l_exc_copy_rewind16) + EXC( STORE t0, UNIT(-4)(dst), s_exc_p4u) + EXC( STORE t1, UNIT(-3)(dst), s_exc_p3u) + EXC( STORE t2, UNIT(-2)(dst), s_exc_p2u) +@@ -383,6 +383,10 @@ done: + nop + END(memcpy) + ++l_exc_copy_rewind16: ++ /* Rewind src and dst by 16*NBYTES for l_exc_copy */ ++ SUB src, src, 16*NBYTES ++ SUB dst, dst, 16*NBYTES + l_exc_copy: + /* + * Copy bytes from src until faulting load address (or until a +diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig +index 0e36abcd39cc..7446284dd7b3 100644 +--- a/arch/mips/configs/ip27_defconfig ++++ b/arch/mips/configs/ip27_defconfig +@@ -206,7 +206,6 @@ CONFIG_MLX4_EN=m + # CONFIG_MLX4_DEBUG is not set + CONFIG_TEHUTI=m + CONFIG_BNX2X=m +-CONFIG_QLGE=m + CONFIG_SFC=m + CONFIG_BE2NET=m + CONFIG_LIBERTAS_THINFIRM=m +diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c +index fcaac2f132f0..910db386d9ef 100644 +--- a/arch/mips/kernel/kgdb.c ++++ b/arch/mips/kernel/kgdb.c +@@ -236,9 +236,6 @@ static int compute_signal(int tt) + void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) + { + int reg; +- struct thread_info *ti = task_thread_info(p); +- unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32; +- struct pt_regs *regs = (struct pt_regs *)ksp - 1; + #if (KGDB_GDB_REG_SIZE == 32) + u32 *ptr = (u32 *)gdb_regs; + #else +@@ -246,25 +243,46 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) + #endif + + for (reg = 0; reg < 16; reg++) +- *(ptr++) = regs->regs[reg]; ++ *(ptr++) = 0; + + /* S0 - S7 */ +- for (reg = 16; reg < 24; reg++) +- *(ptr++) = regs->regs[reg]; ++ *(ptr++) = p->thread.reg16; ++ *(ptr++) = p->thread.reg17; ++ *(ptr++) = p->thread.reg18; ++ *(ptr++) = p->thread.reg19; ++ *(ptr++) = p->thread.reg20; ++ *(ptr++) = p->thread.reg21; ++ *(ptr++) = p->thread.reg22; ++ *(ptr++) = p->thread.reg23; + + for (reg = 24; reg < 28; reg++) + *(ptr++) = 0; + + /* GP, SP, FP, RA */ +- for (reg = 28; reg < 32; reg++) +- *(ptr++) = regs->regs[reg]; +- +- *(ptr++) = regs->cp0_status; +- *(ptr++) = regs->lo; +- *(ptr++) 
= regs->hi; +- *(ptr++) = regs->cp0_badvaddr; +- *(ptr++) = regs->cp0_cause; +- *(ptr++) = regs->cp0_epc; ++ *(ptr++) = (long)p; ++ *(ptr++) = p->thread.reg29; ++ *(ptr++) = p->thread.reg30; ++ *(ptr++) = p->thread.reg31; ++ ++ *(ptr++) = p->thread.cp0_status; ++ ++ /* lo, hi */ ++ *(ptr++) = 0; ++ *(ptr++) = 0; ++ ++ /* ++ * BadVAddr, Cause ++ * Ideally these would come from the last exception frame up the stack ++ * but that requires unwinding, otherwise we can't know much for sure. ++ */ ++ *(ptr++) = 0; ++ *(ptr++) = 0; ++ ++ /* ++ * PC ++ * use return address (RA), i.e. the moment after return from resume() ++ */ ++ *(ptr++) = p->thread.reg31; + } + + void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) +diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c +index c6a041d9d05d..3cfa3bc288fd 100644 +--- a/arch/mips/kernel/process.c ++++ b/arch/mips/kernel/process.c +@@ -214,11 +214,9 @@ struct mips_frame_info { + #define J_TARGET(pc,target) \ + (((unsigned long)(pc) & 0xf0000000) | ((target) << 2)) + +-static inline int is_ra_save_ins(union mips_instruction *ip) ++static inline int is_ra_save_ins(union mips_instruction *ip, int *poff) + { + #ifdef CONFIG_CPU_MICROMIPS +- union mips_instruction mmi; +- + /* + * swsp ra,offset + * swm16 reglist,offset(sp) +@@ -228,29 +226,71 @@ static inline int is_ra_save_ins(union mips_instruction *ip) + * + * microMIPS is way more fun... + */ +- if (mm_insn_16bit(ip->halfword[0])) { +- mmi.word = (ip->halfword[0] << 16); +- return ((mmi.mm16_r5_format.opcode == mm_swsp16_op && +- mmi.mm16_r5_format.rt == 31) || +- (mmi.mm16_m_format.opcode == mm_pool16c_op && +- mmi.mm16_m_format.func == mm_swm16_op)); ++ if (mm_insn_16bit(ip->halfword[1])) { ++ switch (ip->mm16_r5_format.opcode) { ++ case mm_swsp16_op: ++ if (ip->mm16_r5_format.rt != 31) ++ return 0; ++ ++ *poff = ip->mm16_r5_format.simmediate; ++ *poff = (*poff << 2) / sizeof(ulong); ++ return 1; ++ ++ case mm_pool16c_op: ++ switch (ip->mm16_m_format.func) { ++ case mm_swm16_op: ++ *poff = ip->mm16_m_format.imm; ++ *poff += 1 + ip->mm16_m_format.rlist; ++ *poff = (*poff << 2) / sizeof(ulong); ++ return 1; ++ ++ default: ++ return 0; ++ } ++ ++ default: ++ return 0; ++ } + } +- else { +- mmi.halfword[0] = ip->halfword[1]; +- mmi.halfword[1] = ip->halfword[0]; +- return ((mmi.mm_m_format.opcode == mm_pool32b_op && +- mmi.mm_m_format.rd > 9 && +- mmi.mm_m_format.base == 29 && +- mmi.mm_m_format.func == mm_swm32_func) || +- (mmi.i_format.opcode == mm_sw32_op && +- mmi.i_format.rs == 29 && +- mmi.i_format.rt == 31)); ++ ++ switch (ip->i_format.opcode) { ++ case mm_sw32_op: ++ if (ip->i_format.rs != 29) ++ return 0; ++ if (ip->i_format.rt != 31) ++ return 0; ++ ++ *poff = ip->i_format.simmediate / sizeof(ulong); ++ return 1; ++ ++ case mm_pool32b_op: ++ switch (ip->mm_m_format.func) { ++ case mm_swm32_func: ++ if (ip->mm_m_format.rd < 0x10) ++ return 0; ++ if (ip->mm_m_format.base != 29) ++ return 0; ++ ++ *poff = ip->mm_m_format.simmediate; ++ *poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32); ++ *poff /= sizeof(ulong); ++ return 1; ++ default: ++ return 0; ++ } ++ ++ default: ++ return 0; + } + #else + /* sw / sd $ra, offset($sp) */ +- return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && +- ip->i_format.rs == 29 && +- ip->i_format.rt == 31; ++ if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && ++ ip->i_format.rs == 29 && ip->i_format.rt == 31) { ++ *poff = ip->i_format.simmediate / sizeof(ulong); ++ return 1; ++ } ++ ++ return 0; + #endif + 
} + +@@ -265,13 +305,16 @@ static inline int is_jump_ins(union mips_instruction *ip) + * + * microMIPS is kind of more fun... + */ +- union mips_instruction mmi; +- +- mmi.word = (ip->halfword[0] << 16); ++ if (mm_insn_16bit(ip->halfword[1])) { ++ if ((ip->mm16_r5_format.opcode == mm_pool16c_op && ++ (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op)) ++ return 1; ++ return 0; ++ } + +- if ((mmi.mm16_r5_format.opcode == mm_pool16c_op && +- (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) || +- ip->j_format.opcode == mm_jal32_op) ++ if (ip->j_format.opcode == mm_j32_op) ++ return 1; ++ if (ip->j_format.opcode == mm_jal32_op) + return 1; + if (ip->r_format.opcode != mm_pool32a_op || + ip->r_format.func != mm_pool32axf_op) +@@ -299,15 +342,13 @@ static inline int is_sp_move_ins(union mips_instruction *ip) + * + * microMIPS is not more fun... + */ +- if (mm_insn_16bit(ip->halfword[0])) { +- union mips_instruction mmi; +- +- mmi.word = (ip->halfword[0] << 16); +- return ((mmi.mm16_r3_format.opcode == mm_pool16d_op && +- mmi.mm16_r3_format.simmediate && mm_addiusp_func) || +- (mmi.mm16_r5_format.opcode == mm_pool16d_op && +- mmi.mm16_r5_format.rt == 29)); ++ if (mm_insn_16bit(ip->halfword[1])) { ++ return (ip->mm16_r3_format.opcode == mm_pool16d_op && ++ ip->mm16_r3_format.simmediate && mm_addiusp_func) || ++ (ip->mm16_r5_format.opcode == mm_pool16d_op && ++ ip->mm16_r5_format.rt == 29); + } ++ + return (ip->mm_i_format.opcode == mm_addiu32_op && + ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29); + #else +@@ -322,30 +363,36 @@ static inline int is_sp_move_ins(union mips_instruction *ip) + + static int get_frame_info(struct mips_frame_info *info) + { +-#ifdef CONFIG_CPU_MICROMIPS +- union mips_instruction *ip = (void *) (((char *) info->func) - 1); +-#else +- union mips_instruction *ip = info->func; +-#endif +- unsigned max_insns = info->func_size / sizeof(union mips_instruction); +- unsigned i; ++ bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); ++ union mips_instruction insn, *ip, *ip_end; ++ const unsigned int max_insns = 128; ++ unsigned int i; + + info->pc_offset = -1; + info->frame_size = 0; + ++ ip = (void *)msk_isa16_mode((ulong)info->func); + if (!ip) + goto err; + +- if (max_insns == 0) +- max_insns = 128U; /* unknown function size */ +- max_insns = min(128U, max_insns); +- +- for (i = 0; i < max_insns; i++, ip++) { ++ ip_end = (void *)ip + info->func_size; ++ ++ for (i = 0; i < max_insns && ip < ip_end; i++, ip++) { ++ if (is_mmips && mm_insn_16bit(ip->halfword[0])) { ++ insn.halfword[0] = 0; ++ insn.halfword[1] = ip->halfword[0]; ++ } else if (is_mmips) { ++ insn.halfword[0] = ip->halfword[1]; ++ insn.halfword[1] = ip->halfword[0]; ++ } else { ++ insn.word = ip->word; ++ } + +- if (is_jump_ins(ip)) ++ if (is_jump_ins(&insn)) + break; ++ + if (!info->frame_size) { +- if (is_sp_move_ins(ip)) ++ if (is_sp_move_ins(&insn)) + { + #ifdef CONFIG_CPU_MICROMIPS + if (mm_insn_16bit(ip->halfword[0])) +@@ -368,11 +415,9 @@ static int get_frame_info(struct mips_frame_info *info) + } + continue; + } +- if (info->pc_offset == -1 && is_ra_save_ins(ip)) { +- info->pc_offset = +- ip->i_format.simmediate / sizeof(long); ++ if (info->pc_offset == -1 && ++ is_ra_save_ins(&insn, &info->pc_offset)) + break; +- } + } + if (info->frame_size && info->pc_offset >= 0) /* nested */ + return 0; +diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c +index 8220baa46faf..a1812fbc2648 100644 +--- a/arch/powerpc/kernel/ibmebus.c ++++ b/arch/powerpc/kernel/ibmebus.c +@@ -180,6 +180,7 @@ 
static int ibmebus_create_device(struct device_node *dn) + static int ibmebus_create_devices(const struct of_device_id *matches) + { + struct device_node *root, *child; ++ struct device *dev; + int ret = 0; + + root = of_find_node_by_path("/"); +@@ -188,9 +189,12 @@ static int ibmebus_create_devices(const struct of_device_id *matches) + if (!of_match_node(matches, child)) + continue; + +- if (bus_find_device(&ibmebus_bus_type, NULL, child, +- ibmebus_match_node)) ++ dev = bus_find_device(&ibmebus_bus_type, NULL, child, ++ ibmebus_match_node); ++ if (dev) { ++ put_device(dev); + continue; ++ } + + ret = ibmebus_create_device(child); + if (ret) { +@@ -262,6 +266,7 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus, + const char *buf, size_t count) + { + struct device_node *dn = NULL; ++ struct device *dev; + char *path; + ssize_t rc = 0; + +@@ -269,8 +274,10 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus, + if (!path) + return -ENOMEM; + +- if (bus_find_device(&ibmebus_bus_type, NULL, path, +- ibmebus_match_path)) { ++ dev = bus_find_device(&ibmebus_bus_type, NULL, path, ++ ibmebus_match_path); ++ if (dev) { ++ put_device(dev); + printk(KERN_WARNING "%s: %s has already been probed\n", + __func__, path); + rc = -EEXIST; +@@ -306,6 +313,7 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus, + if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path, + ibmebus_match_path))) { + of_device_unregister(to_platform_device(dev)); ++ put_device(dev); + + kfree(path); + return count; +diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S +index e11863f4e595..ccef1728a4c9 100644 +--- a/arch/powerpc/kernel/idle_power7.S ++++ b/arch/powerpc/kernel/idle_power7.S +@@ -94,7 +94,7 @@ _GLOBAL(power7_nap) + std r0,0(r1) + ptesync + ld r0,0(r1) +-1: cmp cr0,r0,r0 ++1: cmpd cr0,r0,r0 + bne 1b + PPC_NAP + b . +diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S +index e469f30e6eeb..ad8573f053d5 100644 +--- a/arch/powerpc/kernel/misc_32.S ++++ b/arch/powerpc/kernel/misc_32.S +@@ -295,7 +295,7 @@ _GLOBAL(flush_instruction_cache) + lis r3, KERNELBASE@h + iccci 0,r3 + #endif +-#elif CONFIG_FSL_BOOKE ++#elif defined(CONFIG_FSL_BOOKE) + BEGIN_FTR_SECTION + mfspr r3,SPRN_L1CSR0 + ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC +diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c +index 389fb8077cc9..1d3d3d653675 100644 +--- a/arch/powerpc/kernel/setup_64.c ++++ b/arch/powerpc/kernel/setup_64.c +@@ -142,6 +142,15 @@ static void check_smt_enabled(void) + of_node_put(dn); + } + } ++ ++ /* ++ * Fixup HFSCR:TM based on CPU features. The bit is set by our ++ * early asm init because at that point we haven't updated our ++ * CPU features from firmware and device-tree. Here we have, ++ * so let's do it. 
++ */ ++ if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP)) ++ mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM); + } + + /* Look for smt-enabled= cmdline option */ +diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c +index c4c6a1cf221b..05ab88249251 100644 +--- a/arch/s390/boot/compressed/misc.c ++++ b/arch/s390/boot/compressed/misc.c +@@ -138,31 +138,34 @@ static void check_ipl_parmblock(void *start, unsigned long size) + + unsigned long decompress_kernel(void) + { +- unsigned long output_addr; +- unsigned char *output; ++ void *output, *kernel_end; + +- output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL; +- check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start); +- memset(&_bss, 0, &_ebss - &_bss); +- free_mem_ptr = (unsigned long)&_end; +- free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; +- output = (unsigned char *) output_addr; ++ output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE); ++ kernel_end = output + SZ__bss_start; ++ check_ipl_parmblock((void *) 0, (unsigned long) kernel_end); + + #ifdef CONFIG_BLK_DEV_INITRD + /* + * Move the initrd right behind the end of the decompressed +- * kernel image. ++ * kernel image. This also prevents initrd corruption caused by ++ * bss clearing since kernel_end will always be located behind the ++ * current bss section.. + */ +- if (INITRD_START && INITRD_SIZE && +- INITRD_START < (unsigned long) output + SZ__bss_start) { +- check_ipl_parmblock(output + SZ__bss_start, +- INITRD_START + INITRD_SIZE); +- memmove(output + SZ__bss_start, +- (void *) INITRD_START, INITRD_SIZE); +- INITRD_START = (unsigned long) output + SZ__bss_start; ++ if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) { ++ check_ipl_parmblock(kernel_end, INITRD_SIZE); ++ memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE); ++ INITRD_START = (unsigned long) kernel_end; + } + #endif + ++ /* ++ * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be ++ * initialized afterwards since they reside in bss. ++ */ ++ memset(&_bss, 0, &_ebss - &_bss); ++ free_mem_ptr = (unsigned long) &_end; ++ free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; ++ + puts("Uncompressing Linux... 
"); + decompress(input_data, input_len, NULL, NULL, output, NULL, error); + puts("Ok, booting the kernel.\n"); +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 81e0fe48b9b0..7e09789d2cf3 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1066,7 +1066,7 @@ static __init int setup_disablecpuid(char *arg) + { + int bit; + +- if (get_option(&arg, &bit) && bit < NCAPINTS*32) ++ if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32) + setup_clear_cpu_cap(bit); + else + return 0; +diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S +index 5c38e2b298cd..c502340ef270 100644 +--- a/arch/x86/kernel/entry_32.S ++++ b/arch/x86/kernel/entry_32.S +@@ -1103,8 +1103,8 @@ ftrace_graph_call: + jmp ftrace_stub + #endif + +-.globl ftrace_stub +-ftrace_stub: ++/* This is weak to keep gas from relaxing the jumps */ ++WEAK(ftrace_stub) + ret + END(ftrace_caller) + +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index ddad189e596e..c96485054f6b 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -906,6 +906,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt, + return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception); + } + ++static int segmented_write_std(struct x86_emulate_ctxt *ctxt, ++ struct segmented_address addr, ++ void *data, ++ unsigned int size) ++{ ++ int rc; ++ ulong linear; ++ ++ rc = linearize(ctxt, addr, size, true, &linear); ++ if (rc != X86EMUL_CONTINUE) ++ return rc; ++ return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception); ++} ++ + /* + * Fetch the next byte of the instruction being emulated which is pointed to + * by ctxt->_eip, then increment ctxt->_eip. +@@ -1599,7 +1613,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, + &ctxt->exception); + } + +-/* Does not support long mode */ + static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, + u16 selector, int seg) + { +@@ -1612,6 +1625,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, + int ret; + u16 dummy; + ++ ++ /* ++ * None of MOV, POP and LSS can load a NULL selector in CPL=3, but ++ * they can load it at CPL<3 (Intel's manual says only LSS can, ++ * but it's wrong). ++ * ++ * However, the Intel manual says that putting IST=1/DPL=3 in ++ * an interrupt gate will result in SS=3 (the AMD manual instead ++ * says it doesn't), so allow SS=3 in __load_segment_descriptor ++ * and only forbid it here. 
++ */ ++ if (seg == VCPU_SREG_SS && selector == 3 && ++ ctxt->mode == X86EMUL_MODE_PROT64) ++ return emulate_exception(ctxt, GP_VECTOR, 0, true); ++ + memset(&seg_desc, 0, sizeof seg_desc); + + if (ctxt->mode == X86EMUL_MODE_REAL) { +@@ -1634,20 +1662,34 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, + rpl = selector & 3; + cpl = ctxt->ops->cpl(ctxt); + +- /* NULL selector is not valid for TR, CS and SS (except for long mode) */ +- if ((seg == VCPU_SREG_CS +- || (seg == VCPU_SREG_SS +- && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)) +- || seg == VCPU_SREG_TR) +- && null_selector) +- goto exception; +- + /* TR should be in GDT only */ + if (seg == VCPU_SREG_TR && (selector & (1 << 2))) + goto exception; + +- if (null_selector) /* for NULL selector skip all following checks */ ++ /* NULL selector is not valid for TR, CS and (except for long mode) SS */ ++ if (null_selector) { ++ if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR) ++ goto exception; ++ ++ if (seg == VCPU_SREG_SS) { ++ if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl) ++ goto exception; ++ ++ /* ++ * ctxt->ops->set_segment expects the CPL to be in ++ * SS.DPL, so fake an expand-up 32-bit data segment. ++ */ ++ seg_desc.type = 3; ++ seg_desc.p = 1; ++ seg_desc.s = 1; ++ seg_desc.dpl = cpl; ++ seg_desc.d = 1; ++ seg_desc.g = 1; ++ } ++ ++ /* Skip all following checks */ + goto load; ++ } + + ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr); + if (ret != X86EMUL_CONTINUE) +@@ -3333,8 +3375,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, + } + /* Disable writeback. */ + ctxt->dst.type = OP_NONE; +- return segmented_write(ctxt, ctxt->dst.addr.mem, +- &desc_ptr, 2 + ctxt->op_bytes); ++ return segmented_write_std(ctxt, ctxt->dst.addr.mem, ++ &desc_ptr, 2 + ctxt->op_bytes); + } + + static int em_sgdt(struct x86_emulate_ctxt *ctxt) +diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c +index 48e8461057ba..6e4580b87600 100644 +--- a/arch/x86/pci/xen.c ++++ b/arch/x86/pci/xen.c +@@ -227,23 +227,14 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) + return 1; + + list_for_each_entry(msidesc, &dev->msi_list, list) { +- __read_msi_msg(msidesc, &msg); +- pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | +- ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); +- if (msg.data != XEN_PIRQ_MSI_DATA || +- xen_irq_from_pirq(pirq) < 0) { +- pirq = xen_allocate_pirq_msi(dev, msidesc); +- if (pirq < 0) { +- irq = -ENODEV; +- goto error; +- } +- xen_msi_compose_msg(dev, pirq, &msg); +- __write_msi_msg(msidesc, &msg); +- dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); +- } else { +- dev_dbg(&dev->dev, +- "xen: msi already bound to pirq=%d\n", pirq); ++ pirq = xen_allocate_pirq_msi(dev, msidesc); ++ if (pirq < 0) { ++ irq = -ENODEV; ++ goto error; + } ++ xen_msi_compose_msg(dev, pirq, &msg); ++ __write_msi_msg(msidesc, &msg); ++ dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); + irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, + (type == PCI_CAP_ID_MSIX) ? 
+ "msi-x" : "msi", +diff --git a/arch/x86/platform/goldfish/goldfish.c b/arch/x86/platform/goldfish/goldfish.c +index 1693107a518e..0d17c0aafeb1 100644 +--- a/arch/x86/platform/goldfish/goldfish.c ++++ b/arch/x86/platform/goldfish/goldfish.c +@@ -42,10 +42,22 @@ static struct resource goldfish_pdev_bus_resources[] = { + } + }; + ++static bool goldfish_enable __initdata; ++ ++static int __init goldfish_setup(char *str) ++{ ++ goldfish_enable = true; ++ return 0; ++} ++__setup("goldfish", goldfish_setup); ++ + static int __init goldfish_init(void) + { ++ if (!goldfish_enable) ++ return -ENODEV; ++ + platform_device_register_simple("goldfish_pdev_bus", -1, +- goldfish_pdev_bus_resources, 2); ++ goldfish_pdev_bus_resources, 2); + return 0; + } + device_initcall(goldfish_init); +diff --git a/crypto/Makefile b/crypto/Makefile +index a8e9b0fefbe9..b54916590d3a 100644 +--- a/crypto/Makefile ++++ b/crypto/Makefile +@@ -2,8 +2,13 @@ + # Cryptographic API + # + ++# memneq MUST be built with -Os or -O0 to prevent early-return optimizations ++# that will defeat memneq's actual purpose to prevent timing attacks. ++CFLAGS_REMOVE_memneq.o := -O1 -O2 -O3 ++CFLAGS_memneq.o := -Os ++ + obj-$(CONFIG_CRYPTO) += crypto.o +-crypto-y := api.o cipher.o compress.o ++crypto-y := api.o cipher.o compress.o memneq.o + + obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o + +diff --git a/crypto/asymmetric_keys/rsa.c b/crypto/asymmetric_keys/rsa.c +index 4a6a0696f8a3..1912b9be5043 100644 +--- a/crypto/asymmetric_keys/rsa.c ++++ b/crypto/asymmetric_keys/rsa.c +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + #include "public_key.h" + + MODULE_LICENSE("GPL"); +@@ -189,12 +190,12 @@ static int RSA_verify(const u8 *H, const u8 *EM, size_t k, size_t hash_size, + } + } + +- if (memcmp(asn1_template, EM + T_offset, asn1_size) != 0) { ++ if (crypto_memneq(asn1_template, EM + T_offset, asn1_size) != 0) { + kleave(" = -EBADMSG [EM[T] ASN.1 mismatch]"); + return -EBADMSG; + } + +- if (memcmp(H, EM + T_offset + asn1_size, hash_size) != 0) { ++ if (crypto_memneq(H, EM + T_offset + asn1_size, hash_size) != 0) { + kleave(" = -EKEYREJECTED [EM[T] hash mismatch]"); + return -EKEYREJECTED; + } +diff --git a/crypto/authenc.c b/crypto/authenc.c +index a2cfae251dd5..65bcd076b18b 100644 +--- a/crypto/authenc.c ++++ b/crypto/authenc.c +@@ -188,7 +188,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, + scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, + authsize, 0); + +- err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; ++ err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; + if (err) + goto out; + +@@ -227,7 +227,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq, + scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, + authsize, 0); + +- err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; ++ err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; + if (err) + goto out; + +@@ -463,7 +463,7 @@ static int crypto_authenc_verify(struct aead_request *req, + ihash = ohash + authsize; + scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, + authsize, 0); +- return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; ++ return crypto_memneq(ihash, ohash, authsize) ? 
-EBADMSG : 0; + } + + static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, +diff --git a/crypto/authencesn.c b/crypto/authencesn.c +index 16c225cb28c2..a3ef98be2064 100644 +--- a/crypto/authencesn.c ++++ b/crypto/authencesn.c +@@ -247,7 +247,7 @@ static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *ar + scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, + authsize, 0); + +- err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; ++ err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; + if (err) + goto out; + +@@ -296,7 +296,7 @@ static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *a + scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, + authsize, 0); + +- err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; ++ err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; + if (err) + goto out; + +@@ -336,7 +336,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, + scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, + authsize, 0); + +- err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; ++ err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; + if (err) + goto out; + +@@ -568,7 +568,7 @@ static int crypto_authenc_esn_verify(struct aead_request *req) + ihash = ohash + authsize; + scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, + authsize, 0); +- return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; ++ return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0; + } + + static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv, +diff --git a/crypto/ccm.c b/crypto/ccm.c +index c569c9c6afe3..003bbbd21a2b 100644 +--- a/crypto/ccm.c ++++ b/crypto/ccm.c +@@ -364,7 +364,7 @@ static void crypto_ccm_decrypt_done(struct crypto_async_request *areq, + + if (!err) { + err = crypto_ccm_auth(req, req->dst, cryptlen); +- if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize)) ++ if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize)) + err = -EBADMSG; + } + aead_request_complete(req, err); +@@ -423,7 +423,7 @@ static int crypto_ccm_decrypt(struct aead_request *req) + return err; + + /* verify */ +- if (memcmp(authtag, odata, authsize)) ++ if (crypto_memneq(authtag, odata, authsize)) + return -EBADMSG; + + return err; +diff --git a/crypto/gcm.c b/crypto/gcm.c +index a1ec756b8438..49b6fb20cceb 100644 +--- a/crypto/gcm.c ++++ b/crypto/gcm.c +@@ -582,7 +582,7 @@ static int crypto_gcm_verify(struct aead_request *req, + + crypto_xor(auth_tag, iauth_tag, 16); + scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); +- return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; ++ return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; + } + + static void gcm_decrypt_done(struct crypto_async_request *areq, int err) +diff --git a/crypto/memneq.c b/crypto/memneq.c +new file mode 100644 +index 000000000000..cd0162221c14 +--- /dev/null ++++ b/crypto/memneq.c +@@ -0,0 +1,138 @@ ++/* ++ * Constant-time equality testing of memory regions. ++ * ++ * Authors: ++ * ++ * James Yonan ++ * Daniel Borkmann ++ * ++ * This file is provided under a dual BSD/GPLv2 license. When using or ++ * redistributing this file, you may do so under either license. ++ * ++ * GPL LICENSE SUMMARY ++ * ++ * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of version 2 of the GNU General Public License as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. ++ * The full GNU General Public License is included in this distribution ++ * in the file called LICENSE.GPL. ++ * ++ * BSD LICENSE ++ * ++ * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of OpenVPN Technologies nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include ++ ++#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ ++ ++/* Generic path for arbitrary size */ ++static inline unsigned long ++__crypto_memneq_generic(const void *a, const void *b, size_t size) ++{ ++ unsigned long neq = 0; ++ ++#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ++ while (size >= sizeof(unsigned long)) { ++ neq |= *(unsigned long *)a ^ *(unsigned long *)b; ++ a += sizeof(unsigned long); ++ b += sizeof(unsigned long); ++ size -= sizeof(unsigned long); ++ } ++#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ ++ while (size > 0) { ++ neq |= *(unsigned char *)a ^ *(unsigned char *)b; ++ a += 1; ++ b += 1; ++ size -= 1; ++ } ++ return neq; ++} ++ ++/* Loop-free fast-path for frequently used 16-byte size */ ++static inline unsigned long __crypto_memneq_16(const void *a, const void *b) ++{ ++#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS ++ if (sizeof(unsigned long) == 8) ++ return ((*(unsigned long *)(a) ^ *(unsigned long *)(b)) ++ | (*(unsigned long *)(a+8) ^ *(unsigned long *)(b+8))); ++ else if (sizeof(unsigned int) == 4) ++ return ((*(unsigned int *)(a) ^ *(unsigned int *)(b)) ++ | (*(unsigned int *)(a+4) ^ *(unsigned int *)(b+4)) ++ | (*(unsigned int *)(a+8) ^ *(unsigned int *)(b+8)) ++ | (*(unsigned int *)(a+12) ^ *(unsigned int *)(b+12))); ++ else ++#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ ++ return ((*(unsigned char *)(a) ^ *(unsigned char *)(b)) ++ | (*(unsigned char *)(a+1) ^ *(unsigned char *)(b+1)) ++ | (*(unsigned char *)(a+2) ^ *(unsigned char *)(b+2)) ++ | (*(unsigned char *)(a+3) ^ *(unsigned char *)(b+3)) ++ | (*(unsigned char *)(a+4) ^ *(unsigned char *)(b+4)) ++ | (*(unsigned char *)(a+5) ^ *(unsigned char *)(b+5)) ++ | (*(unsigned char *)(a+6) ^ *(unsigned char *)(b+6)) ++ | (*(unsigned char *)(a+7) ^ *(unsigned char *)(b+7)) ++ | (*(unsigned char *)(a+8) ^ *(unsigned char *)(b+8)) ++ | (*(unsigned char *)(a+9) ^ *(unsigned char *)(b+9)) ++ | (*(unsigned char *)(a+10) ^ *(unsigned char *)(b+10)) ++ | (*(unsigned char *)(a+11) ^ *(unsigned char *)(b+11)) ++ | (*(unsigned char *)(a+12) ^ *(unsigned char *)(b+12)) ++ | (*(unsigned char *)(a+13) ^ *(unsigned char *)(b+13)) ++ | (*(unsigned char *)(a+14) ^ *(unsigned char *)(b+14)) ++ | (*(unsigned char *)(a+15) ^ *(unsigned char *)(b+15))); ++} ++ ++/* Compare two areas of memory without leaking timing information, ++ * and with special optimizations for common sizes. Users should ++ * not call this function directly, but should instead use ++ * crypto_memneq defined in crypto/algapi.h. 
++ */ ++noinline unsigned long __crypto_memneq(const void *a, const void *b, ++ size_t size) ++{ ++ switch (size) { ++ case 16: ++ return __crypto_memneq_16(a, b); ++ default: ++ return __crypto_memneq_generic(a, b, size); ++ } ++} ++EXPORT_SYMBOL(__crypto_memneq); ++ ++#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */ +diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c +index 11441ad69de3..276ea4727ad2 100644 +--- a/drivers/acpi/osl.c ++++ b/drivers/acpi/osl.c +@@ -173,7 +173,7 @@ static void __init acpi_request_region (struct acpi_generic_address *gas, + request_mem_region(addr, length, desc); + } + +-static void __init acpi_reserve_resources(void) ++static int __init acpi_reserve_resources(void) + { + acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length, + "ACPI PM1a_EVT_BLK"); +@@ -202,7 +202,10 @@ static void __init acpi_reserve_resources(void) + if (!(acpi_gbl_FADT.gpe1_block_length & 0x1)) + acpi_request_region(&acpi_gbl_FADT.xgpe1_block, + acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK"); ++ ++ return 0; + } ++fs_initcall_sync(acpi_reserve_resources); + + void acpi_os_printf(const char *fmt, ...) + { +@@ -1724,7 +1727,6 @@ acpi_status __init acpi_os_initialize(void) + + acpi_status __init acpi_os_initialize1(void) + { +- acpi_reserve_resources(); + kacpid_wq = alloc_workqueue("kacpid", 0, 1); + kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1); + kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1); +diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig +index 3bb6fa3930be..30878924e65b 100644 +--- a/drivers/char/Kconfig ++++ b/drivers/char/Kconfig +@@ -580,7 +580,6 @@ config TELCLOCK + + config DEVPORT + bool +- depends on !M68K + depends on ISA || PCI + default y + +diff --git a/drivers/char/lp.c b/drivers/char/lp.c +index 0913d79424d3..6b619105dea8 100644 +--- a/drivers/char/lp.c ++++ b/drivers/char/lp.c +@@ -857,7 +857,11 @@ static int __init lp_setup (char *str) + } else if (!strcmp(str, "auto")) { + parport_nr[0] = LP_PARPORT_AUTO; + } else if (!strcmp(str, "none")) { +- parport_nr[parport_ptr++] = LP_PARPORT_NONE; ++ if (parport_ptr < LP_NO) ++ parport_nr[parport_ptr++] = LP_PARPORT_NONE; ++ else ++ printk(KERN_INFO "lp: too many ports, %s ignored.\n", ++ str); + } else if (!strcmp(str, "reset")) { + reset = 1; + } +diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c +index 917a3ab482f9..e2e5e76e9805 100644 +--- a/drivers/clk/clk-wm831x.c ++++ b/drivers/clk/clk-wm831x.c +@@ -248,7 +248,7 @@ static int wm831x_clkout_is_enabled(struct clk_hw *hw) + if (ret < 0) { + dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_1: %d\n", + ret); +- return true; ++ return false; + } + + return (ret & WM831X_CLKOUT_ENA) != 0; +diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c +index bf416a8391a7..0cba9273e6c9 100644 +--- a/drivers/crypto/caam/caamalg.c ++++ b/drivers/crypto/caam/caamalg.c +@@ -422,7 +422,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead) + + /* Will read cryptlen */ + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); +- aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF | ++ FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH); ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); + + /* Write ICV */ + append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | +diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c +index dda43cc4b6cd..e9d8b235f68d 100644 +--- 
a/drivers/crypto/caam/caamhash.c ++++ b/drivers/crypto/caam/caamhash.c +@@ -1793,6 +1793,7 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template, + template->name); + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + template->driver_name); ++ t_alg->ahash_alg.setkey = NULL; + } + alg->cra_module = THIS_MODULE; + alg->cra_init = caam_hash_cra_init; +diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c +index 53435a9d847e..93c80d7143ef 100644 +--- a/drivers/gpu/drm/i915/intel_crt.c ++++ b/drivers/gpu/drm/i915/intel_crt.c +@@ -428,6 +428,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) + struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; + struct edid *edid; + struct i2c_adapter *i2c; ++ bool ret = false; + + BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); + +@@ -444,17 +445,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) + */ + if (!is_digital) { + DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); +- return true; ++ ret = true; ++ } else { ++ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); + } +- +- DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); + } else { + DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n"); + } + + kfree(edid); + +- return false; ++ return ret; + } + + static enum drm_connector_status +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c +index 8814b0dbfc4f..a7dbdec68994 100644 +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -7052,9 +7052,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev, + + wake_up_all(&dev_priv->pending_flip_queue); + +- queue_work(dev_priv->wq, &work->work); +- + trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); ++ ++ queue_work(dev_priv->wq, &work->work); + } + + void intel_finish_page_flip(struct drm_device *dev, int pipe) +diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c +index dd5e01f89f28..969acd36c409 100644 +--- a/drivers/gpu/drm/nouveau/nv50_display.c ++++ b/drivers/gpu/drm/nouveau/nv50_display.c +@@ -1253,7 +1253,7 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, + uint32_t start, uint32_t size) + { + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); +- u32 end = max(start + size, (u32)256); ++ u32 end = min_t(u32, start + size, 256); + u32 i; + + for (i = start; i < end; i++) { +diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c +index ae4923756d98..b1039552b623 100644 +--- a/drivers/hv/hv.c ++++ b/drivers/hv/hv.c +@@ -193,7 +193,7 @@ cleanup: + * + * This routine is called normally during driver unloading or exiting. 
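
A few hunks back, the nv50 gamma fix replaces max() with min_t(): max(start + size, 256) keeps the loop bound at no less than 256 entries, so it overruns the 256-entry LUT for large sizes and ignores small ones, while min_t() caps the bound as intended. A two-line demonstration (values hypothetical):

    /* clamp_demo.c - why max() cannot bound a LUT index from above. */
    #include <stdio.h>

    #define min_t(t, a, b) ((t)(a) < (t)(b) ? (t)(a) : (t)(b))
    #define max_t(t, a, b) ((t)(a) > (t)(b) ? (t)(a) : (t)(b))

    int main(void)
    {
        unsigned int start = 0, size = 512;

        printf("old end = %u (runs past a 256-entry table)\n",
               max_t(unsigned int, start + size, 256));
        printf("new end = %u (capped)\n",
               min_t(unsigned int, start + size, 256));
        return 0;
    }
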
+ */ +-void hv_cleanup(void) ++void hv_cleanup(bool crash) + { + union hv_x64_msr_hypercall_contents hypercall_msr; + +@@ -203,7 +203,8 @@ void hv_cleanup(void) + if (hv_context.hypercall_page) { + hypercall_msr.as_uint64 = 0; + wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); +- vfree(hv_context.hypercall_page); ++ if (!crash) ++ vfree(hv_context.hypercall_page); + hv_context.hypercall_page = NULL; + } + } +diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h +index 12f2f9e989f7..11d4e6222f52 100644 +--- a/drivers/hv/hyperv_vmbus.h ++++ b/drivers/hv/hyperv_vmbus.h +@@ -519,7 +519,7 @@ extern struct hv_context hv_context; + + extern int hv_init(void); + +-extern void hv_cleanup(void); ++extern void hv_cleanup(bool crash); + + extern int hv_post_message(union hv_connection_id connection_id, + enum hv_message_type message_type, +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c +index 80754e2d8086..3190a1fc7bc8 100644 +--- a/drivers/hv/vmbus_drv.c ++++ b/drivers/hv/vmbus_drv.c +@@ -618,7 +618,7 @@ err_unregister: + bus_unregister(&hv_bus); + + err_cleanup: +- hv_cleanup(); ++ hv_cleanup(false); + + return ret; + } +@@ -841,7 +841,7 @@ static void __exit vmbus_exit(void) + free_irq(irq, hv_acpi_dev); + vmbus_free_channels(); + bus_unregister(&hv_bus); +- hv_cleanup(); ++ hv_cleanup(false); + acpi_bus_unregister_driver(&vmbus_acpi_driver); + hv_cpu_hotplug_quirk(false); + } +diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c +index f1d6b422cf06..c25700f7db93 100644 +--- a/drivers/hwmon/ds620.c ++++ b/drivers/hwmon/ds620.c +@@ -166,7 +166,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da, + if (res) + return res; + +- val = (val * 10 / 625) * 8; ++ val = (clamp_val(val, -128000, 128000) * 10 / 625) * 8; + + mutex_lock(&data->update_lock); + data->temp[attr->index] = val; +diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c +index c880d13f5405..f079877bd4ea 100644 +--- a/drivers/i2c/busses/i2c-at91.c ++++ b/drivers/i2c/busses/i2c-at91.c +@@ -273,8 +273,14 @@ error: + + static void at91_twi_read_next_byte(struct at91_twi_dev *dev) + { +- if (dev->buf_len <= 0) ++ /* ++ * If we are in this case, it means there is garbage data in RHR, so ++ * delete them. ++ */ ++ if (!dev->buf_len) { ++ at91_twi_read(dev, AT91_TWI_RHR); + return; ++ } + + *dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff; + --dev->buf_len; +@@ -371,6 +377,24 @@ static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id) + + if (!irqstatus) + return IRQ_NONE; ++ /* ++ * In reception, the behavior of the twi device (before sama5d2) is ++ * weird. There is some magic about RXRDY flag! When a data has been ++ * almost received, the reception of a new one is anticipated if there ++ * is no stop command to send. That is the reason why ask for sending ++ * the stop command not on the last data but on the second last one. ++ * ++ * Unfortunately, we could still have the RXRDY flag set even if the ++ * transfer is done and we have read the last data. It might happen ++ * when the i2c slave device sends too quickly data after receiving the ++ * ack from the master. The data has been almost received before having ++ * the order to send stop. In this case, sending the stop command could ++ * cause a RXRDY interrupt with a TXCOMP one. It is better to manage ++ * the RXRDY interrupt first in order to not keep garbage data in the ++ * Receive Holding Register for the next transfer. 
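
The companion change earlier in this file makes at91_twi_read_next_byte() perform exactly that cleanup: when buf_len is zero the RHR is read once and the byte discarded. The same drain-on-completion pattern as a self-contained user-space sketch (the fake register variable stands in for at91_twi_read(dev, AT91_TWI_RHR)):

    /* rhr_drain.c - sketch of the "drain stale receive data" pattern
     * described in the comment above.  Register access is faked.
     */
    #include <stdio.h>

    static unsigned int fake_rhr = 0x5a;        /* leftover byte */

    static unsigned int read_rhr(void) { return fake_rhr; }

    static void read_next_byte(unsigned char *buf, int *buf_len)
    {
        if (*buf_len == 0) {
            /* transfer already complete: consume the garbage byte so
             * it cannot leak into the next transfer, store nothing */
            (void)read_rhr();
            return;
        }
        *buf = read_rhr() & 0xff;
        --*buf_len;
    }

    int main(void)
    {
        unsigned char b = 0;
        int len = 0;                            /* nothing expected */

        read_next_byte(&b, &len);
        printf("buffer untouched: %u, len: %d\n", b, len);
        return 0;
    }
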
++ */ ++ if (irqstatus & AT91_TWI_RXRDY) ++ at91_twi_read_next_byte(dev); + + /* + * When a NACK condition is detected, the I2C controller sets the NACK, +@@ -413,8 +437,6 @@ static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id) + if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) { + at91_disable_twi_interrupts(dev); + complete(&dev->cmd_complete); +- } else if (irqstatus & AT91_TWI_RXRDY) { +- at91_twi_read_next_byte(dev); + } else if (irqstatus & AT91_TWI_TXRDY) { + at91_twi_write_next_byte(dev); + } +@@ -429,7 +451,6 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) + { + int ret; + bool has_unre_flag = dev->pdata->has_unre_flag; +- unsigned sr; + + /* + * WARNING: the TXCOMP bit in the Status Register is NOT a clear on +@@ -466,7 +487,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) + dev->transfer_status = 0; + + /* Clear pending interrupts, such as NACK. */ +- sr = at91_twi_read(dev, AT91_TWI_SR); ++ at91_twi_read(dev, AT91_TWI_SR); + + if (!dev->buf_len) { + at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK); +@@ -474,11 +495,6 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) + } else if (dev->msg->flags & I2C_M_RD) { + unsigned start_flags = AT91_TWI_START; + +- if (sr & AT91_TWI_RXRDY) { +- dev_err(dev->dev, "RXRDY still set!"); +- at91_twi_read(dev, AT91_TWI_RHR); +- } +- + /* if only one byte is to be read, immediately stop transfer */ + if (dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN)) + start_flags |= AT91_TWI_STOP; +diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c +index dc3fd1e8af07..200f6c10eee5 100644 +--- a/drivers/infiniband/core/mad.c ++++ b/drivers/infiniband/core/mad.c +@@ -1598,7 +1598,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv, + if (!class) + goto out; + if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >= +- IB_MGMT_MAX_METHODS) ++ ARRAY_SIZE(class->method_table)) + goto out; + method = class->method_table[convert_mgmt_class( + mad->mad_hdr.mgmt_class)]; +diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c +index 180d7f436ed5..2f861b59cbc1 100644 +--- a/drivers/infiniband/core/multicast.c ++++ b/drivers/infiniband/core/multicast.c +@@ -516,8 +516,11 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec, + if (status) + process_join_error(group, status); + else { +- ib_find_pkey(group->port->dev->device, group->port->port_num, +- be16_to_cpu(rec->pkey), &pkey_index); ++ ++ if (ib_find_pkey(group->port->dev->device, ++ group->port->port_num, be16_to_cpu(rec->pkey), ++ &pkey_index)) ++ pkey_index = MCAST_INVALID_PKEY_INDEX; + + spin_lock_irq(&group->port->lock); + group->rec = *rec; +diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c +index f55d69500a5f..3a85e7669068 100644 +--- a/drivers/infiniband/hw/mlx4/ah.c ++++ b/drivers/infiniband/hw/mlx4/ah.c +@@ -118,7 +118,9 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr + !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support)) + --ah->av.eth.stat_rate; + } +- ++ ah->av.eth.sl_tclass_flowlabel |= ++ cpu_to_be32((ah_attr->grh.traffic_class << 20) | ++ ah_attr->grh.flow_label); + /* + * HW requires multicast LID so we just choose one. 
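
Back in the mad.c hunk, the bound for the method-table index changes from the protocol constant IB_MGMT_MAX_METHODS to ARRAY_SIZE(class->method_table); validating against the array actually being indexed keeps the check correct even if the constant and the table drift apart. Reduced to its essentials (table contents hypothetical):

    /* bound_by_array.c - validate an index against the table itself,
     * not against a protocol constant that may be larger.
     */
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *method_table[8] = { "get", "set" /* rest NULL */ };

    static const char *lookup(unsigned int idx)
    {
        if (idx >= ARRAY_SIZE(method_table))   /* not: idx >= SOME_MAX */
            return NULL;
        return method_table[idx];
    }

    int main(void)
    {
        printf("%s\n", lookup(0)   ? lookup(0)   : "(out of range)");
        printf("%s\n", lookup(100) ? lookup(100) : "(out of range)");
        return 0;
    }
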
+ */ +@@ -126,7 +128,7 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr + ah->av.ib.dlid = cpu_to_be16(0xc000); + + memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16); +- ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29); ++ ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(ah_attr->sl << 29); + + return &ah->ibah; + } +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c +index 23d734349d8e..6b810b12433d 100644 +--- a/drivers/infiniband/hw/mlx4/main.c ++++ b/drivers/infiniband/hw/mlx4/main.c +@@ -312,9 +312,11 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, + if (err) + goto out; + +- props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ? +- IB_WIDTH_4X : IB_WIDTH_1X; +- props->active_speed = IB_SPEED_QDR; ++ props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) || ++ (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ? ++ IB_WIDTH_4X : IB_WIDTH_1X; ++ props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ? ++ IB_SPEED_FDR : IB_SPEED_QDR; + props->port_cap_flags = IB_PORT_CM_SUP; + props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; + props->max_msg_sz = mdev->dev->caps.max_msg_sz; +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c +index aa9ad2d70ddd..c781c7c633fd 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c +@@ -1482,12 +1482,14 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr, + + ret = ipoib_set_mode(dev, buf); + +- rtnl_unlock(); +- +- if (!ret) +- return count; ++ /* The assumption is that the function ipoib_set_mode returned ++ * with the rtnl held by it, if not the value -EBUSY returned, ++ * then no need to rtnl_unlock ++ */ ++ if (ret != -EBUSY) ++ rtnl_unlock(); + +- return ret; ++ return (!ret || ret == -EBUSY) ? count : ret; + } + + static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode); +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c +index 375f9edd4027..b022d7108101 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c +@@ -234,8 +234,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf) + priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; + + ipoib_flush_paths(dev); +- rtnl_lock(); +- return 0; ++ return (!rtnl_trylock()) ? -EBUSY : 0; + } + + if (!strcmp(buf, "datagram\n")) { +@@ -244,8 +243,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf) + dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu)); + rtnl_unlock(); + ipoib_flush_paths(dev); +- rtnl_lock(); +- return 0; ++ return (!rtnl_trylock()) ? 
-EBUSY : 0; + } + + return -EINVAL; +diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c +index 082684e7f390..d6a35a713856 100644 +--- a/drivers/input/misc/cm109.c ++++ b/drivers/input/misc/cm109.c +@@ -669,6 +669,10 @@ static int cm109_usb_probe(struct usb_interface *intf, + int error = -ENOMEM; + + interface = intf->cur_altsetting; ++ ++ if (interface->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + endpoint = &interface->endpoint[0].desc; + + if (!usb_endpoint_is_int_in(endpoint)) +diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c +index 77164dc1bedd..8fb814ccfd7a 100644 +--- a/drivers/input/misc/ims-pcu.c ++++ b/drivers/input/misc/ims-pcu.c +@@ -1437,6 +1437,10 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc + return -EINVAL; + + alt = pcu->ctrl_intf->cur_altsetting; ++ ++ if (alt->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + pcu->ep_ctrl = &alt->endpoint[0].desc; + pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl); + +diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c +index 285a5bd6cbc9..3b6fdb389a2d 100644 +--- a/drivers/input/misc/yealink.c ++++ b/drivers/input/misc/yealink.c +@@ -876,6 +876,10 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id) + int ret, pipe, i; + + interface = intf->cur_altsetting; ++ ++ if (interface->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + endpoint = &interface->endpoint[0].desc; + if (!usb_endpoint_is_int_in(endpoint)) + return -ENODEV; +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index 5102b4f68f18..875e680e90c2 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -211,6 +211,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { + DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"), + }, + }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), ++ }, ++ }, + { } + }; + +diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c +index 5cc04124995c..263c85e72e14 100644 +--- a/drivers/input/tablet/hanwang.c ++++ b/drivers/input/tablet/hanwang.c +@@ -341,6 +341,9 @@ static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id + int error; + int i; + ++ if (intf->cur_altsetting->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL); + input_dev = input_allocate_device(); + if (!hanwang || !input_dev) { +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c +index 1c62c248da6a..0e7cd14bf7bb 100644 +--- a/drivers/iommu/amd_iommu.c ++++ b/drivers/iommu/amd_iommu.c +@@ -1029,7 +1029,7 @@ again: + next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; + left = (head - next_tail) % iommu->cmd_buf_size; + +- if (left <= 2) { ++ if (left <= 0x20) { + struct iommu_cmd sync_cmd; + volatile u64 sem = 0; + int ret; +diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c +index c44950d3eb7b..6d4d9c1c2cf0 100644 +--- a/drivers/isdn/gigaset/bas-gigaset.c ++++ b/drivers/isdn/gigaset/bas-gigaset.c +@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface, + return -ENODEV; + } + ++ if (hostif->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + dev_info(&udev->dev, + "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n", + __func__, le16_to_cpu(udev->descriptor.idVendor), +diff --git a/drivers/isdn/gigaset/ser-gigaset.c 
b/drivers/isdn/gigaset/ser-gigaset.c +index 3ac9c4194814..53dfe1693e50 100644 +--- a/drivers/isdn/gigaset/ser-gigaset.c ++++ b/drivers/isdn/gigaset/ser-gigaset.c +@@ -787,8 +787,10 @@ static int __init ser_gigaset_init(void) + driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, + GIGASET_MODULENAME, GIGASET_DEVNAME, + &ops, THIS_MODULE); +- if (!driver) ++ if (!driver) { ++ rc = -ENOMEM; + goto error; ++ } + + rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc); + if (rc != 0) { +diff --git a/drivers/md/dm.c b/drivers/md/dm.c +index a77ef6cac62d..975bb316a552 100644 +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -976,11 +976,62 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) + } + EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); + ++/* ++ * Flush current->bio_list when the target map method blocks. ++ * This fixes deadlocks in snapshot and possibly in other targets. ++ */ ++struct dm_offload { ++ struct blk_plug plug; ++ struct blk_plug_cb cb; ++}; ++ ++static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule) ++{ ++ struct dm_offload *o = container_of(cb, struct dm_offload, cb); ++ struct bio_list list; ++ struct bio *bio; ++ ++ INIT_LIST_HEAD(&o->cb.list); ++ ++ if (unlikely(!current->bio_list)) ++ return; ++ ++ list = *current->bio_list; ++ bio_list_init(current->bio_list); ++ ++ while ((bio = bio_list_pop(&list))) { ++ struct bio_set *bs = bio->bi_pool; ++ if (unlikely(!bs) || bs == fs_bio_set) { ++ bio_list_add(current->bio_list, bio); ++ continue; ++ } ++ ++ spin_lock(&bs->rescue_lock); ++ bio_list_add(&bs->rescue_list, bio); ++ queue_work(bs->rescue_workqueue, &bs->rescue_work); ++ spin_unlock(&bs->rescue_lock); ++ } ++} ++ ++static void dm_offload_start(struct dm_offload *o) ++{ ++ blk_start_plug(&o->plug); ++ o->cb.callback = flush_current_bio_list; ++ list_add(&o->cb.list, ¤t->plug->cb_list); ++} ++ ++static void dm_offload_end(struct dm_offload *o) ++{ ++ list_del(&o->cb.list); ++ blk_finish_plug(&o->plug); ++} ++ + static void __map_bio(struct dm_target_io *tio) + { + int r; + sector_t sector; + struct mapped_device *md; ++ struct dm_offload o; + struct bio *clone = &tio->clone; + struct dm_target *ti = tio->ti; + +@@ -994,7 +1045,11 @@ static void __map_bio(struct dm_target_io *tio) + */ + atomic_inc(&tio->io->io_count); + sector = clone->bi_sector; ++ ++ dm_offload_start(&o); + r = ti->type->map(ti, clone); ++ dm_offload_end(&o); ++ + if (r == DM_MAPIO_REMAPPED) { + /* the bio has been remapped so dispatch it */ + +diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig +index f981d50a2a8c..936ef2f881f0 100644 +--- a/drivers/media/i2c/Kconfig ++++ b/drivers/media/i2c/Kconfig +@@ -549,6 +549,7 @@ config VIDEO_S5K6AA + config VIDEO_S5K4ECGX + tristate "Samsung S5K4ECGX sensor support" + depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API ++ select CRC32 + ---help--- + This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M + camera sensor with an embedded SoC image signal processor. 
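
Several of the input/USB fixes above (cm109, ims-pcu, yealink, hanwang, and ushc further down) share one shape: probe() used to dereference endpoint[0] without confirming the interface has any endpoints, which a crafted device can violate. A user-space model of the added guard (the struct fields mimic the few <linux/usb.h> members involved; everything else is scaffolding):

    /* endpoint_guard.c - model of the bNumEndpoints check added in the
     * probe() fixes above.
     */
    #include <stdio.h>

    struct usb_endpoint_descriptor { unsigned char bEndpointAddress; };
    struct usb_host_endpoint { struct usb_endpoint_descriptor desc; };
    struct usb_interface_descriptor { unsigned char bNumEndpoints; };
    struct usb_host_interface {
        struct usb_interface_descriptor desc;
        struct usb_host_endpoint *endpoint;
    };

    #define ENODEV 19

    static int probe(struct usb_host_interface *alt)
    {
        if (alt->desc.bNumEndpoints < 1)   /* the added guard */
            return -ENODEV;
        /* only now is endpoint[0] known to exist */
        printf("ep0 addr 0x%02x\n", alt->endpoint[0].desc.bEndpointAddress);
        return 0;
    }

    int main(void)
    {
        struct usb_host_interface empty = { .desc = { .bNumEndpoints = 0 } };

        printf("probe(empty) = %d\n", probe(&empty));
        return 0;
    }
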
+diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c +index 63b42252166a..7a754ec826ac 100644 +--- a/drivers/media/rc/ite-cir.c ++++ b/drivers/media/rc/ite-cir.c +@@ -263,6 +263,8 @@ static void ite_set_carrier_params(struct ite_dev *dev) + + if (allowance > ITE_RXDCR_MAX) + allowance = ITE_RXDCR_MAX; ++ ++ use_demodulator = true; + } + } + +diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c +index 9771cd83c06e..3a615e4c4991 100644 +--- a/drivers/media/tuners/tuner-xc2028.c ++++ b/drivers/media/tuners/tuner-xc2028.c +@@ -289,6 +289,14 @@ static void free_firmware(struct xc2028_data *priv) + int i; + tuner_dbg("%s called\n", __func__); + ++ /* free allocated f/w string */ ++ if (priv->fname != firmware_name) ++ kfree(priv->fname); ++ priv->fname = NULL; ++ ++ priv->state = XC2028_NO_FIRMWARE; ++ memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); ++ + if (!priv->firm) + return; + +@@ -299,9 +307,6 @@ static void free_firmware(struct xc2028_data *priv) + + priv->firm = NULL; + priv->firm_size = 0; +- priv->state = XC2028_NO_FIRMWARE; +- +- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); + } + + static int load_all_firmwares(struct dvb_frontend *fe, +@@ -890,9 +895,9 @@ read_not_reliable: + return 0; + + fail: ++ free_firmware(priv); + priv->state = XC2028_SLEEP; + +- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); + if (retry_count < 8) { + msleep(50); + retry_count++; +@@ -1314,11 +1319,8 @@ static int xc2028_dvb_release(struct dvb_frontend *fe) + mutex_lock(&xc2028_list_mutex); + + /* only perform final cleanup if this is the last instance */ +- if (hybrid_tuner_report_instance_count(priv) == 1) { ++ if (hybrid_tuner_report_instance_count(priv) == 1) + free_firmware(priv); +- kfree(priv->ctrl.fname); +- priv->ctrl.fname = NULL; +- } + + if (priv) + hybrid_tuner_release_state(priv); +@@ -1381,16 +1383,8 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) + + /* + * Copy the config data. +- * For the firmware name, keep a local copy of the string, +- * in order to avoid troubles during device release. + */ +- kfree(priv->ctrl.fname); + memcpy(&priv->ctrl, p, sizeof(priv->ctrl)); +- if (p->fname) { +- priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL); +- if (priv->ctrl.fname == NULL) +- rc = -ENOMEM; +- } + + /* + * If firmware name changed, frees firmware. As free_firmware will +@@ -1405,10 +1399,15 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) + + if (priv->state == XC2028_NO_FIRMWARE) { + if (!firmware_name[0]) +- priv->fname = priv->ctrl.fname; ++ priv->fname = kstrdup(p->fname, GFP_KERNEL); + else + priv->fname = firmware_name; + ++ if (!priv->fname) { ++ rc = -ENOMEM; ++ goto unlock; ++ } ++ + rc = request_firmware_nowait(THIS_MODULE, 1, + priv->fname, + priv->i2c_props.adap->dev.parent, +@@ -1421,6 +1420,7 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) + } else + priv->state = XC2028_WAITING_FIRMWARE; + } ++unlock: + mutex_unlock(&priv->lock); + + return rc; +diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c +index cd962be860ca..7e743958dbce 100644 +--- a/drivers/media/usb/uvc/uvc_queue.c ++++ b/drivers/media/usb/uvc/uvc_queue.c +@@ -375,7 +375,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue, + nextbuf = NULL; + spin_unlock_irqrestore(&queue->irqlock, flags); + +- buf->state = buf->error ? VB2_BUF_STATE_ERROR : UVC_BUF_STATE_DONE; ++ buf->state = buf->error ? 
UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE; + vb2_set_plane_payload(&buf->buf, 0, buf->bytesused); + vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE); + +diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c +index ecc137ffa8c3..a28f434147fe 100644 +--- a/drivers/mfd/pm8921-core.c ++++ b/drivers/mfd/pm8921-core.c +@@ -173,11 +173,12 @@ static int pm8921_remove(struct platform_device *pdev) + drvdata = platform_get_drvdata(pdev); + if (drvdata) + pmic = drvdata->pm_chip_data; +- if (pmic) ++ if (pmic) { + mfd_remove_devices(pmic->dev); +- if (pmic->irq_chip) { +- pm8xxx_irq_exit(pmic->irq_chip); +- pmic->irq_chip = NULL; ++ if (pmic->irq_chip) { ++ pm8xxx_irq_exit(pmic->irq_chip); ++ pmic->irq_chip = NULL; ++ } + } + platform_set_drvdata(pdev, NULL); + kfree(pmic); +diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c +index 759714ed6bee..abad0b49bd7a 100644 +--- a/drivers/mmc/card/mmc_test.c ++++ b/drivers/mmc/card/mmc_test.c +@@ -795,7 +795,7 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test, + struct mmc_async_req *cur_areq = &test_areq[0].areq; + struct mmc_async_req *other_areq = &test_areq[1].areq; + int i; +- int ret; ++ int ret = RESULT_OK; + + test_areq[0].test = test; + test_areq[1].test = test; +diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c +index f3a423213108..01951cd6599c 100644 +--- a/drivers/mmc/host/mxs-mmc.c ++++ b/drivers/mmc/host/mxs-mmc.c +@@ -312,6 +312,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host) + cmd0 = BF_SSP(cmd->opcode, CMD0_CMD); + cmd1 = cmd->arg; + ++ if (cmd->opcode == MMC_STOP_TRANSMISSION) ++ cmd0 |= BM_SSP_CMD0_APPEND_8CYC; ++ + if (host->sdio_irq_en) { + ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; + cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; +@@ -420,8 +423,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) + ssp->base + HW_SSP_BLOCK_SIZE); + } + +- if ((cmd->opcode == MMC_STOP_TRANSMISSION) || +- (cmd->opcode == SD_IO_RW_EXTENDED)) ++ if (cmd->opcode == SD_IO_RW_EXTENDED) + cmd0 |= BM_SSP_CMD0_APPEND_8CYC; + + cmd1 = cmd->arg; +diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c +index c0105a2e269a..d5493a5a7e7c 100644 +--- a/drivers/mmc/host/ushc.c ++++ b/drivers/mmc/host/ushc.c +@@ -426,6 +426,9 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id + struct ushc_data *ushc; + int ret; + ++ if (intf->cur_altsetting->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev); + if (mmc == NULL) + return -ENOMEM; +diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig +index 50543f166215..faab8cdc1c9b 100644 +--- a/drivers/mtd/nand/Kconfig ++++ b/drivers/mtd/nand/Kconfig +@@ -537,7 +537,7 @@ config MTD_NAND_FSMC + Flexible Static Memory Controller (FSMC) + + config MTD_NAND_XWAY +- tristate "Support for NAND on Lantiq XWAY SoC" ++ bool "Support for NAND on Lantiq XWAY SoC" + depends on LANTIQ && SOC_TYPE_XWAY + select MTD_NAND_PLATFORM + help +diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c +index 004e4231af67..528597f65937 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/cq.c ++++ b/drivers/net/ethernet/mellanox/mlx4/cq.c +@@ -57,13 +57,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn) + { + struct mlx4_cq *cq; + ++ rcu_read_lock(); + cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, + cqn & (dev->caps.num_cqs - 1)); ++ rcu_read_unlock(); ++ + if (!cq) { + mlx4_dbg(dev, "Completion event for bogus 
CQ %08x\n", cqn); + return; + } + ++ /* Acessing the CQ outside of rcu_read_lock is safe, because ++ * the CQ is freed only after interrupt handling is completed. ++ */ + ++cq->arm_sn; + + cq->comp(cq); +@@ -74,23 +80,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type) + struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; + struct mlx4_cq *cq; + +- spin_lock(&cq_table->lock); +- ++ rcu_read_lock(); + cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1)); +- if (cq) +- atomic_inc(&cq->refcount); +- +- spin_unlock(&cq_table->lock); ++ rcu_read_unlock(); + + if (!cq) { +- mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn); ++ mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn); + return; + } + ++ /* Acessing the CQ outside of rcu_read_lock is safe, because ++ * the CQ is freed only after interrupt handling is completed. ++ */ + cq->event(cq, event_type); +- +- if (atomic_dec_and_test(&cq->refcount)) +- complete(&cq->free); + } + + static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, +@@ -261,9 +263,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, + if (err) + return err; + +- spin_lock_irq(&cq_table->lock); ++ spin_lock(&cq_table->lock); + err = radix_tree_insert(&cq_table->tree, cq->cqn, cq); +- spin_unlock_irq(&cq_table->lock); ++ spin_unlock(&cq_table->lock); + if (err) + goto err_icm; + +@@ -303,9 +305,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, + return 0; + + err_radix: +- spin_lock_irq(&cq_table->lock); ++ spin_lock(&cq_table->lock); + radix_tree_delete(&cq_table->tree, cq->cqn); +- spin_unlock_irq(&cq_table->lock); ++ spin_unlock(&cq_table->lock); + + err_icm: + mlx4_cq_free_icm(dev, cq->cqn); +@@ -324,11 +326,11 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) + if (err) + mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); + +- synchronize_irq(priv->eq_table.eq[cq->vector].irq); +- +- spin_lock_irq(&cq_table->lock); ++ spin_lock(&cq_table->lock); + radix_tree_delete(&cq_table->tree, cq->cqn); +- spin_unlock_irq(&cq_table->lock); ++ spin_unlock(&cq_table->lock); ++ ++ synchronize_irq(priv->eq_table.eq[cq->vector].irq); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c +index 02aee1ebd203..2a541500ad2e 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c +@@ -350,8 +350,14 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) + ring->cqn = priv->rx_cq[ring_ind].mcq.cqn; + + ring->stride = stride; +- if (ring->stride <= TXBB_SIZE) ++ if (ring->stride <= TXBB_SIZE) { ++ /* Stamp first unused send wqe */ ++ __be32 *ptr = (__be32 *)ring->buf; ++ __be32 stamp = cpu_to_be32(1 << STAMP_SHIFT); ++ *ptr = stamp; ++ /* Move pointer to start of rx section */ + ring->buf += TXBB_SIZE; ++ } + + ring->log_stride = ffs(ring->stride) - 1; + ring->buf_size = ring->size * ring->stride; +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index e9eab29db7be..5cb09ecfd75f 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -2697,12 +2697,6 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, + spin_lock_init(&priv->lock); + spin_lock_init(&priv->tx_lock); + +- ret = register_netdev(ndev); +- if (ret) { +- pr_err("%s: ERROR %i registering the device\n", 
__func__, ret); +- goto error_netdev_register; +- } +- + priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME); + if (IS_ERR(priv->stmmac_clk)) { + pr_warn("%s: warning: cannot get CSR clock\n", __func__); +@@ -2733,13 +2727,23 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, + } + } + ++ ret = register_netdev(ndev); ++ if (ret) { ++ netdev_err(priv->dev, "%s: ERROR %i registering the device\n", ++ __func__, ret); ++ goto error_netdev_register; ++ } ++ + return priv; + ++error_netdev_register: ++ if (priv->pcs != STMMAC_PCS_RGMII && ++ priv->pcs != STMMAC_PCS_TBI && ++ priv->pcs != STMMAC_PCS_RTBI) ++ stmmac_mdio_unregister(ndev); + error_mdio_register: + clk_put(priv->stmmac_clk); + error_clk_get: +- unregister_netdev(ndev); +-error_netdev_register: + netif_napi_del(&priv->napi); + error_free_netdev: + free_netdev(ndev); +diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c +index 299d35552a3e..bd245c3039ec 100644 +--- a/drivers/net/phy/phy.c ++++ b/drivers/net/phy/phy.c +@@ -474,7 +474,7 @@ void phy_stop_machine(struct phy_device *phydev) + cancel_delayed_work_sync(&phydev->state_queue); + + mutex_lock(&phydev->lock); +- if (phydev->state > PHY_UP) ++ if (phydev->state > PHY_UP && phydev->state != PHY_HALTED) + phydev->state = PHY_UP; + mutex_unlock(&phydev->lock); + +diff --git a/drivers/net/tun.c b/drivers/net/tun.c +index 582497103fe8..ea6ada39db15 100644 +--- a/drivers/net/tun.c ++++ b/drivers/net/tun.c +@@ -1272,12 +1272,16 @@ static ssize_t tun_put_user(struct tun_struct *tun, + { + struct tun_pi pi = { 0, skb->protocol }; + ssize_t total = 0; ++ int vnet_hdr_sz = 0; ++ ++ if (tun->flags & TUN_VNET_HDR) ++ vnet_hdr_sz = tun->vnet_hdr_sz; + + if (!(tun->flags & TUN_NO_PI)) { + if ((len -= sizeof(pi)) < 0) + return -EINVAL; + +- if (len < skb->len) { ++ if (len < skb->len + vnet_hdr_sz) { + /* Packet will be striped */ + pi.flags |= TUN_PKT_STRIP; + } +@@ -1287,9 +1291,9 @@ static ssize_t tun_put_user(struct tun_struct *tun, + total += sizeof(pi); + } + +- if (tun->flags & TUN_VNET_HDR) { ++ if (vnet_hdr_sz) { + struct virtio_net_hdr gso = { 0 }; /* no info leak */ +- if ((len -= tun->vnet_hdr_sz) < 0) ++ if ((len -= vnet_hdr_sz) < 0) + return -EINVAL; + + if (skb_is_gso(skb)) { +@@ -1332,7 +1336,7 @@ static ssize_t tun_put_user(struct tun_struct *tun, + if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total, + sizeof(gso)))) + return -EFAULT; +- total += tun->vnet_hdr_sz; ++ total += vnet_hdr_sz; + } + + len = min_t(int, skb->len, len); +diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c +index 03e8a15d7deb..f32a57ed1d13 100644 +--- a/drivers/net/usb/pegasus.c ++++ b/drivers/net/usb/pegasus.c +@@ -126,40 +126,61 @@ static void async_ctrl_callback(struct urb *urb) + + static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) + { ++ u8 *buf; + int ret; + ++ buf = kmalloc(size, GFP_NOIO); ++ if (!buf) ++ return -ENOMEM; ++ + ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0), + PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0, +- indx, data, size, 1000); ++ indx, buf, size, 1000); + if (ret < 0) + netif_dbg(pegasus, drv, pegasus->net, + "%s returned %d\n", __func__, ret); ++ else if (ret <= size) ++ memcpy(data, buf, ret); ++ kfree(buf); + return ret; + } + +-static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) ++static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, ++ const void *data) + { ++ u8 *buf; + int ret; + ++ buf = kmemdup(data, size, GFP_NOIO); ++ if 
(!buf) ++ return -ENOMEM; ++ + ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), + PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0, +- indx, data, size, 100); ++ indx, buf, size, 100); + if (ret < 0) + netif_dbg(pegasus, drv, pegasus->net, + "%s returned %d\n", __func__, ret); ++ kfree(buf); + return ret; + } + + static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data) + { ++ u8 *buf; + int ret; + ++ buf = kmemdup(&data, 1, GFP_NOIO); ++ if (!buf) ++ return -ENOMEM; ++ + ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), + PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data, +- indx, &data, 1, 1000); ++ indx, buf, 1, 1000); + if (ret < 0) + netif_dbg(pegasus, drv, pegasus->net, + "%s returned %d\n", __func__, ret); ++ kfree(buf); + return ret; + } + +diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c +index 6cbdac67f3a0..59d6a3a5830a 100644 +--- a/drivers/net/usb/rtl8150.c ++++ b/drivers/net/usb/rtl8150.c +@@ -156,16 +156,36 @@ static const char driver_name [] = "rtl8150"; + */ + static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) + { +- return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), +- RTL8150_REQ_GET_REGS, RTL8150_REQT_READ, +- indx, 0, data, size, 500); ++ void *buf; ++ int ret; ++ ++ buf = kmalloc(size, GFP_NOIO); ++ if (!buf) ++ return -ENOMEM; ++ ++ ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), ++ RTL8150_REQ_GET_REGS, RTL8150_REQT_READ, ++ indx, 0, buf, size, 500); ++ if (ret > 0 && ret <= size) ++ memcpy(data, buf, ret); ++ kfree(buf); ++ return ret; + } + +-static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) ++static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data) + { +- return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), +- RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE, +- indx, 0, data, size, 500); ++ void *buf; ++ int ret; ++ ++ buf = kmemdup(data, size, GFP_NOIO); ++ if (!buf) ++ return -ENOMEM; ++ ++ ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), ++ RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE, ++ indx, 0, buf, size, 500); ++ kfree(buf); ++ return ret; + } + + static void async_set_reg_cb(struct urb *urb) +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c +index a1dc186c6f66..8912ba83fd77 100644 +--- a/drivers/net/vxlan.c ++++ b/drivers/net/vxlan.c +@@ -1386,7 +1386,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) + + if (data[IFLA_VXLAN_ID]) { + __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]); +- if (id >= VXLAN_VID_MASK) ++ if (id >= VXLAN_N_VID) + return -ERANGE; + } + +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +index 874f6570bd1c..d83ad9df6603 100644 +--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h ++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +@@ -71,13 +71,13 @@ + #define AR9300_OTP_BASE \ + ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000) + #define AR9300_OTP_STATUS \ +- ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30018 : 0x15f18) ++ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x31018 : 0x15f18) + #define AR9300_OTP_STATUS_TYPE 0x7 + #define AR9300_OTP_STATUS_VALID 0x4 + #define AR9300_OTP_STATUS_ACCESS_BUSY 0x2 + #define AR9300_OTP_STATUS_SM_BUSY 0x1 + #define AR9300_OTP_READ_DATA \ +- ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3001c : 0x15f1c) ++ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 
0x3101c : 0x15f1c) + + enum targetPowerHTRates { + HT_TARGET_RATE_0_8_16, +diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c +index b29e20b7862f..ffb2de5c4d50 100644 +--- a/drivers/pci/hotplug/rpadlpar_core.c ++++ b/drivers/pci/hotplug/rpadlpar_core.c +@@ -259,8 +259,13 @@ static int dlpar_add_phb(char *drc_name, struct device_node *dn) + + static int dlpar_add_vio_slot(char *drc_name, struct device_node *dn) + { +- if (vio_find_node(dn)) ++ struct vio_dev *vio_dev; ++ ++ vio_dev = vio_find_node(dn); ++ if (vio_dev) { ++ put_device(&vio_dev->dev); + return -EINVAL; ++ } + + if (!vio_register_device_node(dn)) { + printk(KERN_ERR +@@ -336,6 +341,9 @@ static int dlpar_remove_vio_slot(char *drc_name, struct device_node *dn) + return -EINVAL; + + vio_unregister_device(vio_dev); ++ ++ put_device(&vio_dev->dev); ++ + return 0; + } + +diff --git a/drivers/platform/goldfish/pdev_bus.c b/drivers/platform/goldfish/pdev_bus.c +index 92cc4cfafde5..6bcd57cb2f75 100644 +--- a/drivers/platform/goldfish/pdev_bus.c ++++ b/drivers/platform/goldfish/pdev_bus.c +@@ -153,23 +153,26 @@ static int goldfish_new_pdev(void) + static irqreturn_t goldfish_pdev_bus_interrupt(int irq, void *dev_id) + { + irqreturn_t ret = IRQ_NONE; ++ + while (1) { + u32 op = readl(pdev_bus_base + PDEV_BUS_OP); +- switch (op) { +- case PDEV_BUS_OP_DONE: +- return IRQ_NONE; + ++ switch (op) { + case PDEV_BUS_OP_REMOVE_DEV: + goldfish_pdev_remove(); ++ ret = IRQ_HANDLED; + break; + + case PDEV_BUS_OP_ADD_DEV: + goldfish_new_pdev(); ++ ret = IRQ_HANDLED; + break; ++ ++ case PDEV_BUS_OP_DONE: ++ default: ++ return ret; + } +- ret = IRQ_HANDLED; + } +- return ret; + } + + static int goldfish_pdev_bus_probe(struct platform_device *pdev) +diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c +index f59683aa13d5..fc6d84e202e8 100644 +--- a/drivers/platform/x86/intel_mid_powerbtn.c ++++ b/drivers/platform/x86/intel_mid_powerbtn.c +@@ -78,8 +78,8 @@ static int mfld_pb_probe(struct platform_device *pdev) + + input_set_capability(input, EV_KEY, KEY_POWER); + +- error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_NO_SUSPEND, +- DRIVER_NAME, input); ++ error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_NO_SUSPEND | ++ IRQF_ONESHOT, DRIVER_NAME, input); + if (error) { + dev_err(&pdev->dev, "Unable to request irq %d for mfld power" + "button\n", irq); +diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c +index bde5255200dc..1d1e585bd034 100644 +--- a/drivers/s390/cio/qdio_thinint.c ++++ b/drivers/s390/cio/qdio_thinint.c +@@ -142,11 +142,11 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq) + struct qdio_q *q; + int i; + +- for_each_input_queue(irq, q, i) { +- if (!references_shared_dsci(irq) && +- has_multiple_inq_on_dsci(irq)) +- xchg(q->irq_ptr->dsci, 0); ++ if (!references_shared_dsci(irq) && ++ has_multiple_inq_on_dsci(irq)) ++ xchg(irq->dsci, 0); + ++ for_each_input_queue(irq, q, i) { + if (q->u.in.queue_start_poll) { + /* skip if polling is enabled or already in work */ + if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, +diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c +index c846a63ea672..bf13e73ecabc 100644 +--- a/drivers/s390/scsi/zfcp_dbf.c ++++ b/drivers/s390/scsi/zfcp_dbf.c +@@ -282,11 +282,12 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter, + + + /** +- * zfcp_dbf_rec_run - trace event related to running recovery ++ * zfcp_dbf_rec_run_lvl - trace event related to 
running recovery ++ * @level: trace level to be used for event + * @tag: identifier for event + * @erp: erp_action running + */ +-void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp) ++void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp) + { + struct zfcp_dbf *dbf = erp->adapter->dbf; + struct zfcp_dbf_rec *rec = &dbf->rec_buf; +@@ -312,11 +313,21 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp) + else + rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter); + +- debug_event(dbf->rec, 1, rec, sizeof(*rec)); ++ debug_event(dbf->rec, level, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->rec_lock, flags); + } + + /** ++ * zfcp_dbf_rec_run - trace event related to running recovery ++ * @tag: identifier for event ++ * @erp: erp_action running ++ */ ++void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp) ++{ ++ zfcp_dbf_rec_run_lvl(1, tag, erp); ++} ++ ++/** + * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery + * @tag: identifier for event + * @wka_port: well known address port +diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h +index 440aa619da1d..a8165f142550 100644 +--- a/drivers/s390/scsi/zfcp_dbf.h ++++ b/drivers/s390/scsi/zfcp_dbf.h +@@ -2,7 +2,7 @@ + * zfcp device driver + * debug feature declarations + * +- * Copyright IBM Corp. 2008, 2015 ++ * Copyright IBM Corp. 2008, 2016 + */ + + #ifndef ZFCP_DBF_H +@@ -283,6 +283,30 @@ struct zfcp_dbf { + struct zfcp_dbf_scsi scsi_buf; + }; + ++/** ++ * zfcp_dbf_hba_fsf_resp_suppress - true if we should not trace by default ++ * @req: request that has been completed ++ * ++ * Returns true if FCP response with only benign residual under count. ++ */ ++static inline ++bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req) ++{ ++ struct fsf_qtcb *qtcb = req->qtcb; ++ u32 fsf_stat = qtcb->header.fsf_status; ++ struct fcp_resp *fcp_rsp; ++ u8 rsp_flags, fr_status; ++ ++ if (qtcb->prefix.qtcb_type != FSF_IO_COMMAND) ++ return false; /* not an FCP response */ ++ fcp_rsp = (struct fcp_resp *)&qtcb->bottom.io.fcp_rsp; ++ rsp_flags = fcp_rsp->fr_flags; ++ fr_status = fcp_rsp->fr_status; ++ return (fsf_stat == FSF_FCP_RSP_AVAILABLE) && ++ (rsp_flags == FCP_RESID_UNDER) && ++ (fr_status == SAM_STAT_GOOD); ++} ++ + static inline + void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req) + { +@@ -304,7 +328,9 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req) + zfcp_dbf_hba_fsf_resp("fs_perr", 1, req); + + } else if (qtcb->header.fsf_status != FSF_GOOD) { +- zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req); ++ zfcp_dbf_hba_fsf_resp("fs_ferr", ++ zfcp_dbf_hba_fsf_resp_suppress(req) ++ ? 5 : 1, req); + + } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) || + (req->fsf_command == FSF_QTCB_OPEN_LUN)) { +@@ -388,4 +414,15 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag) + _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL); + } + ++/** ++ * zfcp_dbf_scsi_nullcmnd() - trace NULLify of SCSI command in dev/tgt-reset. ++ * @scmnd: SCSI command that was NULLified. ++ * @fsf_req: request that owned @scmnd. 
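
The zfcp_dbf_rec_run()/zfcp_dbf_rec_run_lvl() split above is the standard way to retrofit a verbosity parameter without churning existing callers: the old name becomes a thin wrapper that passes the old default level. In miniature (threshold value hypothetical):

    /* lvl_wrapper.c - retrofit a level parameter, keep the old API. */
    #include <stdio.h>

    static int trace_level = 3;        /* only events <= this are kept */

    static void trace_lvl(int level, const char *tag)
    {
        if (level <= trace_level)
            printf("[lvl %d] %s\n", level, tag);
    }

    /* old entry point: unchanged signature, old default level */
    static void trace(const char *tag)
    {
        trace_lvl(1, tag);
    }

    int main(void)
    {
        trace("always-kept event");             /* level 1, as before   */
        trace_lvl(4, "verbose unblock check");  /* suppressed here      */
        return 0;
    }
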
++ */ ++static inline void zfcp_dbf_scsi_nullcmnd(struct scsi_cmnd *scmnd, ++ struct zfcp_fsf_req *fsf_req) ++{ ++ _zfcp_dbf_scsi("scfc__1", 3, scmnd, fsf_req); ++} ++ + #endif /* ZFCP_DBF_H */ +diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c +index b4cd26d24152..f7e720e11093 100644 +--- a/drivers/s390/scsi/zfcp_erp.c ++++ b/drivers/s390/scsi/zfcp_erp.c +@@ -3,7 +3,7 @@ + * + * Error Recovery Procedures (ERP). + * +- * Copyright IBM Corp. 2002, 2015 ++ * Copyright IBM Corp. 2002, 2016 + */ + + #define KMSG_COMPONENT "zfcp" +@@ -1212,6 +1212,62 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) + } + } + ++/** ++ * zfcp_erp_try_rport_unblock - unblock rport if no more/new recovery ++ * @port: zfcp_port whose fc_rport we should try to unblock ++ */ ++static void zfcp_erp_try_rport_unblock(struct zfcp_port *port) ++{ ++ unsigned long flags; ++ struct zfcp_adapter *adapter = port->adapter; ++ int port_status; ++ struct Scsi_Host *shost = adapter->scsi_host; ++ struct scsi_device *sdev; ++ ++ write_lock_irqsave(&adapter->erp_lock, flags); ++ port_status = atomic_read(&port->status); ++ if ((port_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 || ++ (port_status & (ZFCP_STATUS_COMMON_ERP_INUSE | ++ ZFCP_STATUS_COMMON_ERP_FAILED)) != 0) { ++ /* new ERP of severity >= port triggered elsewhere meanwhile or ++ * local link down (adapter erp_failed but not clear unblock) ++ */ ++ zfcp_dbf_rec_run_lvl(4, "ertru_p", &port->erp_action); ++ write_unlock_irqrestore(&adapter->erp_lock, flags); ++ return; ++ } ++ spin_lock(shost->host_lock); ++ __shost_for_each_device(sdev, shost) { ++ struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev); ++ int lun_status; ++ ++ if (zsdev->port != port) ++ continue; ++ /* LUN under port of interest */ ++ lun_status = atomic_read(&zsdev->status); ++ if ((lun_status & ZFCP_STATUS_COMMON_ERP_FAILED) != 0) ++ continue; /* unblock rport despite failed LUNs */ ++ /* LUN recovery not given up yet [maybe follow-up pending] */ ++ if ((lun_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 || ++ (lun_status & ZFCP_STATUS_COMMON_ERP_INUSE) != 0) { ++ /* LUN blocked: ++ * not yet unblocked [LUN recovery pending] ++ * or meanwhile blocked [new LUN recovery triggered] ++ */ ++ zfcp_dbf_rec_run_lvl(4, "ertru_l", &zsdev->erp_action); ++ spin_unlock(shost->host_lock); ++ write_unlock_irqrestore(&adapter->erp_lock, flags); ++ return; ++ } ++ } ++ /* now port has no child or all children have completed recovery, ++ * and no ERP of severity >= port was meanwhile triggered elsewhere ++ */ ++ zfcp_scsi_schedule_rport_register(port); ++ spin_unlock(shost->host_lock); ++ write_unlock_irqrestore(&adapter->erp_lock, flags); ++} ++ + static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) + { + struct zfcp_adapter *adapter = act->adapter; +@@ -1222,6 +1278,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) + case ZFCP_ERP_ACTION_REOPEN_LUN: + if (!(act->status & ZFCP_STATUS_ERP_NO_REF)) + scsi_device_put(sdev); ++ zfcp_erp_try_rport_unblock(port); + break; + + case ZFCP_ERP_ACTION_REOPEN_PORT: +@@ -1232,7 +1289,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) + */ + if (act->step != ZFCP_ERP_STEP_UNINITIALIZED) + if (result == ZFCP_ERP_SUCCEEDED) +- zfcp_scsi_schedule_rport_register(port); ++ zfcp_erp_try_rport_unblock(port); + /* fall through */ + case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: + put_device(&port->dev); +diff --git a/drivers/s390/scsi/zfcp_ext.h 
b/drivers/s390/scsi/zfcp_ext.h +index 01527c31d1da..fdef6a6fe06b 100644 +--- a/drivers/s390/scsi/zfcp_ext.h ++++ b/drivers/s390/scsi/zfcp_ext.h +@@ -3,7 +3,7 @@ + * + * External function declarations. + * +- * Copyright IBM Corp. 2002, 2015 ++ * Copyright IBM Corp. 2002, 2016 + */ + + #ifndef ZFCP_EXT_H +@@ -49,6 +49,8 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *); + extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *, + struct zfcp_port *, struct scsi_device *, u8, u8); + extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *); ++extern void zfcp_dbf_rec_run_lvl(int level, char *tag, ++ struct zfcp_erp_action *erp); + extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64); + extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *); + extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *); +diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c +index f246097b7c6d..ad5718401eab 100644 +--- a/drivers/s390/scsi/zfcp_fsf.c ++++ b/drivers/s390/scsi/zfcp_fsf.c +@@ -1607,7 +1607,7 @@ out: + int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) + { + struct zfcp_qdio *qdio = wka_port->adapter->qdio; +- struct zfcp_fsf_req *req = NULL; ++ struct zfcp_fsf_req *req; + int retval = -EIO; + + spin_lock_irq(&qdio->req_q_lock); +@@ -1636,7 +1636,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) + zfcp_fsf_req_free(req); + out: + spin_unlock_irq(&qdio->req_q_lock); +- if (req && !IS_ERR(req)) ++ if (!retval) + zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id); + return retval; + } +@@ -1662,7 +1662,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) + int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) + { + struct zfcp_qdio *qdio = wka_port->adapter->qdio; +- struct zfcp_fsf_req *req = NULL; ++ struct zfcp_fsf_req *req; + int retval = -EIO; + + spin_lock_irq(&qdio->req_q_lock); +@@ -1691,7 +1691,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) + zfcp_fsf_req_free(req); + out: + spin_unlock_irq(&qdio->req_q_lock); +- if (req && !IS_ERR(req)) ++ if (!retval) + zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id); + return retval; + } +diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h +index 8cad41ffb6b8..358b92ece8d0 100644 +--- a/drivers/s390/scsi/zfcp_fsf.h ++++ b/drivers/s390/scsi/zfcp_fsf.h +@@ -3,7 +3,7 @@ + * + * Interface to the FSF support functions. + * +- * Copyright IBM Corp. 2002, 2015 ++ * Copyright IBM Corp. 2002, 2016 + */ + + #ifndef FSF_H +@@ -86,6 +86,7 @@ + #define FSF_APP_TAG_CHECK_FAILURE 0x00000082 + #define FSF_REF_TAG_CHECK_FAILURE 0x00000083 + #define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD ++#define FSF_FCP_RSP_AVAILABLE 0x000000AF + #define FSF_UNKNOWN_COMMAND 0x000000E2 + #define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3 + #define FSF_INVALID_COMMAND_OPTION 0x000000E5 +diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h +index 7c2c6194dfca..703fce59befe 100644 +--- a/drivers/s390/scsi/zfcp_reqlist.h ++++ b/drivers/s390/scsi/zfcp_reqlist.h +@@ -4,7 +4,7 @@ + * Data structure and helper functions for tracking pending FSF + * requests. + * +- * Copyright IBM Corp. 2009 ++ * Copyright IBM Corp. 2009, 2016 + */ + + #ifndef ZFCP_REQLIST_H +@@ -180,4 +180,32 @@ static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl, + spin_unlock_irqrestore(&rl->lock, flags); + } + ++/** ++ * zfcp_reqlist_apply_for_all() - apply a function to every request. 
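
In the zfcp_fsf.c hunks above, the trace call after the unlock is now gated on !retval instead of on the request pointer: on the failure path the request has already been freed, so "req && !IS_ERR(req)" could read freed memory. The shape of that fix, stripped down (the error numbers stand in for -EIO and -ENOMEM):

    /* retval_gate.c - gate post-cleanup logging on the status code,
     * not on a pointer that the failure path has already freed.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct req { unsigned long id; };

    static int send_req(struct req *r)
    {
        (void)r;
        return -5;                  /* pretend the send failed (-EIO) */
    }

    static int do_open(void)
    {
        struct req *req = malloc(sizeof(*req));
        int retval;

        if (!req)
            return -12;             /* -ENOMEM */
        req->id = 42;

        retval = send_req(req);
        if (retval)
            free(req);              /* req is dangling from here on */

        /* buggy form: if (req) printf(... req->id ...)  use-after-free */
        if (!retval)                /* success implies req is still live */
            printf("sent request %lu\n", req->id);

        return retval;
    }

    int main(void)
    {
        printf("do_open() = %d\n", do_open());
        return 0;
    }
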
++ * @rl: the requestlist that contains the target requests. ++ * @f: the function to apply to each request; the first parameter of the ++ * function will be the target-request; the second parameter is the same ++ * pointer as given with the argument @data. ++ * @data: freely chosen argument; passed through to @f as second parameter. ++ * ++ * Uses :c:macro:`list_for_each_entry` to iterate over the lists in the hash- ++ * table (not a 'safe' variant, so don't modify the list). ++ * ++ * Holds @rl->lock over the entire request-iteration. ++ */ ++static inline void ++zfcp_reqlist_apply_for_all(struct zfcp_reqlist *rl, ++ void (*f)(struct zfcp_fsf_req *, void *), void *data) ++{ ++ struct zfcp_fsf_req *req; ++ unsigned long flags; ++ unsigned int i; ++ ++ spin_lock_irqsave(&rl->lock, flags); ++ for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++) ++ list_for_each_entry(req, &rl->buckets[i], list) ++ f(req, data); ++ spin_unlock_irqrestore(&rl->lock, flags); ++} ++ + #endif /* ZFCP_REQLIST_H */ +diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c +index 38ee0df633a3..66c37e77ac7c 100644 +--- a/drivers/s390/scsi/zfcp_scsi.c ++++ b/drivers/s390/scsi/zfcp_scsi.c +@@ -3,7 +3,7 @@ + * + * Interface to Linux SCSI midlayer. + * +- * Copyright IBM Corp. 2002, 2015 ++ * Copyright IBM Corp. 2002, 2016 + */ + + #define KMSG_COMPONENT "zfcp" +@@ -109,9 +109,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt) + } + + if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) { +- /* This could be either +- * open LUN pending: this is temporary, will result in +- * open LUN or ERP_FAILED, so retry command ++ /* This could be + * call to rport_delete pending: mimic retry from + * fc_remote_port_chkready until rport is BLOCKED + */ +@@ -230,6 +228,57 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) + return retval; + } + ++struct zfcp_scsi_req_filter { ++ u8 tmf_scope; ++ u32 lun_handle; ++ u32 port_handle; ++}; ++ ++static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data) ++{ ++ struct zfcp_scsi_req_filter *filter = ++ (struct zfcp_scsi_req_filter *)data; ++ ++ /* already aborted - prevent side-effects - or not a SCSI command */ ++ if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND) ++ return; ++ ++ /* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */ ++ if (old_req->qtcb->header.port_handle != filter->port_handle) ++ return; ++ ++ if (filter->tmf_scope == FCP_TMF_LUN_RESET && ++ old_req->qtcb->header.lun_handle != filter->lun_handle) ++ return; ++ ++ zfcp_dbf_scsi_nullcmnd((struct scsi_cmnd *)old_req->data, old_req); ++ old_req->data = NULL; ++} ++ ++static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags) ++{ ++ struct zfcp_adapter *adapter = zsdev->port->adapter; ++ struct zfcp_scsi_req_filter filter = { ++ .tmf_scope = FCP_TMF_TGT_RESET, ++ .port_handle = zsdev->port->handle, ++ }; ++ unsigned long flags; ++ ++ if (tm_flags == FCP_TMF_LUN_RESET) { ++ filter.tmf_scope = FCP_TMF_LUN_RESET; ++ filter.lun_handle = zsdev->lun_handle; ++ } ++ ++ /* ++ * abort_lock secures against other processings - in the abort-function ++ * and normal cmnd-handler - of (struct zfcp_fsf_req *)->data ++ */ ++ write_lock_irqsave(&adapter->abort_lock, flags); ++ zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd, ++ &filter); ++ write_unlock_irqrestore(&adapter->abort_lock, flags); ++} ++ + static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) + { + struct 
zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); +@@ -262,8 +311,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) + if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { + zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags); + retval = FAILED; +- } else ++ } else { + zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags); ++ zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags); ++ } + + zfcp_fsf_req_free(fsf_req); + return retval; +diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c +index 7e17107643d4..05c999429ffe 100644 +--- a/drivers/scsi/aacraid/src.c ++++ b/drivers/scsi/aacraid/src.c +@@ -359,16 +359,23 @@ static int aac_src_check_health(struct aac_dev *dev) + u32 status = src_readl(dev, MUnit.OMR); + + /* ++ * Check to see if the board panic'd. ++ */ ++ if (unlikely(status & KERNEL_PANIC)) ++ goto err_blink; ++ ++ /* + * Check to see if the board failed any self tests. + */ + if (unlikely(status & SELF_TEST_FAILED)) +- return -1; ++ goto err_out; + + /* +- * Check to see if the board panic'd. ++ * Check to see if the board failed any self tests. + */ +- if (unlikely(status & KERNEL_PANIC)) +- return (status >> 16) & 0xFF; ++ if (unlikely(status & MONITOR_PANIC)) ++ goto err_out; ++ + /* + * Wait for the adapter to be up and running. + */ +@@ -378,6 +385,12 @@ static int aac_src_check_health(struct aac_dev *dev) + * Everything is OK + */ + return 0; ++ ++err_out: ++ return -1; ++ ++err_blink: ++ return (status > 16) & 0xFF; + } + + /** +diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c +index d2895836f9fa..83e3ca703cd1 100644 +--- a/drivers/scsi/libsas/sas_ata.c ++++ b/drivers/scsi/libsas/sas_ata.c +@@ -219,7 +219,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) + task->num_scatter = qc->n_elem; + } else { + for_each_sg(qc->sg, sg, qc->n_elem, si) +- xfer += sg->length; ++ xfer += sg_dma_len(sg); + + task->total_xfer_len = xfer; + task->num_scatter = si; +diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c +index 1e4479f3331a..55716c5184f7 100644 +--- a/drivers/scsi/mvsas/mv_94xx.c ++++ b/drivers/scsi/mvsas/mv_94xx.c +@@ -621,7 +621,7 @@ static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx) + { + u32 tmp; + tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3)); +- if (tmp && 1 << (slot_idx % 32)) { ++ if (tmp & 1 << (slot_idx % 32)) { + mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx); + mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3), + 1 << (slot_idx % 32)); +diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c +index d104b4378424..fa763dd9a4b7 100644 +--- a/drivers/staging/iio/adc/ad7606_core.c ++++ b/drivers/staging/iio/adc/ad7606_core.c +@@ -185,7 +185,7 @@ static ssize_t ad7606_store_oversampling_ratio(struct device *dev, + mutex_lock(&indio_dev->mlock); + gpio_set_value(st->pdata->gpio_os0, (ret >> 0) & 1); + gpio_set_value(st->pdata->gpio_os1, (ret >> 1) & 1); +- gpio_set_value(st->pdata->gpio_os1, (ret >> 2) & 1); ++ gpio_set_value(st->pdata->gpio_os2, (ret >> 2) & 1); + st->oversampling = lval; + mutex_unlock(&indio_dev->mlock); + +diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c +index c699a3058b39..cfffdd20e435 100644 +--- a/drivers/staging/vt6656/hostap.c ++++ b/drivers/staging/vt6656/hostap.c +@@ -133,7 +133,8 @@ static int hostap_disable_hostapd(struct vnt_private *pDevice, int rtnl_locked) + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n", + 
pDevice->dev->name, pDevice->apdev->name); + } +- free_netdev(pDevice->apdev); ++ if (pDevice->apdev) ++ free_netdev(pDevice->apdev); + pDevice->apdev = NULL; + pDevice->bEnable8021x = false; + pDevice->bEnableHostWEP = false; +diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c +index 75a4e83842c2..a6801e8a8116 100644 +--- a/drivers/target/iscsi/iscsi_target_tpg.c ++++ b/drivers/target/iscsi/iscsi_target_tpg.c +@@ -256,7 +256,6 @@ err_out: + iscsi_release_param_list(tpg->param_list); + tpg->param_list = NULL; + } +- kfree(tpg); + return -ENOMEM; + } + +diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c +index 1b2db9a3038c..66fb07684133 100644 +--- a/drivers/tty/n_hdlc.c ++++ b/drivers/tty/n_hdlc.c +@@ -114,7 +114,7 @@ + #define DEFAULT_TX_BUF_COUNT 3 + + struct n_hdlc_buf { +- struct n_hdlc_buf *link; ++ struct list_head list_item; + int count; + char buf[1]; + }; +@@ -122,8 +122,7 @@ struct n_hdlc_buf { + #define N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe) + + struct n_hdlc_buf_list { +- struct n_hdlc_buf *head; +- struct n_hdlc_buf *tail; ++ struct list_head list; + int count; + spinlock_t spinlock; + }; +@@ -136,7 +135,6 @@ struct n_hdlc_buf_list { + * @backup_tty - TTY to use if tty gets closed + * @tbusy - reentrancy flag for tx wakeup code + * @woke_up - FIXME: describe this field +- * @tbuf - currently transmitting tx buffer + * @tx_buf_list - list of pending transmit frame buffers + * @rx_buf_list - list of received frame buffers + * @tx_free_buf_list - list unused transmit frame buffers +@@ -149,7 +147,6 @@ struct n_hdlc { + struct tty_struct *backup_tty; + int tbusy; + int woke_up; +- struct n_hdlc_buf *tbuf; + struct n_hdlc_buf_list tx_buf_list; + struct n_hdlc_buf_list rx_buf_list; + struct n_hdlc_buf_list tx_free_buf_list; +@@ -159,7 +156,8 @@ struct n_hdlc { + /* + * HDLC buffer list manipulation functions + */ +-static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list); ++static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list, ++ struct n_hdlc_buf *buf); + static void n_hdlc_buf_put(struct n_hdlc_buf_list *list, + struct n_hdlc_buf *buf); + static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list); +@@ -209,16 +207,9 @@ static void flush_tx_queue(struct tty_struct *tty) + { + struct n_hdlc *n_hdlc = tty2n_hdlc(tty); + struct n_hdlc_buf *buf; +- unsigned long flags; + + while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list))) + n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf); +- spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags); +- if (n_hdlc->tbuf) { +- n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf); +- n_hdlc->tbuf = NULL; +- } +- spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags); + } + + static struct tty_ldisc_ops n_hdlc_ldisc = { +@@ -284,7 +275,6 @@ static void n_hdlc_release(struct n_hdlc *n_hdlc) + } else + break; + } +- kfree(n_hdlc->tbuf); + kfree(n_hdlc); + + } /* end of n_hdlc_release() */ +@@ -403,13 +393,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) + n_hdlc->woke_up = 0; + spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags); + +- /* get current transmit buffer or get new transmit */ +- /* buffer from list of pending transmit buffers */ +- +- tbuf = n_hdlc->tbuf; +- if (!tbuf) +- tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list); +- ++ tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list); + while (tbuf) { + if (debuglevel >= DEBUG_LEVEL_INFO) + printk("%s(%d)sending frame %p, count=%d\n", +@@ -421,7 +405,7 @@ static void 
n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) + + /* rollback was possible and has been done */ + if (actual == -ERESTARTSYS) { +- n_hdlc->tbuf = tbuf; ++ n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf); + break; + } + /* if transmit error, throw frame away by */ +@@ -436,10 +420,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) + + /* free current transmit buffer */ + n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf); +- +- /* this tx buffer is done */ +- n_hdlc->tbuf = NULL; +- ++ + /* wait up sleeping writers */ + wake_up_interruptible(&tty->write_wait); + +@@ -449,10 +430,12 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) + if (debuglevel >= DEBUG_LEVEL_INFO) + printk("%s(%d)frame %p pending\n", + __FILE__,__LINE__,tbuf); +- +- /* buffer not accepted by driver */ +- /* set this buffer as pending buffer */ +- n_hdlc->tbuf = tbuf; ++ ++ /* ++ * the buffer was not accepted by driver, ++ * return it back into tx queue ++ */ ++ n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf); + break; + } + } +@@ -750,7 +733,8 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, + int error = 0; + int count; + unsigned long flags; +- ++ struct n_hdlc_buf *buf = NULL; ++ + if (debuglevel >= DEBUG_LEVEL_INFO) + printk("%s(%d)n_hdlc_tty_ioctl() called %d\n", + __FILE__,__LINE__,cmd); +@@ -764,8 +748,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, + /* report count of read data available */ + /* in next available frame (if any) */ + spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags); +- if (n_hdlc->rx_buf_list.head) +- count = n_hdlc->rx_buf_list.head->count; ++ buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list, ++ struct n_hdlc_buf, list_item); ++ if (buf) ++ count = buf->count; + else + count = 0; + spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags); +@@ -777,8 +763,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, + count = tty_chars_in_buffer(tty); + /* add size of next output frame in queue */ + spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags); +- if (n_hdlc->tx_buf_list.head) +- count += n_hdlc->tx_buf_list.head->count; ++ buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list, ++ struct n_hdlc_buf, list_item); ++ if (buf) ++ count += buf->count; + spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags); + error = put_user(count, (int __user *)arg); + break; +@@ -826,14 +814,14 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp, + poll_wait(filp, &tty->write_wait, wait); + + /* set bits for operations that won't block */ +- if (n_hdlc->rx_buf_list.head) ++ if (!list_empty(&n_hdlc->rx_buf_list.list)) + mask |= POLLIN | POLLRDNORM; /* readable */ + if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) + mask |= POLLHUP; + if (tty_hung_up_p(filp)) + mask |= POLLHUP; + if (!tty_is_writelocked(tty) && +- n_hdlc->tx_free_buf_list.head) ++ !list_empty(&n_hdlc->tx_free_buf_list.list)) + mask |= POLLOUT | POLLWRNORM; /* writable */ + } + return mask; +@@ -855,11 +843,16 @@ static struct n_hdlc *n_hdlc_alloc(void) + + memset(n_hdlc, 0, sizeof(*n_hdlc)); + +- n_hdlc_buf_list_init(&n_hdlc->rx_free_buf_list); +- n_hdlc_buf_list_init(&n_hdlc->tx_free_buf_list); +- n_hdlc_buf_list_init(&n_hdlc->rx_buf_list); +- n_hdlc_buf_list_init(&n_hdlc->tx_buf_list); +- ++ spin_lock_init(&n_hdlc->rx_free_buf_list.spinlock); ++ spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock); ++ spin_lock_init(&n_hdlc->rx_buf_list.spinlock); 
++ spin_lock_init(&n_hdlc->tx_buf_list.spinlock);
++
++ INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list);
++ INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list);
++ INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list);
++ INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list);
++
+ /* allocate free rx buffer list */
+ for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
+ buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
+@@ -887,63 +880,65 @@ static struct n_hdlc *n_hdlc_alloc(void)
+ } /* end of n_hdlc_alloc() */
+
+ /**
+- * n_hdlc_buf_list_init - initialize specified HDLC buffer list
+- * @list - pointer to buffer list
++ * n_hdlc_buf_return - put the HDLC buffer after the head of the specified list
++ * @buf_list - pointer to the buffer list
++ * @buf - pointer to the buffer
+ */
+-static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list)
++static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
++ struct n_hdlc_buf *buf)
+ {
+- memset(list, 0, sizeof(*list));
+- spin_lock_init(&list->spinlock);
+-} /* end of n_hdlc_buf_list_init() */
++ unsigned long flags;
++
++ spin_lock_irqsave(&buf_list->spinlock, flags);
++
++ list_add(&buf->list_item, &buf_list->list);
++ buf_list->count++;
++
++ spin_unlock_irqrestore(&buf_list->spinlock, flags);
++}
+
+ /**
+ * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
+- * @list - pointer to buffer list
++ * @buf_list - pointer to buffer list
+ * @buf - pointer to buffer
+ */
+-static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
++static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list,
+ struct n_hdlc_buf *buf)
+ {
+ unsigned long flags;
+- spin_lock_irqsave(&list->spinlock,flags);
+-
+- buf->link=NULL;
+- if (list->tail)
+- list->tail->link = buf;
+- else
+- list->head = buf;
+- list->tail = buf;
+- (list->count)++;
+-
+- spin_unlock_irqrestore(&list->spinlock,flags);
+-
++
++ spin_lock_irqsave(&buf_list->spinlock, flags);
++
++ list_add_tail(&buf->list_item, &buf_list->list);
++ buf_list->count++;
++
++ spin_unlock_irqrestore(&buf_list->spinlock, flags);
+ } /* end of n_hdlc_buf_put() */
+
+ /**
+ * n_hdlc_buf_get - remove and return an HDLC buffer from list
+- * @list - pointer to HDLC buffer list
++ * @buf_list - pointer to HDLC buffer list
+ *
+ * Remove and return an HDLC buffer from the head of the specified HDLC buffer
+ * list.
+ * Returns a pointer to HDLC buffer if available, otherwise %NULL.
+ */
+-static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list)
++static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
+ {
+ unsigned long flags;
+ struct n_hdlc_buf *buf;
+- spin_lock_irqsave(&list->spinlock,flags);
+-
+- buf = list->head;
++
++ spin_lock_irqsave(&buf_list->spinlock, flags);
++
++ buf = list_first_entry_or_null(&buf_list->list,
++ struct n_hdlc_buf, list_item);
+ if (buf) {
+- list->head = buf->link;
+- (list->count)--;
++ list_del(&buf->list_item);
++ buf_list->count--;
+ }
+- if (!list->head)
+- list->tail = NULL;
+-
+- spin_unlock_irqrestore(&list->spinlock,flags);
++
++ spin_unlock_irqrestore(&buf_list->spinlock, flags);
+ return buf;
+-
+ } /* end of n_hdlc_buf_get() */
+
+ static char hdlc_banner[] __initdata =
+diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
+index 4c5506ae5e45..64317898a7cb 100644
+--- a/drivers/usb/class/usbtmc.c
++++ b/drivers/usb/class/usbtmc.c
+@@ -989,7 +989,7 @@ static int usbtmc_probe(struct usb_interface *intf,
+
+ dev_dbg(&intf->dev, "%s called\n", __func__);
+
+- data = kmalloc(sizeof(struct usbtmc_device_data), GFP_KERNEL);
++ data = kzalloc(sizeof(struct usbtmc_device_data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&intf->dev, "Unable to allocate kernel memory\n");
+ return -ENOMEM;
+ }
+@@ -1035,6 +1035,12 @@ static int usbtmc_probe(struct usb_interface *intf,
+ }
+ }
+
++ if (!data->bulk_out || !data->bulk_in) {
++ dev_err(&intf->dev, "bulk endpoints not found\n");
++ retcode = -ENODEV;
++ goto err_put;
++ }
++
+ retcode = get_capabilities(data);
+ if (retcode)
+ dev_err(&intf->dev, "can't read capabilities\n");
+@@ -1058,6 +1064,7 @@ static int usbtmc_probe(struct usb_interface *intf,
+ error_register:
+ sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
+ sysfs_remove_group(&intf->dev.kobj,
&data_attr_grp); ++err_put: + kref_put(&data->kref, usbtmc_delete); + return retcode; + } +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c +index 3252bb2dcb80..d6481cb469c3 100644 +--- a/drivers/usb/core/config.c ++++ b/drivers/usb/core/config.c +@@ -207,6 +207,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, + if (ifp->desc.bNumEndpoints >= num_ep) + goto skip_to_next_endpoint_or_interface_descriptor; + ++ /* Check for duplicate endpoint addresses */ ++ for (i = 0; i < ifp->desc.bNumEndpoints; ++i) { ++ if (ifp->endpoint[i].desc.bEndpointAddress == ++ d->bEndpointAddress) { ++ dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", ++ cfgno, inum, asnum, d->bEndpointAddress); ++ goto skip_to_next_endpoint_or_interface_descriptor; ++ } ++ } ++ + endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; + ++ifp->desc.bNumEndpoints; + +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index 0dfee61f7878..5a2eaf401b00 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -265,11 +265,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, + if (req->request.status == -EINPROGRESS) + req->request.status = status; + +- if (dwc->ep0_bounced && dep->number == 0) ++ if (dwc->ep0_bounced && dep->number <= 1) + dwc->ep0_bounced = false; +- else +- usb_gadget_unmap_request(&dwc->gadget, &req->request, +- req->direction); ++ ++ usb_gadget_unmap_request(&dwc->gadget, &req->request, ++ req->direction); + + dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n", + req, dep->name, req->request.actual, +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c +index a660716f9331..584e43c9748f 100644 +--- a/drivers/usb/gadget/composite.c ++++ b/drivers/usb/gadget/composite.c +@@ -1320,9 +1320,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) + value = min(w_length, (u16) 1); + break; + +- /* function drivers must handle get/set altsetting; if there's +- * no get() method, we know only altsetting zero works. +- */ ++ /* function drivers must handle get/set altsetting */ + case USB_REQ_SET_INTERFACE: + if (ctrl->bRequestType != USB_RECIP_INTERFACE) + goto unknown; +@@ -1331,7 +1329,13 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) + f = cdev->config->interface[intf]; + if (!f) + break; +- if (w_value && !f->set_alt) ++ ++ /* ++ * If there's no get_alt() method, we know only altsetting zero ++ * works. There is no need to check if set_alt() is not NULL ++ * as we check this in usb_add_function(). 
++ */ ++ if (w_value && !f->get_alt) + break; + value = f->set_alt(f, w_index, w_value); + if (value == USB_GADGET_DELAYED_STATUS) { +diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c +index ac0e79e2c2e9..644c1053d650 100644 +--- a/drivers/usb/gadget/dummy_hcd.c ++++ b/drivers/usb/gadget/dummy_hcd.c +@@ -266,7 +266,7 @@ static void nuke(struct dummy *dum, struct dummy_ep *ep) + /* caller must hold lock */ + static void stop_activity(struct dummy *dum) + { +- struct dummy_ep *ep; ++ int i; + + /* prevent any more requests */ + dum->address = 0; +@@ -274,8 +274,8 @@ static void stop_activity(struct dummy *dum) + /* The timer is left running so that outstanding URBs can fail */ + + /* nuke any pending requests first, so driver i/o is quiesced */ +- list_for_each_entry(ep, &dum->gadget.ep_list, ep.ep_list) +- nuke(dum, ep); ++ for (i = 0; i < DUMMY_ENDPOINTS; ++i) ++ nuke(dum, &dum->ep[i]); + + /* driver now does any non-usb quiescing necessary */ + } +diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c +index 1e1563da1812..7f93dc26fb15 100644 +--- a/drivers/usb/host/ohci-q.c ++++ b/drivers/usb/host/ohci-q.c +@@ -927,10 +927,6 @@ rescan_all: + int completed, modified; + __hc32 *prev; + +- /* Is this ED already invisible to the hardware? */ +- if (ed->state == ED_IDLE) +- goto ed_idle; +- + /* only take off EDs that the HC isn't using, accounting for + * frame counter wraps and EDs with partially retired TDs + */ +@@ -961,14 +957,12 @@ skip_ed: + } + + /* ED's now officially unlinked, hc doesn't see */ +- ed->state = ED_IDLE; + if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT) + ohci->eds_scheduled--; + ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H); + ed->hwNextED = 0; + wmb(); + ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE); +-ed_idle: + + /* reentrancy: if we drop the schedule lock, someone might + * have modified this list. normally it's just prepending +@@ -1039,6 +1033,7 @@ rescan_this: + if (list_empty(&ed->td_list)) { + *last = ed->ed_next; + ed->ed_next = NULL; ++ ed->state = ED_IDLE; + } else if (ohci->rh_state == OHCI_RH_RUNNING) { + *last = ed->ed_next; + ed->ed_next = NULL; +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index af9e4e8c9064..b07e0754d784 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -2306,7 +2306,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) + * "physically contiguous and 64-byte (cache line) aligned". 
+ */ + xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, +- GFP_KERNEL); ++ flags); + if (!xhci->dcbaa) + goto fail; + memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); +@@ -2397,7 +2397,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) + + xhci->erst.entries = dma_alloc_coherent(dev, + sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma, +- GFP_KERNEL); ++ flags); + if (!xhci->erst.entries) + goto fail; + xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n", +diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c +index ce978384fda1..3b885c61b73e 100644 +--- a/drivers/usb/misc/idmouse.c ++++ b/drivers/usb/misc/idmouse.c +@@ -347,6 +347,9 @@ static int idmouse_probe(struct usb_interface *interface, + if (iface_desc->desc.bInterfaceClass != 0x0A) + return -ENODEV; + ++ if (iface_desc->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + /* allocate memory for our device state and initialize it */ + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (dev == NULL) +diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c +index 4c24ba0a6574..05aa716cf6b5 100644 +--- a/drivers/usb/misc/iowarrior.c ++++ b/drivers/usb/misc/iowarrior.c +@@ -792,12 +792,6 @@ static int iowarrior_probe(struct usb_interface *interface, + iface_desc = interface->cur_altsetting; + dev->product_id = le16_to_cpu(udev->descriptor.idProduct); + +- if (iface_desc->desc.bNumEndpoints < 1) { +- dev_err(&interface->dev, "Invalid number of endpoints\n"); +- retval = -EINVAL; +- goto error; +- } +- + /* set up the endpoint information */ + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + endpoint = &iface_desc->endpoint[i].desc; +@@ -808,6 +802,21 @@ static int iowarrior_probe(struct usb_interface *interface, + /* this one will match for the IOWarrior56 only */ + dev->int_out_endpoint = endpoint; + } ++ ++ if (!dev->int_in_endpoint) { ++ dev_err(&interface->dev, "no interrupt-in endpoint found\n"); ++ retval = -ENODEV; ++ goto error; ++ } ++ ++ if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) { ++ if (!dev->int_out_endpoint) { ++ dev_err(&interface->dev, "no interrupt-out endpoint found\n"); ++ retval = -ENODEV; ++ goto error; ++ } ++ } ++ + /* we have to check the report_size often, so remember it in the endianness suitable for our machine */ + dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint); + if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) && +diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c +index e129cf661223..20d7e5312f0e 100644 +--- a/drivers/usb/misc/uss720.c ++++ b/drivers/usb/misc/uss720.c +@@ -709,6 +709,11 @@ static int uss720_probe(struct usb_interface *intf, + + interface = intf->cur_altsetting; + ++ if (interface->desc.bNumEndpoints < 3) { ++ usb_put_dev(usbdev); ++ return -ENODEV; ++ } ++ + /* + * Allocate parport interface + */ +diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h +index f7b13fd25257..a3dcbd55e436 100644 +--- a/drivers/usb/musb/musbhsdma.h ++++ b/drivers/usb/musb/musbhsdma.h +@@ -157,5 +157,5 @@ struct musb_dma_controller { + void __iomem *base; + u8 channel_count; + u8 used_channels; +- u8 irq; ++ int irq; + }; +diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c +index 40e7fd94646f..62fb2553c771 100644 +--- a/drivers/usb/serial/ark3116.c ++++ b/drivers/usb/serial/ark3116.c +@@ -100,10 +100,17 @@ static int ark3116_read_reg(struct usb_serial *serial, + usb_rcvctrlpipe(serial->dev, 0), + 0xfe, 0xc0, 0, reg, + buf, 1, ARK_TIMEOUT); +- if 
(result < 0) ++ if (result < 1) { ++ dev_err(&serial->interface->dev, ++ "failed to read register %u: %d\n", ++ reg, result); ++ if (result >= 0) ++ result = -EIO; ++ + return result; +- else +- return buf[0]; ++ } ++ ++ return buf[0]; + } + + static inline int calc_divisor(int bps) +diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c +index c2a4171ab9cb..a4e5be5aea46 100644 +--- a/drivers/usb/serial/ch341.c ++++ b/drivers/usb/serial/ch341.c +@@ -97,6 +97,8 @@ static int ch341_control_out(struct usb_device *dev, u8 request, + r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request, + USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, + value, index, NULL, 0, DEFAULT_TIMEOUT); ++ if (r < 0) ++ dev_err(&dev->dev, "failed to send control message: %d\n", r); + + return r; + } +@@ -114,7 +116,20 @@ static int ch341_control_in(struct usb_device *dev, + r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request, + USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, + value, index, buf, bufsize, DEFAULT_TIMEOUT); +- return r; ++ if (r < bufsize) { ++ if (r >= 0) { ++ dev_err(&dev->dev, ++ "short control message received (%d < %u)\n", ++ r, bufsize); ++ r = -EIO; ++ } ++ ++ dev_err(&dev->dev, "failed to receive control message: %d\n", ++ r); ++ return r; ++ } ++ ++ return 0; + } + + static int ch341_set_baudrate(struct usb_device *dev, +@@ -156,9 +171,9 @@ static int ch341_set_handshake(struct usb_device *dev, u8 control) + + static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv) + { ++ const unsigned int size = 2; + char *buffer; + int r; +- const unsigned size = 8; + unsigned long flags; + + buffer = kmalloc(size, GFP_KERNEL); +@@ -169,15 +184,10 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv) + if (r < 0) + goto out; + +- /* setup the private status if available */ +- if (r == 2) { +- r = 0; +- spin_lock_irqsave(&priv->lock, flags); +- priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT; +- priv->multi_status_change = 0; +- spin_unlock_irqrestore(&priv->lock, flags); +- } else +- r = -EPROTO; ++ spin_lock_irqsave(&priv->lock, flags); ++ priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT; ++ priv->multi_status_change = 0; ++ spin_unlock_irqrestore(&priv->lock, flags); + + out: kfree(buffer); + return r; +@@ -187,9 +197,9 @@ out: kfree(buffer); + + static int ch341_configure(struct usb_device *dev, struct ch341_private *priv) + { ++ const unsigned int size = 2; + char *buffer; + int r; +- const unsigned size = 8; + + buffer = kmalloc(size, GFP_KERNEL); + if (!buffer) +@@ -252,7 +262,6 @@ static int ch341_port_probe(struct usb_serial_port *port) + + spin_lock_init(&priv->lock); + priv->baud_rate = DEFAULT_BAUD_RATE; +- priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR; + + r = ch341_configure(port->serial->dev, priv); + if (r < 0) +@@ -316,15 +325,15 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port) + + r = ch341_configure(serial->dev, priv); + if (r) +- goto out; ++ return r; + + r = ch341_set_handshake(serial->dev, priv->line_control); + if (r) +- goto out; ++ return r; + + r = ch341_set_baudrate(serial->dev, priv); + if (r) +- goto out; ++ return r; + + dev_dbg(&port->dev, "%s - submitting interrupt urb", __func__); + r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); +@@ -332,12 +341,19 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port) + dev_err(&port->dev, "%s - failed submitting interrupt urb," + " error %d\n", __func__, r); + ch341_close(port); 
+- goto out; ++ return r; + } + + r = usb_serial_generic_open(tty, port); ++ if (r) ++ goto err_kill_interrupt_urb; + +-out: return r; ++ return 0; ++ ++err_kill_interrupt_urb: ++ usb_kill_urb(port->interrupt_in_urb); ++ ++ return r; + } + + /* Old_termios contains the original termios settings and +@@ -352,26 +368,25 @@ static void ch341_set_termios(struct tty_struct *tty, + + baud_rate = tty_get_baud_rate(tty); + +- priv->baud_rate = baud_rate; +- + if (baud_rate) { +- spin_lock_irqsave(&priv->lock, flags); +- priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS); +- spin_unlock_irqrestore(&priv->lock, flags); ++ priv->baud_rate = baud_rate; + ch341_set_baudrate(port->serial->dev, priv); +- } else { +- spin_lock_irqsave(&priv->lock, flags); +- priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS); +- spin_unlock_irqrestore(&priv->lock, flags); + } + +- ch341_set_handshake(port->serial->dev, priv->line_control); +- + /* Unimplemented: + * (cflag & CSIZE) : data bits [5, 8] + * (cflag & PARENB) : parity {NONE, EVEN, ODD} + * (cflag & CSTOPB) : stop bits [1, 2] + */ ++ ++ spin_lock_irqsave(&priv->lock, flags); ++ if (C_BAUD(tty) == B0) ++ priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS); ++ else if (old_termios && (old_termios->c_cflag & CBAUD) == B0) ++ priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS); ++ spin_unlock_irqrestore(&priv->lock, flags); ++ ++ ch341_set_handshake(port->serial->dev, priv->line_control); + } + + static void ch341_break_ctl(struct tty_struct *tty, int break_state) +@@ -570,14 +585,23 @@ static int ch341_tiocmget(struct tty_struct *tty) + + static int ch341_reset_resume(struct usb_serial *serial) + { +- struct ch341_private *priv; +- +- priv = usb_get_serial_port_data(serial->port[0]); ++ struct usb_serial_port *port = serial->port[0]; ++ struct ch341_private *priv = usb_get_serial_port_data(port); ++ int ret; + + /* reconfigure ch341 serial port after bus-reset */ + ch341_configure(serial->dev, priv); + +- return 0; ++ if (test_bit(ASYNCB_INITIALIZED, &port->port.flags)) { ++ ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO); ++ if (ret) { ++ dev_err(&port->dev, "failed to submit interrupt urb: %d\n", ++ ret); ++ return ret; ++ } ++ } ++ ++ return usb_serial_generic_resume(serial); + } + + static struct usb_serial_driver ch341_device = { +diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c +index 781426230d69..bb3c7f09f059 100644 +--- a/drivers/usb/serial/cyberjack.c ++++ b/drivers/usb/serial/cyberjack.c +@@ -51,6 +51,7 @@ + #define CYBERJACK_PRODUCT_ID 0x0100 + + /* Function prototypes */ ++static int cyberjack_attach(struct usb_serial *serial); + static int cyberjack_port_probe(struct usb_serial_port *port); + static int cyberjack_port_remove(struct usb_serial_port *port); + static int cyberjack_open(struct tty_struct *tty, +@@ -78,6 +79,7 @@ static struct usb_serial_driver cyberjack_device = { + .description = "Reiner SCT Cyberjack USB card reader", + .id_table = id_table, + .num_ports = 1, ++ .attach = cyberjack_attach, + .port_probe = cyberjack_port_probe, + .port_remove = cyberjack_port_remove, + .open = cyberjack_open, +@@ -101,6 +103,14 @@ struct cyberjack_private { + short wrsent; /* Data already sent */ + }; + ++static int cyberjack_attach(struct usb_serial *serial) ++{ ++ if (serial->num_bulk_out < serial->num_ports) ++ return -ENODEV; ++ ++ return 0; ++} ++ + static int cyberjack_port_probe(struct usb_serial_port *port) + { + struct cyberjack_private *priv; +diff --git a/drivers/usb/serial/digi_acceleport.c 
b/drivers/usb/serial/digi_acceleport.c +index 8c34d9cfb226..e8d7c1beae83 100644 +--- a/drivers/usb/serial/digi_acceleport.c ++++ b/drivers/usb/serial/digi_acceleport.c +@@ -1489,16 +1489,20 @@ static int digi_read_oob_callback(struct urb *urb) + struct usb_serial *serial = port->serial; + struct tty_struct *tty; + struct digi_port *priv = usb_get_serial_port_data(port); ++ unsigned char *buf = urb->transfer_buffer; + int opcode, line, status, val; + int i; + unsigned int rts; + ++ if (urb->actual_length < 4) ++ return -1; ++ + /* handle each oob command */ +- for (i = 0; i < urb->actual_length - 3;) { +- opcode = ((unsigned char *)urb->transfer_buffer)[i++]; +- line = ((unsigned char *)urb->transfer_buffer)[i++]; +- status = ((unsigned char *)urb->transfer_buffer)[i++]; +- val = ((unsigned char *)urb->transfer_buffer)[i++]; ++ for (i = 0; i < urb->actual_length - 3; i += 4) { ++ opcode = buf[i]; ++ line = buf[i + 1]; ++ status = buf[i + 2]; ++ val = buf[i + 3]; + + dev_dbg(&port->dev, "digi_read_oob_callback: opcode=%d, line=%d, status=%d, val=%d\n", + opcode, line, status, val); +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index 4e865664699b..ce884f7434b8 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -1813,8 +1813,6 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port) + + mutex_init(&priv->cfg_lock); + +- priv->flags = ASYNC_LOW_LATENCY; +- + if (quirk && quirk->port_probe) + quirk->port_probe(priv); + +@@ -2091,6 +2089,20 @@ static int ftdi_process_packet(struct usb_serial_port *port, + priv->prev_status = status; + } + ++ /* save if the transmitter is empty or not */ ++ if (packet[1] & FTDI_RS_TEMT) ++ priv->transmit_empty = 1; ++ else ++ priv->transmit_empty = 0; ++ ++ len -= 2; ++ if (!len) ++ return 0; /* status only */ ++ ++ /* ++ * Break and error status must only be processed for packets with ++ * data payload to avoid over-reporting. ++ */ + flag = TTY_NORMAL; + if (packet[1] & FTDI_RS_ERR_MASK) { + /* Break takes precedence over parity, which takes precedence +@@ -2113,15 +2125,6 @@ static int ftdi_process_packet(struct usb_serial_port *port, + } + } + +- /* save if the transmitter is empty or not */ +- if (packet[1] & FTDI_RS_TEMT) +- priv->transmit_empty = 1; +- else +- priv->transmit_empty = 0; +- +- len -= 2; +- if (!len) +- return 0; /* status only */ + port->icount.rx += len; + ch = packet + 2; + +@@ -2452,8 +2455,12 @@ static int ftdi_get_modem_status(struct usb_serial_port *port, + FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE, + 0, priv->interface, + buf, len, WDR_TIMEOUT); +- if (ret < 0) { ++ ++ /* NOTE: We allow short responses and handle that below. 
*/ ++ if (ret < 1) { + dev_err(&port->dev, "failed to get modem status: %d\n", ret); ++ if (ret >= 0) ++ ret = -EIO; + ret = usb_translate_errors(ret); + goto out; + } +diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c +index b110c573ea85..ea9c4f4aea32 100644 +--- a/drivers/usb/serial/garmin_gps.c ++++ b/drivers/usb/serial/garmin_gps.c +@@ -1049,6 +1049,7 @@ static int garmin_write_bulk(struct usb_serial_port *port, + "%s - usb_submit_urb(write bulk) failed with status = %d\n", + __func__, status); + count = status; ++ kfree(buffer); + } + + /* we are done with this urb, so let the host driver +diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c +index c574d312f1f5..9f24fd776ec0 100644 +--- a/drivers/usb/serial/io_edgeport.c ++++ b/drivers/usb/serial/io_edgeport.c +@@ -2795,6 +2795,11 @@ static int edge_startup(struct usb_serial *serial) + EDGE_COMPATIBILITY_MASK1, + EDGE_COMPATIBILITY_MASK2 }; + ++ if (serial->num_bulk_in < 1 || serial->num_interrupt_in < 1) { ++ dev_err(&serial->interface->dev, "missing endpoints\n"); ++ return -ENODEV; ++ } ++ + dev = serial->dev; + + /* create our private serial structure */ +diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c +index 8cd6479a8b43..20814d528c15 100644 +--- a/drivers/usb/serial/io_ti.c ++++ b/drivers/usb/serial/io_ti.c +@@ -1402,7 +1402,7 @@ stayinbootmode: + dev_dbg(dev, "%s - STAYING IN BOOT MODE\n", __func__); + serial->product_info.TiMode = TI_MODE_BOOT; + +- return 0; ++ return 1; + } + + +@@ -1575,6 +1575,12 @@ static void edge_interrupt_callback(struct urb *urb) + function = TIUMP_GET_FUNC_FROM_CODE(data[0]); + dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__, + port_number, function, data[1]); ++ ++ if (port_number >= edge_serial->serial->num_ports) { ++ dev_err(dev, "bad port number %d\n", port_number); ++ goto exit; ++ } ++ + port = edge_serial->serial->port[port_number]; + edge_port = usb_get_serial_port_data(port); + if (!edge_port) { +@@ -1655,7 +1661,7 @@ static void edge_bulk_in_callback(struct urb *urb) + + port_number = edge_port->port->number - edge_port->port->serial->minor; + +- if (edge_port->lsr_event) { ++ if (urb->actual_length > 0 && edge_port->lsr_event) { + edge_port->lsr_event = 0; + dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n", + __func__, port_number, edge_port->lsr_mask, *data); +@@ -2433,6 +2439,13 @@ static int edge_startup(struct usb_serial *serial) + struct edgeport_serial *edge_serial; + int status; + ++ /* Make sure we have the required endpoints when in download mode. 
*/ ++ if (serial->interface->cur_altsetting->desc.bNumEndpoints > 1) { ++ if (serial->num_bulk_in < serial->num_ports || ++ serial->num_bulk_out < serial->num_ports) ++ return -ENODEV; ++ } ++ + /* create our private serial structure */ + edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL); + if (edge_serial == NULL) { +@@ -2444,11 +2457,14 @@ static int edge_startup(struct usb_serial *serial) + usb_set_serial_data(serial, edge_serial); + + status = download_fw(edge_serial); +- if (status) { ++ if (status < 0) { + kfree(edge_serial); + return status; + } + ++ if (status > 0) ++ return 1; /* bind but do not register any ports */ ++ + return 0; + } + +diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c +index 790673e5faa7..eadab621361a 100644 +--- a/drivers/usb/serial/iuu_phoenix.c ++++ b/drivers/usb/serial/iuu_phoenix.c +@@ -69,6 +69,16 @@ struct iuu_private { + u32 clk; + }; + ++static int iuu_attach(struct usb_serial *serial) ++{ ++ unsigned char num_ports = serial->num_ports; ++ ++ if (serial->num_bulk_in < num_ports || serial->num_bulk_out < num_ports) ++ return -ENODEV; ++ ++ return 0; ++} ++ + static int iuu_port_probe(struct usb_serial_port *port) + { + struct iuu_private *priv; +@@ -1199,6 +1209,7 @@ static struct usb_serial_driver iuu_device = { + .tiocmset = iuu_tiocmset, + .set_termios = iuu_set_termios, + .init_termios = iuu_init_termios, ++ .attach = iuu_attach, + .port_probe = iuu_port_probe, + .port_remove = iuu_port_remove, + }; +diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c +index 5f1d382e55cf..05c567bf5cfa 100644 +--- a/drivers/usb/serial/keyspan_pda.c ++++ b/drivers/usb/serial/keyspan_pda.c +@@ -697,6 +697,19 @@ MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw"); + MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw"); + #endif + ++static int keyspan_pda_attach(struct usb_serial *serial) ++{ ++ unsigned char num_ports = serial->num_ports; ++ ++ if (serial->num_bulk_out < num_ports || ++ serial->num_interrupt_in < num_ports) { ++ dev_err(&serial->interface->dev, "missing endpoints\n"); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ + static int keyspan_pda_port_probe(struct usb_serial_port *port) + { + +@@ -774,6 +787,7 @@ static struct usb_serial_driver keyspan_pda_device = { + .break_ctl = keyspan_pda_break_ctl, + .tiocmget = keyspan_pda_tiocmget, + .tiocmset = keyspan_pda_tiocmset, ++ .attach = keyspan_pda_attach, + .port_probe = keyspan_pda_port_probe, + .port_remove = keyspan_pda_port_remove, + }; +diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c +index 1b4054fe52a5..b6794baf0a3b 100644 +--- a/drivers/usb/serial/kl5kusb105.c ++++ b/drivers/usb/serial/kl5kusb105.c +@@ -198,10 +198,11 @@ static int klsi_105_get_line_state(struct usb_serial_port *port, + status_buf, KLSI_STATUSBUF_LEN, + 10000 + ); +- if (rc < 0) +- dev_err(&port->dev, "Reading line status failed (error = %d)\n", +- rc); +- else { ++ if (rc != KLSI_STATUSBUF_LEN) { ++ dev_err(&port->dev, "reading line status failed: %d\n", rc); ++ if (rc >= 0) ++ rc = -EIO; ++ } else { + status = get_unaligned_le16(status_buf); + + dev_info(&port->serial->dev->dev, "read status %x %x", +@@ -304,7 +305,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) + rc = usb_serial_generic_open(tty, port); + if (rc) { + retval = rc; +- goto exit; ++ goto err_free_cfg; + } + + rc = usb_control_msg(port->serial->dev, +@@ -319,21 +320,38 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port 
*port) + if (rc < 0) { + dev_err(&port->dev, "Enabling read failed (error = %d)\n", rc); + retval = rc; ++ goto err_generic_close; + } else + dev_dbg(&port->dev, "%s - enabled reading\n", __func__); + + rc = klsi_105_get_line_state(port, &line_state); +- if (rc >= 0) { +- spin_lock_irqsave(&priv->lock, flags); +- priv->line_state = line_state; +- spin_unlock_irqrestore(&priv->lock, flags); +- dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, line_state); +- retval = 0; +- } else ++ if (rc < 0) { + retval = rc; ++ goto err_disable_read; ++ } ++ ++ spin_lock_irqsave(&priv->lock, flags); ++ priv->line_state = line_state; ++ spin_unlock_irqrestore(&priv->lock, flags); ++ dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, ++ line_state); ++ ++ return 0; + +-exit: ++err_disable_read: ++ usb_control_msg(port->serial->dev, ++ usb_sndctrlpipe(port->serial->dev, 0), ++ KL5KUSB105A_SIO_CONFIGURE, ++ USB_TYPE_VENDOR | USB_DIR_OUT, ++ KL5KUSB105A_SIO_CONFIGURE_READ_OFF, ++ 0, /* index */ ++ NULL, 0, ++ KLSI_TIMEOUT); ++err_generic_close: ++ usb_serial_generic_close(port); ++err_free_cfg: + kfree(cfg); ++ + return retval; + } + +diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c +index efa75b4e51f2..63fa400a822f 100644 +--- a/drivers/usb/serial/kobil_sct.c ++++ b/drivers/usb/serial/kobil_sct.c +@@ -52,6 +52,7 @@ + + + /* Function prototypes */ ++static int kobil_attach(struct usb_serial *serial); + static int kobil_port_probe(struct usb_serial_port *probe); + static int kobil_port_remove(struct usb_serial_port *probe); + static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port); +@@ -87,6 +88,7 @@ static struct usb_serial_driver kobil_device = { + .description = "KOBIL USB smart card terminal", + .id_table = id_table, + .num_ports = 1, ++ .attach = kobil_attach, + .port_probe = kobil_port_probe, + .port_remove = kobil_port_remove, + .ioctl = kobil_ioctl, +@@ -114,6 +116,16 @@ struct kobil_private { + }; + + ++static int kobil_attach(struct usb_serial *serial) ++{ ++ if (serial->num_interrupt_out < serial->num_ports) { ++ dev_err(&serial->interface->dev, "missing interrupt-out endpoint\n"); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ + static int kobil_port_probe(struct usb_serial_port *port) + { + struct usb_serial *serial = port->serial; +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c +index ddc71d706ac6..2d1ad823b1ab 100644 +--- a/drivers/usb/serial/mos7720.c ++++ b/drivers/usb/serial/mos7720.c +@@ -66,8 +66,6 @@ struct moschip_port { + struct urb *write_urb_pool[NUM_URBS]; + }; + +-static struct usb_serial_driver moschip7720_2port_driver; +- + #define USB_VENDOR_ID_MOSCHIP 0x9710 + #define MOSCHIP_DEVICE_ID_7720 0x7720 + #define MOSCHIP_DEVICE_ID_7715 0x7715 +@@ -966,25 +964,6 @@ static void mos7720_bulk_out_data_callback(struct urb *urb) + tty_port_tty_wakeup(&mos7720_port->port->port); + } + +-/* +- * mos77xx_probe +- * this function installs the appropriate read interrupt endpoint callback +- * depending on whether the device is a 7720 or 7715, thus avoiding costly +- * run-time checks in the high-frequency callback routine itself. 
+- */ +-static int mos77xx_probe(struct usb_serial *serial, +- const struct usb_device_id *id) +-{ +- if (id->idProduct == MOSCHIP_DEVICE_ID_7715) +- moschip7720_2port_driver.read_int_callback = +- mos7715_interrupt_callback; +- else +- moschip7720_2port_driver.read_int_callback = +- mos7720_interrupt_callback; +- +- return 0; +-} +- + static int mos77xx_calc_num_ports(struct usb_serial *serial) + { + u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); +@@ -1917,6 +1896,11 @@ static int mos7720_startup(struct usb_serial *serial) + u16 product; + int ret_val; + ++ if (serial->num_bulk_in < 2 || serial->num_bulk_out < 2) { ++ dev_err(&serial->interface->dev, "missing bulk endpoints\n"); ++ return -ENODEV; ++ } ++ + product = le16_to_cpu(serial->dev->descriptor.idProduct); + dev = serial->dev; + +@@ -1941,19 +1925,18 @@ static int mos7720_startup(struct usb_serial *serial) + tmp->interrupt_in_endpointAddress; + serial->port[1]->interrupt_in_urb = NULL; + serial->port[1]->interrupt_in_buffer = NULL; ++ ++ if (serial->port[0]->interrupt_in_urb) { ++ struct urb *urb = serial->port[0]->interrupt_in_urb; ++ ++ urb->complete = mos7715_interrupt_callback; ++ } + } + + /* setting configuration feature to one */ + usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), + (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000); + +- /* start the interrupt urb */ +- ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL); +- if (ret_val) +- dev_err(&dev->dev, +- "%s - Error %d submitting control urb\n", +- __func__, ret_val); +- + #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT + if (product == MOSCHIP_DEVICE_ID_7715) { + ret_val = mos7715_parport_init(serial); +@@ -1961,6 +1944,13 @@ static int mos7720_startup(struct usb_serial *serial) + return ret_val; + } + #endif ++ /* start the interrupt urb */ ++ ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL); ++ if (ret_val) { ++ dev_err(&dev->dev, "failed to submit interrupt urb: %d\n", ++ ret_val); ++ } ++ + /* LSR For Port 1 */ + read_mos_reg(serial, 0, LSR, &data); + dev_dbg(&dev->dev, "LSR:%x\n", data); +@@ -1970,6 +1960,8 @@ static int mos7720_startup(struct usb_serial *serial) + + static void mos7720_release(struct usb_serial *serial) + { ++ usb_kill_urb(serial->port[0]->interrupt_in_urb); ++ + #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT + /* close the parallel port */ + +@@ -2052,7 +2044,6 @@ static struct usb_serial_driver moschip7720_2port_driver = { + .close = mos7720_close, + .throttle = mos7720_throttle, + .unthrottle = mos7720_unthrottle, +- .probe = mos77xx_probe, + .attach = mos7720_startup, + .release = mos7720_release, + .port_probe = mos7720_port_probe, +@@ -2066,7 +2057,7 @@ static struct usb_serial_driver moschip7720_2port_driver = { + .chars_in_buffer = mos7720_chars_in_buffer, + .break_ctl = mos7720_break, + .read_bulk_callback = mos7720_bulk_in_callback, +- .read_int_callback = NULL /* dynamically assigned in probe() */ ++ .read_int_callback = mos7720_interrupt_callback, + }; + + static struct usb_serial_driver * const serial_drivers[] = { +diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c +index 7df7df62e177..3308c43d2313 100644 +--- a/drivers/usb/serial/mos7840.c ++++ b/drivers/usb/serial/mos7840.c +@@ -1041,6 +1041,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port) + * (can't set it up in mos7840_startup as the structures * + * were not set up at that time.) 
*/ + if (port0->open_ports == 1) { ++ /* FIXME: Buffer never NULL, so URB is not submitted. */ + if (serial->port[0]->interrupt_in_buffer == NULL) { + /* set up interrupt urb */ + usb_fill_int_urb(serial->port[0]->interrupt_in_urb, +@@ -2255,6 +2256,18 @@ static int mos7840_calc_num_ports(struct usb_serial *serial) + return mos7840_num_ports; + } + ++static int mos7840_attach(struct usb_serial *serial) ++{ ++ if (serial->num_bulk_in < serial->num_ports || ++ serial->num_bulk_out < serial->num_ports || ++ serial->num_interrupt_in < 1) { ++ dev_err(&serial->interface->dev, "missing endpoints\n"); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ + static int mos7840_port_probe(struct usb_serial_port *port) + { + struct usb_serial *serial = port->serial; +@@ -2537,6 +2550,7 @@ static struct usb_serial_driver moschip7840_4port_device = { + .tiocmset = mos7840_tiocmset, + .tiocmiwait = usb_serial_generic_tiocmiwait, + .get_icount = usb_serial_generic_get_icount, ++ .attach = mos7840_attach, + .port_probe = mos7840_port_probe, + .port_remove = mos7840_port_remove, + .read_bulk_callback = mos7840_bulk_in_callback, +diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c +index 5739bf6f7200..8028e5ffe80d 100644 +--- a/drivers/usb/serial/omninet.c ++++ b/drivers/usb/serial/omninet.c +@@ -39,6 +39,7 @@ static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port, + const unsigned char *buf, int count); + static int omninet_write_room(struct tty_struct *tty); + static void omninet_disconnect(struct usb_serial *serial); ++static int omninet_attach(struct usb_serial *serial); + static int omninet_port_probe(struct usb_serial_port *port); + static int omninet_port_remove(struct usb_serial_port *port); + +@@ -57,6 +58,7 @@ static struct usb_serial_driver zyxel_omninet_device = { + .description = "ZyXEL - omni.net lcd plus usb", + .id_table = id_table, + .num_ports = 1, ++ .attach = omninet_attach, + .port_probe = omninet_port_probe, + .port_remove = omninet_port_remove, + .open = omninet_open, +@@ -105,6 +107,17 @@ struct omninet_data { + __u8 od_outseq; /* Sequence number for bulk_out URBs */ + }; + ++static int omninet_attach(struct usb_serial *serial) ++{ ++ /* The second bulk-out endpoint is used for writing. 
*/ ++ if (serial->num_bulk_out < 2) { ++ dev_err(&serial->interface->dev, "missing endpoints\n"); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ + static int omninet_port_probe(struct usb_serial_port *port) + { + struct omninet_data *od; +@@ -130,12 +143,6 @@ static int omninet_port_remove(struct usb_serial_port *port) + + static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port) + { +- struct usb_serial *serial = port->serial; +- struct usb_serial_port *wport; +- +- wport = serial->port[1]; +- tty_port_tty_set(&wport->port, tty); +- + return usb_serial_generic_open(tty, port); + } + +diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c +index b0eb1dfc601a..b93ab96573ef 100644 +--- a/drivers/usb/serial/opticon.c ++++ b/drivers/usb/serial/opticon.c +@@ -143,7 +143,7 @@ static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port) + usb_clear_halt(port->serial->dev, port->read_urb->pipe); + + res = usb_serial_generic_open(tty, port); +- if (!res) ++ if (res) + return res; + + /* Request CTS line state, sometimes during opening the current +diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c +index 7e3e0782e51f..ff83d87ed921 100644 +--- a/drivers/usb/serial/oti6858.c ++++ b/drivers/usb/serial/oti6858.c +@@ -135,6 +135,7 @@ static int oti6858_tiocmget(struct tty_struct *tty); + static int oti6858_tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear); + static int oti6858_tiocmiwait(struct tty_struct *tty, unsigned long arg); ++static int oti6858_attach(struct usb_serial *serial); + static int oti6858_port_probe(struct usb_serial_port *port); + static int oti6858_port_remove(struct usb_serial_port *port); + +@@ -159,6 +160,7 @@ static struct usb_serial_driver oti6858_device = { + .write_bulk_callback = oti6858_write_bulk_callback, + .write_room = oti6858_write_room, + .chars_in_buffer = oti6858_chars_in_buffer, ++ .attach = oti6858_attach, + .port_probe = oti6858_port_probe, + .port_remove = oti6858_port_remove, + }; +@@ -328,6 +330,20 @@ static void send_data(struct work_struct *work) + usb_serial_port_softint(port); + } + ++static int oti6858_attach(struct usb_serial *serial) ++{ ++ unsigned char num_ports = serial->num_ports; ++ ++ if (serial->num_bulk_in < num_ports || ++ serial->num_bulk_out < num_ports || ++ serial->num_interrupt_in < num_ports) { ++ dev_err(&serial->interface->dev, "missing endpoints\n"); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ + static int oti6858_port_probe(struct usb_serial_port *port) + { + struct oti6858_private *priv; +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c +index 33313caed504..f496c38d5395 100644 +--- a/drivers/usb/serial/pl2303.c ++++ b/drivers/usb/serial/pl2303.c +@@ -175,9 +175,17 @@ static int pl2303_vendor_write(__u16 value, __u16 index, + static int pl2303_startup(struct usb_serial *serial) + { + struct pl2303_serial_private *spriv; ++ unsigned char num_ports = serial->num_ports; + enum pl2303_type type = type_0; + unsigned char *buf; + ++ if (serial->num_bulk_in < num_ports || ++ serial->num_bulk_out < num_ports || ++ serial->num_interrupt_in < num_ports) { ++ dev_err(&serial->interface->dev, "missing endpoints\n"); ++ return -ENODEV; ++ } ++ + spriv = kzalloc(sizeof(*spriv), GFP_KERNEL); + if (!spriv) + return -ENOMEM; +diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c +index 13824b5ca343..ecd0a84ffc0d 100644 +--- a/drivers/usb/serial/quatech2.c ++++ b/drivers/usb/serial/quatech2.c +@@ -408,16 
+408,12 @@ static void qt2_close(struct usb_serial_port *port) + { + struct usb_serial *serial; + struct qt2_port_private *port_priv; +- unsigned long flags; + int i; + + serial = port->serial; + port_priv = usb_get_serial_port_data(port); + +- spin_lock_irqsave(&port_priv->urb_lock, flags); + usb_kill_urb(port_priv->write_urb); +- port_priv->urb_in_use = false; +- spin_unlock_irqrestore(&port_priv->urb_lock, flags); + + /* flush the port transmit buffer */ + i = usb_control_msg(serial->dev, +diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c +index 21cd7bf2a8cc..8e24f8ff2fc6 100644 +--- a/drivers/usb/serial/safe_serial.c ++++ b/drivers/usb/serial/safe_serial.c +@@ -215,6 +215,11 @@ static void safe_process_read_urb(struct urb *urb) + if (!safe) + goto out; + ++ if (length < 2) { ++ dev_err(&port->dev, "malformed packet\n"); ++ return; ++ } ++ + fcs = fcs_compute10(data, length, CRC10_INITFCS); + if (fcs) { + dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs); +diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c +index 1694d4ff1639..595a3f0b021e 100644 +--- a/drivers/usb/serial/spcp8x5.c ++++ b/drivers/usb/serial/spcp8x5.c +@@ -155,6 +155,19 @@ static int spcp8x5_probe(struct usb_serial *serial, + return 0; + } + ++static int spcp8x5_attach(struct usb_serial *serial) ++{ ++ unsigned char num_ports = serial->num_ports; ++ ++ if (serial->num_bulk_in < num_ports || ++ serial->num_bulk_out < num_ports) { ++ dev_err(&serial->interface->dev, "missing endpoints\n"); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ + static int spcp8x5_port_probe(struct usb_serial_port *port) + { + const struct usb_device_id *id = usb_get_serial_data(port->serial); +@@ -218,11 +231,17 @@ static int spcp8x5_get_msr(struct usb_serial_port *port, u8 *status) + ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), + GET_UART_STATUS, GET_UART_STATUS_TYPE, + 0, GET_UART_STATUS_MSR, buf, 1, 100); +- if (ret < 0) ++ if (ret < 1) { + dev_err(&port->dev, "failed to get modem status: %d", ret); ++ if (ret >= 0) ++ ret = -EIO; ++ goto out; ++ } + + dev_dbg(&port->dev, "0xc0:0x22:0:6 %d - 0x02%x", ret, *buf); + *status = *buf; ++ ret = 0; ++out: + kfree(buf); + + return ret; +@@ -479,6 +498,7 @@ static struct usb_serial_driver spcp8x5_device = { + .tiocmget = spcp8x5_tiocmget, + .tiocmset = spcp8x5_tiocmset, + .probe = spcp8x5_probe, ++ .attach = spcp8x5_attach, + .port_probe = spcp8x5_port_probe, + .port_remove = spcp8x5_port_remove, + }; +diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c +index 0a7c68fa5e5e..1ccf221d842b 100644 +--- a/drivers/usb/serial/ti_usb_3410_5052.c ++++ b/drivers/usb/serial/ti_usb_3410_5052.c +@@ -401,6 +401,13 @@ static int ti_startup(struct usb_serial *serial) + goto free_tdev; + } + ++ if (serial->num_bulk_in < serial->num_ports || ++ serial->num_bulk_out < serial->num_ports) { ++ dev_err(&serial->interface->dev, "missing endpoints\n"); ++ status = -ENODEV; ++ goto free_tdev; ++ } ++ + return 0; + + free_tdev: +diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c +index a09b65ebd9bb..2bb0fd3f3423 100644 +--- a/drivers/usb/wusbcore/wa-hc.c ++++ b/drivers/usb/wusbcore/wa-hc.c +@@ -38,6 +38,9 @@ int wa_create(struct wahc *wa, struct usb_interface *iface) + int result; + struct device *dev = &iface->dev; + ++ if (iface->cur_altsetting->desc.bNumEndpoints < 3) ++ return -ENODEV; ++ + result = wa_rpipes_create(wa); + if (result < 0) + goto error_rpipes_create; +diff --git 
a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c +index 810c90ae2c55..cd8bf69aa691 100644 +--- a/drivers/uwb/hwa-rc.c ++++ b/drivers/uwb/hwa-rc.c +@@ -811,6 +811,9 @@ static int hwarc_probe(struct usb_interface *iface, + struct hwarc *hwarc; + struct device *dev = &iface->dev; + ++ if (iface->cur_altsetting->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + result = -ENOMEM; + uwb_rc = uwb_rc_alloc(); + if (uwb_rc == NULL) { +diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c +index 2bfc846ac071..6345e85822a4 100644 +--- a/drivers/uwb/i1480/dfu/usb.c ++++ b/drivers/uwb/i1480/dfu/usb.c +@@ -362,6 +362,9 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) + result); + } + ++ if (iface->cur_altsetting->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + result = -ENOMEM; + i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL); + if (i1480_usb == NULL) { +diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c +index 5c3960da755a..71666c02dea8 100644 +--- a/drivers/video/fbcmap.c ++++ b/drivers/video/fbcmap.c +@@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap) + + int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to) + { +- int tooff = 0, fromoff = 0; +- int size; ++ unsigned int tooff = 0, fromoff = 0; ++ size_t size; + + if (to->start > from->start) + fromoff = to->start - from->start; + else + tooff = from->start - to->start; +- size = to->len - tooff; +- if (size > (int) (from->len - fromoff)) +- size = from->len - fromoff; +- if (size <= 0) ++ if (fromoff >= from->len || tooff >= to->len) ++ return -EINVAL; ++ ++ size = min_t(size_t, to->len - tooff, from->len - fromoff); ++ if (size == 0) + return -EINVAL; + size *= sizeof(u16); + +@@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to) + + int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to) + { +- int tooff = 0, fromoff = 0; +- int size; ++ unsigned int tooff = 0, fromoff = 0; ++ size_t size; + + if (to->start > from->start) + fromoff = to->start - from->start; + else + tooff = from->start - to->start; +- size = to->len - tooff; +- if (size > (int) (from->len - fromoff)) +- size = from->len - fromoff; +- if (size <= 0) ++ if (fromoff >= from->len || tooff >= to->len) ++ return -EINVAL; ++ ++ size = min_t(size_t, to->len - tooff, from->len - fromoff); ++ if (size == 0) + return -EINVAL; + size *= sizeof(u16); + +diff --git a/fs/9p/acl.c b/fs/9p/acl.c +index 7af425f53bee..9686c1f17653 100644 +--- a/fs/9p/acl.c ++++ b/fs/9p/acl.c +@@ -320,32 +320,26 @@ static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name, + case ACL_TYPE_ACCESS: + name = POSIX_ACL_XATTR_ACCESS; + if (acl) { +- umode_t mode = inode->i_mode; +- retval = posix_acl_equiv_mode(acl, &mode); +- if (retval < 0) ++ struct iattr iattr; ++ ++ retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl); ++ if (retval) + goto err_out; +- else { +- struct iattr iattr; +- if (retval == 0) { +- /* +- * ACL can be represented +- * by the mode bits. So don't +- * update ACL. +- */ +- acl = NULL; +- value = NULL; +- size = 0; +- } +- /* Updte the mode bits */ +- iattr.ia_mode = ((mode & S_IALLUGO) | +- (inode->i_mode & ~S_IALLUGO)); +- iattr.ia_valid = ATTR_MODE; +- /* FIXME should we update ctime ? +- * What is the following setxattr update the +- * mode ? ++ if (!acl) { ++ /* ++ * ACL can be represented ++ * by the mode bits. So don't ++ * update ACL. 
+ */
+- v9fs_vfs_setattr_dotl(dentry, &iattr);
++ value = NULL;
++ size = 0;
+ }
++ iattr.ia_valid = ATTR_MODE;
++ /* FIXME should we update ctime ?
++ * What is the following setxattr update the
++ * mode ?
++ */
++ v9fs_vfs_setattr_dotl(dentry, &iattr);
+ }
+ break;
+ case ACL_TYPE_DEFAULT:
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 85f5c85ec91c..8f0267e81e8d 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -660,7 +660,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
+ else if (bdev->bd_holder != NULL){
+ return false; /* held by someone else */
+ }
+- else if (bdev->bd_contains == bdev)
++ else if (whole == bdev)
+ return true; /* is a whole device which isn't held */
+
+ else if (whole->bd_holder == bd_may_claim)
+@@ -1692,6 +1692,7 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
+ spin_lock(&inode_sb_list_lock);
+ list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
+ struct address_space *mapping = inode->i_mapping;
++ struct block_device *bdev;
+
+ spin_lock(&inode->i_lock);
+ if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
+@@ -1712,8 +1713,12 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
+ */
+ iput(old_inode);
+ old_inode = inode;
++ bdev = I_BDEV(inode);
+
+- func(I_BDEV(inode), arg);
++ mutex_lock(&bdev->bd_mutex);
++ if (bdev->bd_openers)
++ func(bdev, arg);
++ mutex_unlock(&bdev->bd_mutex);
+
+ spin_lock(&inode_sb_list_lock);
+ }
+diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
+index 0890c83643e9..d6d53e5e7945 100644
+--- a/fs/btrfs/acl.c
++++ b/fs/btrfs/acl.c
+@@ -118,11 +118,9 @@ static int btrfs_set_acl(struct btrfs_trans_handle *trans,
+ case ACL_TYPE_ACCESS:
+ name = POSIX_ACL_XATTR_ACCESS;
+ if (acl) {
+- ret = posix_acl_equiv_mode(acl, &inode->i_mode);
+- if (ret < 0)
++ ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++ if (ret)
+ return ret;
+- if (ret == 0)
+- acl = NULL;
+ }
+ ret = 0;
+ break;
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 7d3331cbccba..681782d00b1f 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -1691,12 +1691,11 @@ static noinline int find_dir_range(struct btrfs_root *root,
+ next:
+ /* check the next slot in the tree to see if it is a valid item */
+ nritems = btrfs_header_nritems(path->nodes[0]);
++ path->slots[0]++;
+ if (path->slots[0] >= nritems) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret)
+ goto out;
+- } else {
+- path->slots[0]++;
+ }
+
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+diff --git a/fs/exec.c b/fs/exec.c
+index acbd7ac2deda..c945a555eb25 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -19,7 +19,7 @@
+ * current->executable is only used by the procfs. This allows a dispatch
+ * table to check for several different types of binary formats. We keep
+ * trying until we recognize the file or we run out of supported binary
+- * formats. 
++ * formats.
+ */
+
+ #include <linux/slab.h>
+@@ -1091,6 +1091,13 @@ int flush_old_exec(struct linux_binprm * bprm)
+ flush_thread();
+ current->personality &= ~bprm->per_clear;
+
++ /*
++ * We have to apply CLOEXEC before we change whether the process is
++ * dumpable (in setup_new_exec) to avoid a race with a process in userspace
++ * trying to access the should-be-closed file descriptors of a process
++ * undergoing exec(2). 
++ */ ++ do_close_on_exec(current->files); + return 0; + + out: +@@ -1141,7 +1148,6 @@ void setup_new_exec(struct linux_binprm * bprm) + current->self_exec_id++; + + flush_signal_handlers(current, 0); +- do_close_on_exec(current->files); + } + EXPORT_SYMBOL(setup_new_exec); + +diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c +index 110b6b371a4e..48c3c2d7d261 100644 +--- a/fs/ext2/acl.c ++++ b/fs/ext2/acl.c +@@ -206,15 +206,11 @@ ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl) + case ACL_TYPE_ACCESS: + name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS; + if (acl) { +- error = posix_acl_equiv_mode(acl, &inode->i_mode); +- if (error < 0) ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl); ++ if (error) + return error; +- else { +- inode->i_ctime = CURRENT_TIME_SEC; +- mark_inode_dirty(inode); +- if (error == 0) +- acl = NULL; +- } ++ inode->i_ctime = CURRENT_TIME_SEC; ++ mark_inode_dirty(inode); + } + break; + +diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c +index dbb5ad59a7fc..2f994bbf73a7 100644 +--- a/fs/ext3/acl.c ++++ b/fs/ext3/acl.c +@@ -205,15 +205,11 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type, + case ACL_TYPE_ACCESS: + name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS; + if (acl) { +- error = posix_acl_equiv_mode(acl, &inode->i_mode); ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (error < 0) + return error; +- else { +- inode->i_ctime = CURRENT_TIME_SEC; +- ext3_mark_inode_dirty(handle, inode); +- if (error == 0) +- acl = NULL; +- } ++ inode->i_ctime = CURRENT_TIME_SEC; ++ ext3_mark_inode_dirty(handle, inode); + } + break; + +diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c +index 39a54a0e9fe4..c844f1bfb451 100644 +--- a/fs/ext4/acl.c ++++ b/fs/ext4/acl.c +@@ -211,15 +211,11 @@ ext4_set_acl(handle_t *handle, struct inode *inode, int type, + case ACL_TYPE_ACCESS: + name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS; + if (acl) { +- error = posix_acl_equiv_mode(acl, &inode->i_mode); +- if (error < 0) ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl); ++ if (error) + return error; +- else { +- inode->i_ctime = ext4_current_time(inode); +- ext4_mark_inode_dirty(handle, inode); +- if (error == 0) +- acl = NULL; +- } ++ inode->i_ctime = ext4_current_time(inode); ++ ext4_mark_inode_dirty(handle, inode); + } + break; + +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c +index e350be6c7ac6..55af0d98d968 100644 +--- a/fs/ext4/inline.c ++++ b/fs/ext4/inline.c +@@ -339,8 +339,10 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode, + + len -= EXT4_MIN_INLINE_DATA_SIZE; + value = kzalloc(len, GFP_NOFS); +- if (!value) ++ if (!value) { ++ error = -ENOMEM; + goto out; ++ } + + error = ext4_xattr_ibody_get(inode, i.name_index, i.name, + value, len); +@@ -1145,10 +1147,9 @@ static int ext4_finish_convert_inline_dir(handle_t *handle, + set_buffer_uptodate(dir_block); + err = ext4_handle_dirty_dirent_node(handle, inode, dir_block); + if (err) +- goto out; ++ return err; + set_buffer_verified(dir_block); +-out: +- return err; ++ return ext4_mark_inode_dirty(handle, inode); + } + + static int ext4_convert_inline_data_nolock(handle_t *handle, +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 31179ba2072c..5fb975495c2d 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -759,6 +759,20 @@ has_zeroout: + int ret = check_block_validity(inode, map); + if (ret != 0) + return ret; ++ ++ /* ++ * Inodes with freshly allocated blocks where contents will be ++ * visible after transaction commit must be on transaction's ++ * 
ordered data list. ++ */ ++ if (map->m_flags & EXT4_MAP_NEW && ++ !(map->m_flags & EXT4_MAP_UNWRITTEN) && ++ !IS_NOQUOTA(inode) && ++ ext4_should_order_data(inode)) { ++ ret = ext4_jbd2_file_inode(handle, inode); ++ if (ret) ++ return ret; ++ } + } + return retval; + } +@@ -1119,15 +1133,6 @@ static int ext4_write_end(struct file *file, + int i_size_changed = 0; + + trace_ext4_write_end(inode, pos, len, copied); +- if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) { +- ret = ext4_jbd2_file_inode(handle, inode); +- if (ret) { +- unlock_page(page); +- page_cache_release(page); +- goto errout; +- } +- } +- + if (ext4_has_inline_data(inode)) { + ret = ext4_write_inline_data_end(inode, pos, len, + copied, page); +@@ -4178,6 +4183,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) + struct inode *inode; + journal_t *journal = EXT4_SB(sb)->s_journal; + long ret; ++ loff_t size; + int block; + uid_t i_uid; + gid_t i_gid; +@@ -4270,6 +4276,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) + ei->i_file_acl |= + ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; + inode->i_size = ext4_isize(raw_inode); ++ if ((size = i_size_read(inode)) < 0) { ++ EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size); ++ ret = -EIO; ++ goto bad_inode; ++ } + ei->i_disksize = inode->i_size; + #ifdef CONFIG_QUOTA + ei->i_reserved_quota = 0; +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index cb9eec025ba8..83ed61a6cfcb 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -668,7 +668,7 @@ static void ext4_mb_mark_free_simple(struct super_block *sb, + ext4_grpblk_t min; + ext4_grpblk_t max; + ext4_grpblk_t chunk; +- unsigned short border; ++ unsigned int border; + + BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb)); + +@@ -2222,7 +2222,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) + struct ext4_group_info *grinfo; + struct sg { + struct ext4_group_info info; +- ext4_grpblk_t counters[16]; ++ ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2]; + } sg; + + group--; +diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c +index 44abc2f286e0..9c4f3c732bce 100644 +--- a/fs/f2fs/acl.c ++++ b/fs/f2fs/acl.c +@@ -223,12 +223,10 @@ static int f2fs_set_acl(struct inode *inode, int type, struct posix_acl *acl) + case ACL_TYPE_ACCESS: + name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS; + if (acl) { +- error = posix_acl_equiv_mode(acl, &inode->i_mode); +- if (error < 0) ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl); ++ if (error) + return error; + set_acl_inode(fi, inode->i_mode); +- if (error == 0) +- acl = NULL; + } + break; + +diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c +index 8d9943786c31..a73dddedc803 100644 +--- a/fs/f2fs/debug.c ++++ b/fs/f2fs/debug.c +@@ -294,6 +294,7 @@ static int stat_open(struct inode *inode, struct file *file) + } + + static const struct file_operations stat_fops = { ++ .owner = THIS_MODULE, + .open = stat_open, + .read = seq_read, + .llseek = seq_lseek, +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index 35f604b5f408..1dce93041012 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -128,6 +128,7 @@ static void fuse_file_put(struct fuse_file *ff, bool sync) + struct fuse_req *req = ff->reserved_req; + + if (sync) { ++ req->force = 1; + req->background = 0; + fuse_request_send(ff->fc, req); + path_put(&req->misc.release.path); +@@ -2398,6 +2399,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, + loff_t i_size; + size_t count = iov_length(iov, nr_segs); + struct fuse_io_priv *io; ++ bool 
is_sync = is_sync_kiocb(iocb); + + pos = offset; + inode = file->f_mapping->host; +@@ -2433,7 +2435,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, + * to wait on real async I/O requests, so we must submit this request + * synchronously. + */ +- if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE) ++ if (!is_sync && (offset + count > i_size) && rw == WRITE) + io->async = false; + + if (rw == WRITE) +@@ -2445,7 +2447,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, + fuse_aio_complete(io, ret < 0 ? ret : 0, -1); + + /* we have a non-extending, async request, so return */ +- if (!is_sync_kiocb(iocb)) ++ if (!is_sync) + return -EIOCBQUEUED; + + ret = wait_on_sync_kiocb(iocb); +diff --git a/fs/generic_acl.c b/fs/generic_acl.c +index b3f3676796d3..7855cfb938f6 100644 +--- a/fs/generic_acl.c ++++ b/fs/generic_acl.c +@@ -82,19 +82,21 @@ generic_acl_set(struct dentry *dentry, const char *name, const void *value, + return PTR_ERR(acl); + } + if (acl) { ++ struct posix_acl *old_acl; ++ + error = posix_acl_valid(acl); + if (error) + goto failed; + switch (type) { + case ACL_TYPE_ACCESS: +- error = posix_acl_equiv_mode(acl, &inode->i_mode); ++ old_acl = acl; ++ error = posix_acl_update_mode(inode, &inode->i_mode, ++ &acl); + if (error < 0) + goto failed; ++ if (!acl) ++ posix_acl_release(old_acl); + inode->i_ctime = CURRENT_TIME; +- if (error == 0) { +- posix_acl_release(acl); +- acl = NULL; +- } + break; + case ACL_TYPE_DEFAULT: + if (!S_ISDIR(inode->i_mode)) { +diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c +index f69ac0af5496..a61b0c2b57ab 100644 +--- a/fs/gfs2/acl.c ++++ b/fs/gfs2/acl.c +@@ -268,15 +268,13 @@ static int gfs2_xattr_system_set(struct dentry *dentry, const char *name, + + if (type == ACL_TYPE_ACCESS) { + umode_t mode = inode->i_mode; +- error = posix_acl_equiv_mode(acl, &mode); ++ struct posix_acl *old_acl = acl; + +- if (error <= 0) { +- posix_acl_release(acl); +- acl = NULL; +- +- if (error < 0) +- return error; +- } ++ error = posix_acl_update_mode(inode, &mode, &acl); ++ if (error < 0) ++ goto out_release; ++ if (!acl) ++ posix_acl_release(old_acl); + + error = gfs2_set_mode(inode, mode); + if (error) +diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c +index 223283c30111..9335b8d3cf52 100644 +--- a/fs/jffs2/acl.c ++++ b/fs/jffs2/acl.c +@@ -243,9 +243,10 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl) + case ACL_TYPE_ACCESS: + xprefix = JFFS2_XPREFIX_ACL_ACCESS; + if (acl) { +- umode_t mode = inode->i_mode; +- rc = posix_acl_equiv_mode(acl, &mode); +- if (rc < 0) ++ umode_t mode; ++ ++ rc = posix_acl_update_mode(inode, &mode, &acl); ++ if (rc) + return rc; + if (inode->i_mode != mode) { + struct iattr attr; +@@ -257,8 +258,6 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl) + if (rc < 0) + return rc; + } +- if (rc == 0) +- acl = NULL; + } + break; + case ACL_TYPE_DEFAULT: +diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c +index 42d67f9757bf..29a28601cb93 100644 +--- a/fs/jfs/xattr.c ++++ b/fs/jfs/xattr.c +@@ -693,8 +693,9 @@ static int can_set_system_xattr(struct inode *inode, const char *name, + return rc; + } + if (acl) { +- rc = posix_acl_equiv_mode(acl, &inode->i_mode); +- posix_acl_release(acl); ++ struct posix_acl *old_acl = acl; ++ rc = posix_acl_update_mode(inode, &inode->i_mode, &acl); ++ posix_acl_release(old_acl); + if (rc < 0) { + printk(KERN_ERR + "posix_acl_equiv_mode returned %d\n", +diff --git a/fs/nfs/nfs4filelayoutdev.c 
b/fs/nfs/nfs4filelayoutdev.c +index 678cb8964532..b906d8e55d83 100644 +--- a/fs/nfs/nfs4filelayoutdev.c ++++ b/fs/nfs/nfs4filelayoutdev.c +@@ -821,7 +821,8 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx) + nfs4_wait_ds_connect(ds); + } + out_test_devid: +- if (filelayout_test_devid_unavailable(devid)) ++ if (ret->ds_clp == NULL || ++ filelayout_test_devid_unavailable(devid)) + ret = NULL; + out: + return ret; +diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c +index 988efb4caac0..f5d27ca10146 100644 +--- a/fs/nfs/nfs4xdr.c ++++ b/fs/nfs/nfs4xdr.c +@@ -2435,7 +2435,7 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr, + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); +- replen = hdr.replen + op_decode_hdr_maxsz + 1; ++ replen = hdr.replen + op_decode_hdr_maxsz; + encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr); + + xdr_inline_pages(&req->rq_rcv_buf, replen << 2, +diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c +index 8a404576fb26..51ff9506cb0f 100644 +--- a/fs/ocfs2/acl.c ++++ b/fs/ocfs2/acl.c +@@ -274,20 +274,14 @@ static int ocfs2_set_acl(handle_t *handle, + case ACL_TYPE_ACCESS: + name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS; + if (acl) { +- umode_t mode = inode->i_mode; +- ret = posix_acl_equiv_mode(acl, &mode); +- if (ret < 0) ++ umode_t mode; ++ ret = posix_acl_update_mode(inode, &mode, &acl); ++ if (ret) ++ return ret; ++ ret = ocfs2_acl_set_mode(inode, di_bh, ++ handle, mode); ++ if (ret) + return ret; +- else { +- if (ret == 0) +- acl = NULL; +- +- ret = ocfs2_acl_set_mode(inode, di_bh, +- handle, mode); +- if (ret) +- return ret; +- +- } + } + break; + case ACL_TYPE_DEFAULT: +diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c +index 0c60ef2d8056..b9d16098ede3 100644 +--- a/fs/ocfs2/ioctl.c ++++ b/fs/ocfs2/ioctl.c +@@ -34,9 +34,8 @@ + copy_to_user((typeof(a) __user *)b, &(a), sizeof(a)) + + /* +- * This call is void because we are already reporting an error that may +- * be -EFAULT. The error will be returned from the ioctl(2) call. It's +- * just a best-effort to tell userspace that this request caused the error. ++ * This is just a best-effort to tell userspace that this request ++ * caused the error. 
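/*
 * Editor's note -- a minimal sketch of the control-flow refactor applied to
 * the fs/ocfs2/ioctl.c handlers below, not part of the patch. The handlers
 * previously primed 'status = -EFAULT' and funnelled every exit through a
 * bail: label; the rewrite returns at each failure point, so -EFAULT is
 * reported only where a user copy can actually fault. The helpers here
 * (fake_from_user/fake_to_user) are hypothetical stand-ins for
 * o2info_from_user()/o2info_to_user():
 */
#include <errno.h>
#include <string.h>

struct info { unsigned long blocksize; };

static int fake_from_user(struct info *dst, const struct info *src)
{
        memcpy(dst, src, sizeof(*dst));  /* stand-in: pretend the copy worked */
        return 0;
}

static int fake_to_user(struct info *dst, const struct info *src)
{
        memcpy(dst, src, sizeof(*dst));
        return 0;
}

/* After the refactor: no primed status variable, no bail: label. */
static int handle_blocksize(struct info *req, unsigned long blocksize)
{
        struct info oib;

        if (fake_from_user(&oib, req))
                return -EFAULT;
        oib.blocksize = blocksize;
        if (fake_to_user(req, &oib))
                return -EFAULT;
        return 0;
}

int main(void)
{
        struct info req = { 0 };

        return handle_blocksize(&req, 4096) ? 1 : 0;
}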
+ */ + static inline void o2info_set_request_error(struct ocfs2_info_request *kreq, + struct ocfs2_info_request __user *req) +@@ -145,136 +144,105 @@ bail: + int ocfs2_info_handle_blocksize(struct inode *inode, + struct ocfs2_info_request __user *req) + { +- int status = -EFAULT; + struct ocfs2_info_blocksize oib; + + if (o2info_from_user(oib, req)) +- goto bail; ++ return -EFAULT; + + oib.ib_blocksize = inode->i_sb->s_blocksize; + + o2info_set_request_filled(&oib.ib_req); + + if (o2info_to_user(oib, req)) +- goto bail; +- +- status = 0; +-bail: +- if (status) +- o2info_set_request_error(&oib.ib_req, req); ++ return -EFAULT; + +- return status; ++ return 0; + } + + int ocfs2_info_handle_clustersize(struct inode *inode, + struct ocfs2_info_request __user *req) + { +- int status = -EFAULT; + struct ocfs2_info_clustersize oic; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + if (o2info_from_user(oic, req)) +- goto bail; ++ return -EFAULT; + + oic.ic_clustersize = osb->s_clustersize; + + o2info_set_request_filled(&oic.ic_req); + + if (o2info_to_user(oic, req)) +- goto bail; +- +- status = 0; +-bail: +- if (status) +- o2info_set_request_error(&oic.ic_req, req); ++ return -EFAULT; + +- return status; ++ return 0; + } + + int ocfs2_info_handle_maxslots(struct inode *inode, + struct ocfs2_info_request __user *req) + { +- int status = -EFAULT; + struct ocfs2_info_maxslots oim; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + if (o2info_from_user(oim, req)) +- goto bail; ++ return -EFAULT; + + oim.im_max_slots = osb->max_slots; + + o2info_set_request_filled(&oim.im_req); + + if (o2info_to_user(oim, req)) +- goto bail; ++ return -EFAULT; + +- status = 0; +-bail: +- if (status) +- o2info_set_request_error(&oim.im_req, req); +- +- return status; ++ return 0; + } + + int ocfs2_info_handle_label(struct inode *inode, + struct ocfs2_info_request __user *req) + { +- int status = -EFAULT; + struct ocfs2_info_label oil; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + if (o2info_from_user(oil, req)) +- goto bail; ++ return -EFAULT; + + memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN); + + o2info_set_request_filled(&oil.il_req); + + if (o2info_to_user(oil, req)) +- goto bail; ++ return -EFAULT; + +- status = 0; +-bail: +- if (status) +- o2info_set_request_error(&oil.il_req, req); +- +- return status; ++ return 0; + } + + int ocfs2_info_handle_uuid(struct inode *inode, + struct ocfs2_info_request __user *req) + { +- int status = -EFAULT; + struct ocfs2_info_uuid oiu; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + if (o2info_from_user(oiu, req)) +- goto bail; ++ return -EFAULT; + + memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1); + + o2info_set_request_filled(&oiu.iu_req); + + if (o2info_to_user(oiu, req)) +- goto bail; +- +- status = 0; +-bail: +- if (status) +- o2info_set_request_error(&oiu.iu_req, req); ++ return -EFAULT; + +- return status; ++ return 0; + } + + int ocfs2_info_handle_fs_features(struct inode *inode, + struct ocfs2_info_request __user *req) + { +- int status = -EFAULT; + struct ocfs2_info_fs_features oif; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + if (o2info_from_user(oif, req)) +- goto bail; ++ return -EFAULT; + + oif.if_compat_features = osb->s_feature_compat; + oif.if_incompat_features = osb->s_feature_incompat; +@@ -283,39 +251,28 @@ int ocfs2_info_handle_fs_features(struct inode *inode, + o2info_set_request_filled(&oif.if_req); + + if (o2info_to_user(oif, req)) +- goto bail; ++ return -EFAULT; + +- status = 0; +-bail: +- 
if (status) +- o2info_set_request_error(&oif.if_req, req); +- +- return status; ++ return 0; + } + + int ocfs2_info_handle_journal_size(struct inode *inode, + struct ocfs2_info_request __user *req) + { +- int status = -EFAULT; + struct ocfs2_info_journal_size oij; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + if (o2info_from_user(oij, req)) +- goto bail; ++ return -EFAULT; + + oij.ij_journal_size = osb->journal->j_inode->i_size; + + o2info_set_request_filled(&oij.ij_req); + + if (o2info_to_user(oij, req)) +- goto bail; ++ return -EFAULT; + +- status = 0; +-bail: +- if (status) +- o2info_set_request_error(&oij.ij_req, req); +- +- return status; ++ return 0; + } + + int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb, +@@ -371,7 +328,7 @@ int ocfs2_info_handle_freeinode(struct inode *inode, + u32 i; + u64 blkno = -1; + char namebuf[40]; +- int status = -EFAULT, type = INODE_ALLOC_SYSTEM_INODE; ++ int status, type = INODE_ALLOC_SYSTEM_INODE; + struct ocfs2_info_freeinode *oifi = NULL; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct inode *inode_alloc = NULL; +@@ -383,8 +340,10 @@ int ocfs2_info_handle_freeinode(struct inode *inode, + goto out_err; + } + +- if (o2info_from_user(*oifi, req)) +- goto bail; ++ if (o2info_from_user(*oifi, req)) { ++ status = -EFAULT; ++ goto out_free; ++ } + + oifi->ifi_slotnum = osb->max_slots; + +@@ -421,14 +380,16 @@ int ocfs2_info_handle_freeinode(struct inode *inode, + + o2info_set_request_filled(&oifi->ifi_req); + +- if (o2info_to_user(*oifi, req)) +- goto bail; ++ if (o2info_to_user(*oifi, req)) { ++ status = -EFAULT; ++ goto out_free; ++ } + + status = 0; + bail: + if (status) + o2info_set_request_error(&oifi->ifi_req, req); +- ++out_free: + kfree(oifi); + out_err: + return status; +@@ -655,7 +616,7 @@ int ocfs2_info_handle_freefrag(struct inode *inode, + { + u64 blkno = -1; + char namebuf[40]; +- int status = -EFAULT, type = GLOBAL_BITMAP_SYSTEM_INODE; ++ int status, type = GLOBAL_BITMAP_SYSTEM_INODE; + + struct ocfs2_info_freefrag *oiff; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); +@@ -668,8 +629,10 @@ int ocfs2_info_handle_freefrag(struct inode *inode, + goto out_err; + } + +- if (o2info_from_user(*oiff, req)) +- goto bail; ++ if (o2info_from_user(*oiff, req)) { ++ status = -EFAULT; ++ goto out_free; ++ } + /* + * chunksize from userspace should be power of 2. 
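/*
 * Editor's note -- sketch only, not part of the patch. The comment above
 * requires the userspace-supplied chunksize to be a power of two; the
 * standard branch-free test for that property (what is_power_of_2()
 * implements in the kernel) is the x & (x - 1) trick:
 */
#include <assert.h>

static int is_pow2(unsigned long x)
{
        /* A power of two has exactly one bit set, so clearing the lowest
         * set bit via x & (x - 1) must leave zero; reject x == 0 itself. */
        return x != 0 && (x & (x - 1)) == 0;
}

int main(void)
{
        assert(is_pow2(1) && is_pow2(4096));
        assert(!is_pow2(0) && !is_pow2(24));
        return 0;
}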
+ */ +@@ -708,14 +671,14 @@ int ocfs2_info_handle_freefrag(struct inode *inode, + + if (o2info_to_user(*oiff, req)) { + status = -EFAULT; +- goto bail; ++ goto out_free; + } + + status = 0; + bail: + if (status) + o2info_set_request_error(&oiff->iff_req, req); +- ++out_free: + kfree(oiff); + out_err: + return status; +@@ -724,23 +687,17 @@ out_err: + int ocfs2_info_handle_unknown(struct inode *inode, + struct ocfs2_info_request __user *req) + { +- int status = -EFAULT; + struct ocfs2_info_request oir; + + if (o2info_from_user(oir, req)) +- goto bail; ++ return -EFAULT; + + o2info_clear_request_filled(&oir); + + if (o2info_to_user(oir, req)) +- goto bail; ++ return -EFAULT; + +- status = 0; +-bail: +- if (status) +- o2info_set_request_error(&oir, req); +- +- return status; ++ return 0; + } + + /* +diff --git a/fs/posix_acl.c b/fs/posix_acl.c +index 3542f1f814e2..1da000aabb08 100644 +--- a/fs/posix_acl.c ++++ b/fs/posix_acl.c +@@ -407,6 +407,37 @@ posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p) + } + EXPORT_SYMBOL(posix_acl_create); + ++/** ++ * posix_acl_update_mode - update mode in set_acl ++ * ++ * Update the file mode when setting an ACL: compute the new file permission ++ * bits based on the ACL. In addition, if the ACL is equivalent to the new ++ * file mode, set *acl to NULL to indicate that no ACL should be set. ++ * ++ * As with chmod, clear the setgit bit if the caller is not in the owning group ++ * or capable of CAP_FSETID (see inode_change_ok). ++ * ++ * Called from set_acl inode operations. ++ */ ++int posix_acl_update_mode(struct inode *inode, umode_t *mode_p, ++ struct posix_acl **acl) ++{ ++ umode_t mode = inode->i_mode; ++ int error; ++ ++ error = posix_acl_equiv_mode(*acl, &mode); ++ if (error < 0) ++ return error; ++ if (error == 0) ++ *acl = NULL; ++ if (!in_group_p(inode->i_gid) && ++ !capable_wrt_inode_uidgid(inode, CAP_FSETID)) ++ mode &= ~S_ISGID; ++ *mode_p = mode; ++ return 0; ++} ++EXPORT_SYMBOL(posix_acl_update_mode); ++ + int + posix_acl_chmod(struct posix_acl **acl, gfp_t gfp, umode_t mode) + { +diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c +index 6c8767fdfc6a..2d73589f37d6 100644 +--- a/fs/reiserfs/xattr_acl.c ++++ b/fs/reiserfs/xattr_acl.c +@@ -286,13 +286,9 @@ reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode, + case ACL_TYPE_ACCESS: + name = POSIX_ACL_XATTR_ACCESS; + if (acl) { +- error = posix_acl_equiv_mode(acl, &inode->i_mode); +- if (error < 0) ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl); ++ if (error) + return error; +- else { +- if (error == 0) +- acl = NULL; +- } + } + break; + case ACL_TYPE_DEFAULT: +diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c +index 349f31a30f40..fdf2ca1dd771 100644 +--- a/fs/ubifs/tnc.c ++++ b/fs/ubifs/tnc.c +@@ -34,6 +34,11 @@ + #include + #include "ubifs.h" + ++static int try_read_node(const struct ubifs_info *c, void *buf, int type, ++ int len, int lnum, int offs); ++static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key, ++ struct ubifs_zbranch *zbr, void *node); ++ + /* + * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions. 
+ * @NAME_LESS: name corresponding to the first argument is less than second +@@ -419,7 +424,19 @@ static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr, + return 0; + } + +- err = ubifs_tnc_read_node(c, zbr, node); ++ if (c->replaying) { ++ err = fallible_read_node(c, &zbr->key, zbr, node); ++ /* ++ * When the node was not found, return -ENOENT, 0 otherwise. ++ * Negative return codes stay as-is. ++ */ ++ if (err == 0) ++ err = -ENOENT; ++ else if (err == 1) ++ err = 0; ++ } else { ++ err = ubifs_tnc_read_node(c, zbr, node); ++ } + if (err) + return err; + +@@ -2783,7 +2800,11 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c, + if (nm->name) { + if (err) { + /* Handle collisions */ +- err = resolve_collision(c, key, &znode, &n, nm); ++ if (c->replaying) ++ err = fallible_resolve_collision(c, key, &znode, &n, ++ nm, 0); ++ else ++ err = resolve_collision(c, key, &znode, &n, nm); + dbg_tnc("rc returned %d, znode %p, n %d", + err, znode, n); + if (unlikely(err < 0)) +diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c +index 306d883d89bc..5e9a9a62a45c 100644 +--- a/fs/xfs/xfs_acl.c ++++ b/fs/xfs/xfs_acl.c +@@ -388,16 +388,15 @@ xfs_xattr_acl_set(struct dentry *dentry, const char *name, + goto out_release; + + if (type == ACL_TYPE_ACCESS) { +- umode_t mode = inode->i_mode; +- error = posix_acl_equiv_mode(acl, &mode); ++ umode_t mode; ++ struct posix_acl *old_acl = acl; + +- if (error <= 0) { +- posix_acl_release(acl); +- acl = NULL; ++ error = posix_acl_update_mode(inode, &mode, &acl); + +- if (error < 0) +- return error; +- } ++ if (error) ++ goto out_release; ++ if (!acl) ++ posix_acl_release(old_acl); + + error = xfs_set_mode(inode, mode); + if (error) +diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h +index 418d270e1806..e73c19e90e38 100644 +--- a/include/crypto/algapi.h ++++ b/include/crypto/algapi.h +@@ -386,5 +386,21 @@ static inline int crypto_requires_sync(u32 type, u32 mask) + return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC; + } + +-#endif /* _CRYPTO_ALGAPI_H */ ++noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); ++ ++/** ++ * crypto_memneq - Compare two areas of memory without leaking ++ * timing information. ++ * ++ * @a: One area of memory ++ * @b: Another area of memory ++ * @size: The size of the area. ++ * ++ * Returns 0 when data is equal, 1 otherwise. ++ */ ++static inline int crypto_memneq(const void *a, const void *b, size_t size) ++{ ++ return __crypto_memneq(a, b, size) != 0UL ? 
1 : 0; ++} + ++#endif /* _CRYPTO_ALGAPI_H */ +diff --git a/include/linux/can/core.h b/include/linux/can/core.h +index 78c6c52073ad..6bdc00b6df01 100644 +--- a/include/linux/can/core.h ++++ b/include/linux/can/core.h +@@ -45,10 +45,9 @@ struct can_proto { + extern int can_proto_register(const struct can_proto *cp); + extern void can_proto_unregister(const struct can_proto *cp); + +-extern int can_rx_register(struct net_device *dev, canid_t can_id, +- canid_t mask, +- void (*func)(struct sk_buff *, void *), +- void *data, char *ident); ++int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, ++ void (*func)(struct sk_buff *, void *), ++ void *data, char *ident, struct sock *sk); + + extern void can_rx_unregister(struct net_device *dev, canid_t can_id, + canid_t mask, +diff --git a/include/linux/cpu.h b/include/linux/cpu.h +index 9f3c7e81270a..d0d5946b6ee9 100644 +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h +@@ -119,22 +119,16 @@ enum { + { .notifier_call = fn, .priority = pri }; \ + register_cpu_notifier(&fn##_nb); \ + } +-#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ +-#define cpu_notifier(fn, pri) do { (void)(fn); } while (0) +-#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ +-#ifdef CONFIG_HOTPLUG_CPU + extern int register_cpu_notifier(struct notifier_block *nb); + extern void unregister_cpu_notifier(struct notifier_block *nb); +-#else + +-#ifndef MODULE +-extern int register_cpu_notifier(struct notifier_block *nb); +-#else ++#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ ++#define cpu_notifier(fn, pri) do { (void)(fn); } while (0) ++ + static inline int register_cpu_notifier(struct notifier_block *nb) + { + return 0; + } +-#endif + + static inline void unregister_cpu_notifier(struct notifier_block *nb) + { +diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h +index 0976fc46d1e0..7f831b28ee68 100644 +--- a/include/linux/jump_label.h ++++ b/include/linux/jump_label.h +@@ -208,4 +208,20 @@ static inline bool static_key_enabled(struct static_key *key) + return (atomic_read(&key->enabled) > 0); + } + ++static inline void static_key_enable(struct static_key *key) ++{ ++ int count = atomic_read(&key->enabled); ++ ++ if (!count) ++ static_key_slow_inc(key); ++} ++ ++static inline void static_key_disable(struct static_key *key) ++{ ++ int count = atomic_read(&key->enabled); ++ ++ if (count) ++ static_key_slow_dec(key); ++} ++ + #endif /* _LINUX_JUMP_LABEL_H */ +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h +index 8db53cfaccdb..71bcaf585edd 100644 +--- a/include/linux/kvm_host.h ++++ b/include/linux/kvm_host.h +@@ -145,7 +145,8 @@ struct kvm_io_range { + #define NR_IOBUS_DEVS 1000 + + struct kvm_io_bus { +- int dev_count; ++ int dev_count; ++ int ioeventfd_count; + struct kvm_io_range range[]; + }; + +@@ -162,8 +163,8 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, + void *val); + int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, + int len, struct kvm_io_device *dev); +-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, +- struct kvm_io_device *dev); ++void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, ++ struct kvm_io_device *dev); + + #ifdef CONFIG_KVM_ASYNC_PF + struct kvm_async_pf { +diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h +index 0adf073f13b3..669af5eaa898 100644 +--- a/include/linux/lockd/lockd.h ++++ b/include/linux/lockd/lockd.h +@@ -355,7 
+355,8 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp) + static inline int nlm_compare_locks(const struct file_lock *fl1, + const struct file_lock *fl2) + { +- return fl1->fl_pid == fl2->fl_pid ++ return file_inode(fl1->fl_file) == file_inode(fl2->fl_file) ++ && fl1->fl_pid == fl2->fl_pid + && fl1->fl_owner == fl2->fl_owner + && fl1->fl_start == fl2->fl_start + && fl1->fl_end == fl2->fl_end +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index 45a618b58864..157a47c998e5 100644 +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -1729,14 +1729,19 @@ static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen) + return NAPI_GRO_CB(skb)->frag0_len < hlen; + } + ++static inline void skb_gro_frag0_invalidate(struct sk_buff *skb) ++{ ++ NAPI_GRO_CB(skb)->frag0 = NULL; ++ NAPI_GRO_CB(skb)->frag0_len = 0; ++} ++ + static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen, + unsigned int offset) + { + if (!pskb_may_pull(skb, hlen)) + return NULL; + +- NAPI_GRO_CB(skb)->frag0 = NULL; +- NAPI_GRO_CB(skb)->frag0_len = 0; ++ skb_gro_frag0_invalidate(skb); + return skb->data + offset; + } + +diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h +index 7931efe71175..43cb8d59d0a7 100644 +--- a/include/linux/posix_acl.h ++++ b/include/linux/posix_acl.h +@@ -89,6 +89,7 @@ extern int posix_acl_permission(struct inode *, const struct posix_acl *, int); + extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t); + extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *); + extern int posix_acl_create(struct posix_acl **, gfp_t, umode_t *); ++extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **); + extern int posix_acl_chmod(struct posix_acl **, gfp_t, umode_t); + + extern struct posix_acl *get_posix_acl(struct inode *, int); +diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h +index a8c2ef6d3b93..9078b31d336f 100644 +--- a/include/net/cipso_ipv4.h ++++ b/include/net/cipso_ipv4.h +@@ -303,6 +303,10 @@ static inline int cipso_v4_validate(const struct sk_buff *skb, + } + + for (opt_iter = 6; opt_iter < opt_len;) { ++ if (opt_iter + 1 == opt_len) { ++ err_offset = opt_iter; ++ goto out; ++ } + tag_len = opt[opt_iter + 1]; + if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) { + err_offset = opt_iter + 1; +diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h +index 8275e539bace..969aff6f657e 100644 +--- a/include/rdma/ib_sa.h ++++ b/include/rdma/ib_sa.h +@@ -137,12 +137,12 @@ struct ib_sa_path_rec { + union ib_gid sgid; + __be16 dlid; + __be16 slid; +- int raw_traffic; ++ u8 raw_traffic; + /* reserved */ + __be32 flow_label; + u8 hop_limit; + u8 traffic_class; +- int reversible; ++ u8 reversible; + u8 numb_path; + __be16 pkey; + __be16 qos_class; +@@ -193,7 +193,7 @@ struct ib_sa_mcmember_rec { + u8 hop_limit; + u8 scope; + u8 join_state; +- int proxy_join; ++ u8 proxy_join; + }; + + /* Service Record Component Mask Sec 15.2.5.14 Ver 1.1 */ +diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h +index 5a4c04a75b3d..55c9b99ff9a6 100644 +--- a/include/trace/events/syscalls.h ++++ b/include/trace/events/syscalls.h +@@ -1,5 +1,6 @@ + #undef TRACE_SYSTEM + #define TRACE_SYSTEM raw_syscalls ++#undef TRACE_INCLUDE_FILE + #define TRACE_INCLUDE_FILE syscalls + + #if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ) +diff --git a/kernel/cpu.c b/kernel/cpu.c +index 
bc255e25d5dd..a6c242489861 100644 +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -185,8 +185,6 @@ static int cpu_notify(unsigned long val, void *v) + return __cpu_notify(val, v, -1, NULL); + } + +-#ifdef CONFIG_HOTPLUG_CPU +- + static void cpu_notify_nofail(unsigned long val, void *v) + { + BUG_ON(cpu_notify(val, v)); +@@ -201,6 +199,7 @@ void __ref unregister_cpu_notifier(struct notifier_block *nb) + } + EXPORT_SYMBOL(unregister_cpu_notifier); + ++#ifdef CONFIG_HOTPLUG_CPU + /** + * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU + * @cpu: a CPU id +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 76e26b8e4e41..5a550f2e37f2 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -7470,7 +7470,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn) + ret = inherit_task_group(event, parent, parent_ctx, + child, ctxn, &inherited_all); + if (ret) +- break; ++ goto out_unlock; + } + + /* +@@ -7486,7 +7486,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn) + ret = inherit_task_group(event, parent, parent_ctx, + child, ctxn, &inherited_all); + if (ret) +- break; ++ goto out_unlock; + } + + raw_spin_lock_irqsave(&parent_ctx->lock, flags); +@@ -7514,6 +7514,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn) + } + + raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); ++out_unlock: + mutex_unlock(&parent_ctx->mutex); + + perf_unpin_context(parent_ctx); +diff --git a/kernel/ptrace.c b/kernel/ptrace.c +index 72b0b3e0e065..d34c05ac6f99 100644 +--- a/kernel/ptrace.c ++++ b/kernel/ptrace.c +@@ -150,11 +150,17 @@ static void ptrace_unfreeze_traced(struct task_struct *task) + + WARN_ON(!task->ptrace || task->parent != current); + ++ /* ++ * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely. ++ * Recheck state under the lock to close this race. 
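/*
 * Editor's note -- illustrative sketch of the pattern the ptrace fix here
 * applies, not part of the patch. ptrace_unfreeze_traced() used to rewrite
 * task->state unconditionally; the fix only touches it if the task is still
 * in __TASK_TRACED once siglock is held, because PTRACE_LISTEN may have woken
 * the task between the caller's earlier check and lock acquisition. A
 * userspace analogue of "check, lock, recheck" with pthreads:
 */
#include <pthread.h>

enum state { FROZEN, RUNNING };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static enum state st = FROZEN;

static void unfreeze(void)
{
        pthread_mutex_lock(&lock);
        /* Recheck under the lock: another path may have changed the state
         * since we last looked, so act only if it is still what we expect. */
        if (st == FROZEN)
                st = RUNNING;
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        unfreeze();
        return st == RUNNING ? 0 : 1;
}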
++ */ + spin_lock_irq(&task->sighand->siglock); +- if (__fatal_signal_pending(task)) +- wake_up_state(task, __TASK_TRACED); +- else +- task->state = TASK_TRACED; ++ if (task->state == __TASK_TRACED) { ++ if (__fatal_signal_pending(task)) ++ wake_up_state(task, __TASK_TRACED); ++ else ++ task->state = TASK_TRACED; ++ } + spin_unlock_irq(&task->sighand->siglock); + } + +diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c +index d9ca207cec0c..286c92f5573c 100644 +--- a/kernel/rtmutex.c ++++ b/kernel/rtmutex.c +@@ -64,8 +64,72 @@ static inline void clear_rt_mutex_waiters(struct rt_mutex *lock) + + static void fixup_rt_mutex_waiters(struct rt_mutex *lock) + { +- if (!rt_mutex_has_waiters(lock)) +- clear_rt_mutex_waiters(lock); ++ unsigned long owner, *p = (unsigned long *) &lock->owner; ++ ++ if (rt_mutex_has_waiters(lock)) ++ return; ++ ++ /* ++ * The rbtree has no waiters enqueued, now make sure that the ++ * lock->owner still has the waiters bit set, otherwise the ++ * following can happen: ++ * ++ * CPU 0 CPU 1 CPU2 ++ * l->owner=T1 ++ * rt_mutex_lock(l) ++ * lock(l->lock) ++ * l->owner = T1 | HAS_WAITERS; ++ * enqueue(T2) ++ * boost() ++ * unlock(l->lock) ++ * block() ++ * ++ * rt_mutex_lock(l) ++ * lock(l->lock) ++ * l->owner = T1 | HAS_WAITERS; ++ * enqueue(T3) ++ * boost() ++ * unlock(l->lock) ++ * block() ++ * signal(->T2) signal(->T3) ++ * lock(l->lock) ++ * dequeue(T2) ++ * deboost() ++ * unlock(l->lock) ++ * lock(l->lock) ++ * dequeue(T3) ++ * ==> wait list is empty ++ * deboost() ++ * unlock(l->lock) ++ * lock(l->lock) ++ * fixup_rt_mutex_waiters() ++ * if (wait_list_empty(l) { ++ * l->owner = owner ++ * owner = l->owner & ~HAS_WAITERS; ++ * ==> l->owner = T1 ++ * } ++ * lock(l->lock) ++ * rt_mutex_unlock(l) fixup_rt_mutex_waiters() ++ * if (wait_list_empty(l) { ++ * owner = l->owner & ~HAS_WAITERS; ++ * cmpxchg(l->owner, T1, NULL) ++ * ===> Success (l->owner = NULL) ++ * ++ * l->owner = owner ++ * ==> l->owner = T1 ++ * } ++ * ++ * With the check for the waiter bit in place T3 on CPU2 will not ++ * overwrite. All tasks fiddling with the waiters bit are ++ * serialized by l->lock, so nothing else can modify the waiters ++ * bit. If the bit is set then nothing can change l->owner either ++ * so the simple RMW is safe. The cmpxchg() will simply fail if it ++ * happens in the middle of the RMW because the waiters bit is ++ * still set. 
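/*
 * Editor's note -- sketch of the guarded clear that follows, not part of the
 * patch. Instead of blindly storing the owner word without the waiters bit,
 * the fix reads the word once and writes it back only if the bit is still
 * set; every writer of that bit holds lock->wait_lock, so the plain
 * read-modify-write cannot race once the bit has been observed set. In C11
 * terms (ACCESS_ONCE is roughly a relaxed atomic access):
 */
#include <stdatomic.h>
#include <stdint.h>

#define HAS_WAITERS 1UL

static void clear_waiters_bit(atomic_uintptr_t *owner_word)
{
        uintptr_t owner = atomic_load_explicit(owner_word,
                                               memory_order_relaxed);

        /* Write back only when the bit is set; callers hold the wait_lock,
         * which serializes every task that fiddles with HAS_WAITERS. */
        if (owner & HAS_WAITERS)
                atomic_store_explicit(owner_word, owner & ~HAS_WAITERS,
                                      memory_order_relaxed);
}

int main(void)
{
        atomic_uintptr_t w;

        atomic_init(&w, 0x1000UL | HAS_WAITERS);
        clear_waiters_bit(&w);
        return atomic_load(&w) == 0x1000UL ? 0 : 1;
}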
++ */ ++ owner = ACCESS_ONCE(*p); ++ if (owner & RT_MUTEX_HAS_WAITERS) ++ ACCESS_ONCE(*p) = owner & ~RT_MUTEX_HAS_WAITERS; + } + + /* +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 6a366f9d08db..506e56ec56a9 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -179,14 +179,12 @@ struct static_key sched_feat_keys[__SCHED_FEAT_NR] = { + + static void sched_feat_disable(int i) + { +- if (static_key_enabled(&sched_feat_keys[i])) +- static_key_slow_dec(&sched_feat_keys[i]); ++ static_key_disable(&sched_feat_keys[i]); + } + + static void sched_feat_enable(int i) + { +- if (!static_key_enabled(&sched_feat_keys[i])) +- static_key_slow_inc(&sched_feat_keys[i]); ++ static_key_enable(&sched_feat_keys[i]); + } + #else + static void sched_feat_disable(int i) { }; +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index c4ce3a951a1c..1caa75585fa0 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -3402,11 +3402,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); + int ring_buffer_iter_empty(struct ring_buffer_iter *iter) + { + struct ring_buffer_per_cpu *cpu_buffer; ++ struct buffer_page *reader; ++ struct buffer_page *head_page; ++ struct buffer_page *commit_page; ++ unsigned commit; + + cpu_buffer = iter->cpu_buffer; + +- return iter->head_page == cpu_buffer->commit_page && +- iter->head == rb_commit_index(cpu_buffer); ++ /* Remember, trace recording is off when iterator is in use */ ++ reader = cpu_buffer->reader_page; ++ head_page = cpu_buffer->head_page; ++ commit_page = cpu_buffer->commit_page; ++ commit = rb_page_commit(commit_page); ++ ++ return ((iter->head_page == commit_page && iter->head == commit) || ++ (iter->head_page == reader && commit_page == head_page && ++ head_page->read == commit && ++ iter->head == rb_page_commit(cpu_buffer->reader_page))); + } + EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); + +@@ -4837,9 +4849,9 @@ static __init int test_ringbuffer(void) + rb_data[cpu].cnt = cpu; + rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu], + "rbtester/%d", cpu); +- if (WARN_ON(!rb_threads[cpu])) { ++ if (WARN_ON(IS_ERR(rb_threads[cpu]))) { + pr_cont("FAILED\n"); +- ret = -1; ++ ret = PTR_ERR(rb_threads[cpu]); + goto out_free; + } + +@@ -4849,9 +4861,9 @@ static __init int test_ringbuffer(void) + + /* Now create the rb hammer! */ + rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer"); +- if (WARN_ON(!rb_hammer)) { ++ if (WARN_ON(IS_ERR(rb_hammer))) { + pr_cont("FAILED\n"); +- ret = -1; ++ ret = PTR_ERR(rb_hammer); + goto out_free; + } + +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index d6e72522fc4e..edffb6781c0e 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -5468,11 +5468,13 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash, + return ret; + + out_reg: +- ret = register_ftrace_function_probe(glob, ops, count); ++ ret = alloc_snapshot(&global_trace); ++ if (ret < 0) ++ goto out; + +- if (ret >= 0) +- alloc_snapshot(&global_trace); ++ ret = register_ftrace_function_probe(glob, ops, count); + ++ out: + return ret < 0 ? ret : 0; + } + +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index d21c9ef0943c..3877483a20fd 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -1235,6 +1235,18 @@ out_unlock: + return ret; + } + ++/* ++ * foll_force can write to even unwritable pmd's, but only ++ * after we've gone through a cow cycle and they are dirty. 
++ */ ++static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page, ++ unsigned int flags) ++{ ++ return pmd_write(pmd) || ++ ((flags & FOLL_FORCE) && (flags & FOLL_COW) && ++ page && PageAnon(page)); ++} ++ + struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, + unsigned long addr, + pmd_t *pmd, +@@ -1245,15 +1257,16 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, + + assert_spin_locked(&mm->page_table_lock); + +- if (flags & FOLL_WRITE && !pmd_write(*pmd)) +- goto out; +- + /* Avoid dumping huge zero page */ + if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) + return ERR_PTR(-EFAULT); + + page = pmd_page(*pmd); + VM_BUG_ON(!PageHead(page)); ++ ++ if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, page, flags)) ++ return NULL; ++ + if (flags & FOLL_TOUCH) { + pmd_t _pmd; + /* +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index e9fd382bf25a..69832290015f 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -1070,23 +1070,32 @@ free: + } + + /* +- * When releasing a hugetlb pool reservation, any surplus pages that were +- * allocated to satisfy the reservation must be explicitly freed if they were +- * never used. +- * Called with hugetlb_lock held. ++ * This routine has two main purposes: ++ * 1) Decrement the reservation count (resv_huge_pages) by the value passed ++ * in unused_resv_pages. This corresponds to the prior adjustments made ++ * to the associated reservation map. ++ * 2) Free any unused surplus pages that may have been allocated to satisfy ++ * the reservation. As many as unused_resv_pages may be freed. ++ * ++ * Called with hugetlb_lock held. However, the lock could be dropped (and ++ * reacquired) during calls to cond_resched_lock. Whenever dropping the lock, ++ * we must make sure nobody else can claim pages we are in the process of ++ * freeing. Do this by ensuring resv_huge_page always is greater than the ++ * number of huge pages we plan to free when dropping the lock. + */ + static void return_unused_surplus_pages(struct hstate *h, + unsigned long unused_resv_pages) + { + unsigned long nr_pages; + +- /* Uncommit the reservation */ +- h->resv_huge_pages -= unused_resv_pages; +- + /* Cannot return gigantic pages currently */ + if (h->order >= MAX_ORDER) +- return; ++ goto out; + ++ /* ++ * Part (or even all) of the reservation could have been backed ++ * by pre-allocated pages. Only free surplus pages. ++ */ + nr_pages = min(unused_resv_pages, h->surplus_huge_pages); + + /* +@@ -1096,12 +1105,22 @@ static void return_unused_surplus_pages(struct hstate *h, + * when the nodes with surplus pages have no free pages. + * free_pool_huge_page() will balance the the freed pages across the + * on-line nodes with memory and will handle the hstate accounting. ++ * ++ * Note that we decrement resv_huge_pages as we free the pages. If ++ * we drop the lock, resv_huge_pages will still be sufficiently large ++ * to cover subsequent pages we may free. 
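/*
 * Editor's note -- sketch of the invariant the reworked
 * return_unused_surplus_pages() around this point maintains, not part of the
 * patch. Because cond_resched_lock() may drop hugetlb_lock mid-loop, the
 * reservation counter is now decremented one page at a time, before each
 * free, so it always covers the pages still to be freed; the old code
 * subtracted the whole amount up front. Toy version (the lock itself is
 * elided here):
 */
static unsigned long resv = 10;  /* protected by a lock in the real code */

static void return_unused(unsigned long unused, unsigned long surplus)
{
        unsigned long nr = unused < surplus ? unused : surplus;

        while (nr--) {
                /* Keep resv >= pages we still intend to free, even if the
                 * lock is dropped and retaken between iterations. */
                resv--;
                unused--;
                /* free_pool_huge_page(); cond_resched_lock(); */
        }
        /* Uncommit whatever was reserved but never backed by surplus. */
        resv -= unused;
}

int main(void)
{
        return_unused(10, 4);
        return resv == 0 ? 0 : 1;
}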
+ */ + while (nr_pages--) { ++ h->resv_huge_pages--; ++ unused_resv_pages--; + if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1)) +- break; ++ goto out; + cond_resched_lock(&hugetlb_lock); + } ++ ++out: ++ /* Fully uncommit the reservation */ ++ h->resv_huge_pages -= unused_resv_pages; + } + + /* +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 494a081ec5e4..4e8927539299 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -5060,15 +5060,18 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn) + sizeof(arch_zone_lowest_possible_pfn)); + memset(arch_zone_highest_possible_pfn, 0, + sizeof(arch_zone_highest_possible_pfn)); +- arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); +- arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; +- for (i = 1; i < MAX_NR_ZONES; i++) { ++ ++ start_pfn = find_min_pfn_with_active_regions(); ++ ++ for (i = 0; i < MAX_NR_ZONES; i++) { + if (i == ZONE_MOVABLE) + continue; +- arch_zone_lowest_possible_pfn[i] = +- arch_zone_highest_possible_pfn[i-1]; +- arch_zone_highest_possible_pfn[i] = +- max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); ++ ++ end_pfn = max(max_zone_pfn[i], start_pfn); ++ arch_zone_lowest_possible_pfn[i] = start_pfn; ++ arch_zone_highest_possible_pfn[i] = end_pfn; ++ ++ start_pfn = end_pfn; + } + arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0; + arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0; +diff --git a/net/can/af_can.c b/net/can/af_can.c +index d3668c55b088..34064aa88f02 100644 +--- a/net/can/af_can.c ++++ b/net/can/af_can.c +@@ -425,6 +425,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, + * @func: callback function on filter match + * @data: returned parameter for callback function + * @ident: string for calling module indentification ++ * @sk: socket pointer (might be NULL) + * + * Description: + * Invokes the callback function with the received sk_buff and the given +@@ -448,7 +449,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, + */ + int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, + void (*func)(struct sk_buff *, void *), void *data, +- char *ident) ++ char *ident, struct sock *sk) + { + struct receiver *r; + struct hlist_head *rl; +@@ -476,6 +477,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, + r->func = func; + r->data = data; + r->ident = ident; ++ r->sk = sk; + + hlist_add_head_rcu(&r->list, rl); + d->entries++; +@@ -500,8 +502,11 @@ EXPORT_SYMBOL(can_rx_register); + static void can_rx_delete_receiver(struct rcu_head *rp) + { + struct receiver *r = container_of(rp, struct receiver, rcu); ++ struct sock *sk = r->sk; + + kmem_cache_free(rcv_cache, r); ++ if (sk) ++ sock_put(sk); + } + + /** +@@ -576,8 +581,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, + spin_unlock(&can_rcvlists_lock); + + /* schedule the receiver item for deletion */ +- if (r) ++ if (r) { ++ if (r->sk) ++ sock_hold(r->sk); + call_rcu(&r->rcu, can_rx_delete_receiver); ++ } + } + EXPORT_SYMBOL(can_rx_unregister); + +diff --git a/net/can/af_can.h b/net/can/af_can.h +index 1dccb4c33894..0e95be423587 100644 +--- a/net/can/af_can.h ++++ b/net/can/af_can.h +@@ -50,13 +50,14 @@ + + struct receiver { + struct hlist_node list; +- struct rcu_head rcu; + canid_t can_id; + canid_t mask; + unsigned long matches; + void (*func)(struct sk_buff *, void *); + void *data; + char *ident; ++ struct sock *sk; ++ struct rcu_head rcu; + }; + + enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, 
RX_EFF, RX_MAX }; +diff --git a/net/can/bcm.c b/net/can/bcm.c +index dd0781c49ebb..725ce812cfbc 100644 +--- a/net/can/bcm.c ++++ b/net/can/bcm.c +@@ -1169,7 +1169,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, + err = can_rx_register(dev, op->can_id, + REGMASK(op->can_id), + bcm_rx_handler, op, +- "bcm"); ++ "bcm", sk); + + op->rx_reg_dev = dev; + dev_put(dev); +@@ -1178,7 +1178,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, + } else + err = can_rx_register(NULL, op->can_id, + REGMASK(op->can_id), +- bcm_rx_handler, op, "bcm"); ++ bcm_rx_handler, op, "bcm", sk); + if (err) { + /* this bcm rx op is broken -> remove it */ + list_del(&op->list); +diff --git a/net/can/gw.c b/net/can/gw.c +index de25455b4e3e..2ad8aa4f9f0b 100644 +--- a/net/can/gw.c ++++ b/net/can/gw.c +@@ -435,7 +435,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj) + { + return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id, + gwj->ccgw.filter.can_mask, can_can_gw_rcv, +- gwj, "gw"); ++ gwj, "gw", NULL); + } + + static inline void cgw_unregister_filter(struct cgw_job *gwj) +diff --git a/net/can/raw.c b/net/can/raw.c +index 1085e65f848e..f4d86485571f 100644 +--- a/net/can/raw.c ++++ b/net/can/raw.c +@@ -168,7 +168,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk, + for (i = 0; i < count; i++) { + err = can_rx_register(dev, filter[i].can_id, + filter[i].can_mask, +- raw_rcv, sk, "raw"); ++ raw_rcv, sk, "raw", sk); + if (err) { + /* clean up successfully registered filters */ + while (--i >= 0) +@@ -189,7 +189,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk, + + if (err_mask) + err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG, +- raw_rcv, sk, "raw"); ++ raw_rcv, sk, "raw", sk); + + return err; + } +diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c +index c1de8d404c47..26e2235356c5 100644 +--- a/net/ceph/osdmap.c ++++ b/net/ceph/osdmap.c +@@ -870,7 +870,6 @@ static int decode_new_up_state_weight(void **p, void *end, + if ((map->osd_state[osd] & CEPH_OSD_EXISTS) && + (xorstate & CEPH_OSD_EXISTS)) { + pr_info("osd%d does not exist\n", osd); +- map->osd_weight[osd] = CEPH_OSD_IN; + memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr)); + map->osd_state[osd] = 0; + } else { +diff --git a/net/core/dev.c b/net/core/dev.c +index 6494918b3eaa..682bf5ad63a0 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -1559,37 +1559,59 @@ EXPORT_SYMBOL(call_netdevice_notifiers); + + static struct static_key netstamp_needed __read_mostly; + #ifdef HAVE_JUMP_LABEL +-/* We are not allowed to call static_key_slow_dec() from irq context +- * If net_disable_timestamp() is called from irq context, defer the +- * static_key_slow_dec() calls. 
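/*
 * Editor's note -- sketch of the increment-if-positive loop the net/core/dev.c
 * hunk below builds net_enable_timestamp() around, not part of the patch. The
 * static key may only be toggled from process context, so the fast path bumps
 * a plain counter with cmpxchg and falls back to scheduling a work item only
 * when the count would cross zero. C11 analogue of the cmpxchg loop:
 */
#include <stdatomic.h>

static atomic_int wanted;       /* zero-initialized at file scope */

/* Returns 1 if the fast path took a reference, 0 if the slow
 * (deferred, workqueue) path must run instead. */
static int enable_fast_path(void)
{
        int cur = atomic_load(&wanted);

        while (cur > 0) {
                if (atomic_compare_exchange_weak(&wanted, &cur, cur + 1))
                        return 1;
                /* cur was reloaded by the failed CAS; just retry. */
        }
        return 0;
}

int main(void)
{
        int slow = !enable_fast_path();  /* count is 0: must defer */

        atomic_store(&wanted, 1);
        return (slow && enable_fast_path()) ? 0 : 1;
}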
+- */ + static atomic_t netstamp_needed_deferred; ++static atomic_t netstamp_wanted; ++static void netstamp_clear(struct work_struct *work) ++{ ++ int deferred = atomic_xchg(&netstamp_needed_deferred, 0); ++ int wanted; ++ ++ wanted = atomic_add_return(deferred, &netstamp_wanted); ++ if (wanted > 0) ++ static_key_enable(&netstamp_needed); ++ else ++ static_key_disable(&netstamp_needed); ++} ++static DECLARE_WORK(netstamp_work, netstamp_clear); + #endif + + void net_enable_timestamp(void) + { + #ifdef HAVE_JUMP_LABEL +- int deferred = atomic_xchg(&netstamp_needed_deferred, 0); ++ int wanted; + +- if (deferred) { +- while (--deferred) +- static_key_slow_dec(&netstamp_needed); +- return; ++ while (1) { ++ wanted = atomic_read(&netstamp_wanted); ++ if (wanted <= 0) ++ break; ++ if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted) ++ return; + } +-#endif ++ atomic_inc(&netstamp_needed_deferred); ++ schedule_work(&netstamp_work); ++#else + static_key_slow_inc(&netstamp_needed); ++#endif + } + EXPORT_SYMBOL(net_enable_timestamp); + + void net_disable_timestamp(void) + { + #ifdef HAVE_JUMP_LABEL +- if (in_interrupt()) { +- atomic_inc(&netstamp_needed_deferred); +- return; ++ int wanted; ++ ++ while (1) { ++ wanted = atomic_read(&netstamp_wanted); ++ if (wanted <= 1) ++ break; ++ if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted) ++ return; + } +-#endif ++ atomic_dec(&netstamp_needed_deferred); ++ schedule_work(&netstamp_work); ++#else + static_key_slow_dec(&netstamp_needed); ++#endif + } + EXPORT_SYMBOL(net_disable_timestamp); + +@@ -2461,9 +2483,9 @@ static netdev_features_t harmonize_features(struct sk_buff *skb, + if (skb->ip_summed != CHECKSUM_NONE && + !can_checksum_protocol(features, protocol)) { + features &= ~NETIF_F_ALL_CSUM; +- } else if (illegal_highdma(dev, skb)) { +- features &= ~NETIF_F_SG; + } ++ if (illegal_highdma(dev, skb)) ++ features &= ~NETIF_F_SG; + + return features; + } +@@ -3891,7 +3913,9 @@ static void skb_gro_reset_offset(struct sk_buff *skb) + pinfo->nr_frags && + !PageHighMem(skb_frag_page(frag0))) { + NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); +- NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0); ++ NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int, ++ skb_frag_size(frag0), ++ skb->end - skb->tail); + } + } + +diff --git a/net/core/sock.c b/net/core/sock.c +index e3cb45411f34..96e125919324 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -1403,6 +1403,11 @@ static void __sk_free(struct sock *sk) + pr_debug("%s: optmem leakage (%d bytes) detected\n", + __func__, atomic_read(&sk->sk_omem_alloc)); + ++ if (sk->sk_frag.page) { ++ put_page(sk->sk_frag.page); ++ sk->sk_frag.page = NULL; ++ } ++ + if (sk->sk_peer_cred) + put_cred(sk->sk_peer_cred); + put_pid(sk->sk_peer_pid); +@@ -2556,11 +2561,6 @@ void sk_common_release(struct sock *sk) + + sk_refcnt_debug_release(sk); + +- if (sk->sk_frag.page) { +- put_page(sk->sk_frag.page); +- sk->sk_frag.page = NULL; +- } +- + sock_put(sk); + } + EXPORT_SYMBOL(sk_common_release); +diff --git a/net/dccp/input.c b/net/dccp/input.c +index 14cdafad7a90..e511ccc74a07 100644 +--- a/net/dccp/input.c ++++ b/net/dccp/input.c +@@ -606,7 +606,8 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, + if (inet_csk(sk)->icsk_af_ops->conn_request(sk, + skb) < 0) + return 1; +- goto discard; ++ consume_skb(skb); ++ return 0; + } + if (dh->dccph_type == DCCP_PKT_RESET) + goto discard; +diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c +index 294c642fbebb..3bb5ff9e14a2 100644 +--- 
a/net/dccp/ipv4.c ++++ b/net/dccp/ipv4.c +@@ -263,7 +263,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) + + switch (type) { + case ICMP_REDIRECT: +- dccp_do_redirect(skb, sk); ++ if (!sock_owned_by_user(sk)) ++ dccp_do_redirect(skb, sk); + goto out; + case ICMP_SOURCE_QUENCH: + /* Just silently ignore these. */ +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c +index 94f8224d543e..9ad2416f8e33 100644 +--- a/net/dccp/ipv6.c ++++ b/net/dccp/ipv6.c +@@ -132,10 +132,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, + np = inet6_sk(sk); + + if (type == NDISC_REDIRECT) { +- struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); ++ if (!sock_owned_by_user(sk)) { ++ struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); + +- if (dst) +- dst->ops->redirect(dst, sk, skb); ++ if (dst) ++ dst->ops->redirect(dst, sk, skb); ++ } + goto out; + } + +diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c +index ca118e8cb141..9d06b37acc4c 100644 +--- a/net/ieee802154/6lowpan.c ++++ b/net/ieee802154/6lowpan.c +@@ -459,7 +459,7 @@ static int lowpan_header_create(struct sk_buff *skb, + hc06_ptr += 3; + } else { + /* compress nothing */ +- memcpy(hc06_ptr, &hdr, 4); ++ memcpy(hc06_ptr, hdr, 4); + /* replace the top byte with new ECN | DSCP format */ + *hc06_ptr = tmp; + hc06_ptr += 4; +diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c +index 667c1d4ca984..4322372dddbe 100644 +--- a/net/ipv4/cipso_ipv4.c ++++ b/net/ipv4/cipso_ipv4.c +@@ -1649,6 +1649,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) + goto validate_return_locked; + } + ++ if (opt_iter + 1 == opt_len) { ++ err_offset = opt_iter; ++ goto validate_return_locked; ++ } + tag_len = tag[1]; + if (tag_len > (opt_len - opt_iter)) { + err_offset = opt_iter + 1; +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c +index 155adf8729c2..b0178b04bd81 100644 +--- a/net/ipv4/igmp.c ++++ b/net/ipv4/igmp.c +@@ -1874,7 +1874,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) + + rtnl_lock(); + in_dev = ip_mc_find_dev(net, imr); +- if (!in_dev) { ++ if (!imr->imr_ifindex && !imr->imr_address.s_addr && !in_dev) { + ret = -ENODEV; + goto out; + } +@@ -1895,8 +1895,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) + + *imlp = iml->next_rcu; + +- ip_mc_dec_group(in_dev, group); ++ if (in_dev) ++ ip_mc_dec_group(in_dev, group); + rtnl_unlock(); ++ + /* decrease mem now to avoid the memleak warning */ + atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); + kfree_rcu(iml, rcu); +diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c +index 6acb541c9091..40ac1e2cbb34 100644 +--- a/net/ipv4/inet_connection_sock.c ++++ b/net/ipv4/inet_connection_sock.c +@@ -688,6 +688,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk, + inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port; + newsk->sk_write_space = sk_stream_write_space; + ++ inet_sk(newsk)->mc_list = NULL; ++ + newicsk->icsk_retransmits = 0; + newicsk->icsk_backoff = 0; + newicsk->icsk_probes_out = 0; +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c +index f6603142cb33..3d009e174166 100644 +--- a/net/ipv4/ip_sockglue.c ++++ b/net/ipv4/ip_sockglue.c +@@ -1042,7 +1042,14 @@ void ipv4_pktinfo_prepare(struct sk_buff *skb) + pktinfo->ipi_ifindex = 0; + pktinfo->ipi_spec_dst.s_addr = 0; + } +- skb_dst_drop(skb); ++ /* We need to keep the dst for __ip_options_echo() ++ * We could restrict the test to opt.ts_needtime || opt.srr, ++ * but the following is good enough as 
IP options are not often used. ++ */ ++ if (unlikely(IPCB(skb)->opt.optlen)) ++ skb_dst_force(skb); ++ else ++ skb_dst_drop(skb); + } + + int ip_setsockopt(struct sock *sk, int level, +diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c +index 4ec34275160b..eadafac6f461 100644 +--- a/net/ipv4/ip_vti.c ++++ b/net/ipv4/ip_vti.c +@@ -582,7 +582,6 @@ static void vti_tunnel_setup(struct net_device *dev) + dev->type = ARPHRD_TUNNEL; + dev->destructor = vti_dev_free; + +- dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); + dev->mtu = ETH_DATA_LEN; + dev->flags = IFF_NOARP; + dev->iflink = 0; +diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c +index 95a5f261fe8a..f8f0518772d6 100644 +--- a/net/ipv4/netfilter/arp_tables.c ++++ b/net/ipv4/netfilter/arp_tables.c +@@ -1309,8 +1309,8 @@ static int translate_compat_table(struct xt_table_info **pinfo, + + newinfo->number = compatr->num_entries; + for (i = 0; i < NF_ARP_NUMHOOKS; i++) { +- newinfo->hook_entry[i] = info->hook_entry[i]; +- newinfo->underflow[i] = info->underflow[i]; ++ newinfo->hook_entry[i] = compatr->hook_entry[i]; ++ newinfo->underflow[i] = compatr->underflow[i]; + } + entry1 = newinfo->entries[raw_smp_processor_id()]; + pos = entry1; +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c +index 459b957104a8..0b778d75e38d 100644 +--- a/net/ipv4/ping.c ++++ b/net/ipv4/ping.c +@@ -151,16 +151,16 @@ void ping_unhash(struct sock *sk) + { + struct inet_sock *isk = inet_sk(sk); + pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); ++ write_lock_bh(&ping_table.lock); + if (sk_hashed(sk)) { +- write_lock_bh(&ping_table.lock); + hlist_nulls_del(&sk->sk_nulls_node); + sk_nulls_node_init(&sk->sk_nulls_node); + sock_put(sk); + isk->inet_num = 0; + isk->inet_sport = 0; +- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); +- write_unlock_bh(&ping_table.lock); ++ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); + } ++ write_unlock_bh(&ping_table.lock); + } + EXPORT_SYMBOL_GPL(ping_unhash); + +@@ -450,6 +451,8 @@ static int ping_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh, + { + struct sk_buff *skb = skb_peek(&sk->sk_write_queue); + ++ if (!skb) ++ return 0; + pfh->wcheck = csum_partial((char *)&pfh->icmph, + sizeof(struct icmphdr), pfh->wcheck); + pfh->icmph.checksum = csum_fold(pfh->wcheck); +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index e59d6332458b..d9c791343638 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -1789,6 +1789,7 @@ int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, + { + int res; + ++ tos &= IPTOS_RT_MASK; + rcu_read_lock(); + + /* Multicast recognition logic is moved from route cache to here. +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index 5d4bd6ca3ab1..d1e04221c275 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -723,6 +723,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, + ret = -EAGAIN; + break; + } ++ /* if __tcp_splice_read() got nothing while we have ++ * an skb in receive queue, we do not want to loop. ++ * This might happen with URG data. 
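/*
 * Editor's note -- sketch of the loop shape this tcp_splice_read() fix
 * produces, not part of the patch. If __tcp_splice_read() moved no data but
 * the receive queue is non-empty (urgent data can cause this), sleeping in
 * sk_wait_data() would spin forever since no further wakeup is coming; the
 * fix breaks out instead. Generic consumer skeleton with hypothetical
 * helpers standing in for __tcp_splice_read(), skb_queue_empty() and
 * sk_wait_data():
 */
static int reads_left = 2;

static int try_read(void) { return reads_left-- > 0 ? 100 : 0; }
static int queue_empty(void) { return 0; }      /* an URG skb stays queued */
static void wait_for_data(void) { }

static int splice_loop(void)
{
        int total = 0;

        for (;;) {
                int n = try_read();

                if (n > 0) {
                        total += n;
                        continue;
                }
                /* Nothing moved, yet the queue holds an skb (urgent data):
                 * waiting would never be woken, so stop instead. */
                if (!queue_empty())
                        break;
                wait_for_data();
        }
        return total;
}

int main(void)
{
        return splice_loop() == 200 ? 0 : 1;
}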
++ */ ++ if (!skb_queue_empty(&sk->sk_receive_queue)) ++ break; + sk_wait_data(sk, &timeo); + if (signal_pending(current)) { + ret = sock_intr_errno(timeo); +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index 6504a085ca60..195c618aba6c 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -389,7 +389,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) + + switch (type) { + case ICMP_REDIRECT: +- do_redirect(icmp_skb, sk); ++ if (!sock_owned_by_user(sk)) ++ do_redirect(icmp_skb, sk); + goto out; + case ICMP_SOURCE_QUENCH: + /* Just silently ignore these. */ +@@ -1422,6 +1423,7 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk, + * scaled. So correct it appropriately. + */ + tp->snd_wnd = ntohs(tcp_hdr(skb)->window); ++ tp->max_window = tp->snd_wnd; + + /* Activate the retrans timer so that SYNACK can be retransmitted. + * The request socket is not added to the SYN table of the parent +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c +index 603f251b6ca2..ae88e17f5c72 100644 +--- a/net/ipv6/ip6_gre.c ++++ b/net/ipv6/ip6_gre.c +@@ -55,6 +55,7 @@ + #include + #include + #include ++#include + + + static bool log_ecn_error = true; +@@ -365,35 +366,37 @@ static void ip6gre_tunnel_uninit(struct net_device *dev) + + + static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, +- u8 type, u8 code, int offset, __be32 info) ++ u8 type, u8 code, int offset, __be32 info) + { +- const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data; +- __be16 *p = (__be16 *)(skb->data + offset); +- int grehlen = offset + 4; ++ const struct gre_base_hdr *greh; ++ const struct ipv6hdr *ipv6h; ++ int grehlen = sizeof(*greh); + struct ip6_tnl *t; ++ int key_off = 0; + __be16 flags; ++ __be32 key; + +- flags = p[0]; +- if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) { +- if (flags&(GRE_VERSION|GRE_ROUTING)) +- return; +- if (flags&GRE_KEY) { +- grehlen += 4; +- if (flags&GRE_CSUM) +- grehlen += 4; +- } ++ if (!pskb_may_pull(skb, offset + grehlen)) ++ return; ++ greh = (const struct gre_base_hdr *)(skb->data + offset); ++ flags = greh->flags; ++ if (flags & (GRE_VERSION | GRE_ROUTING)) ++ return; ++ if (flags & GRE_CSUM) ++ grehlen += 4; ++ if (flags & GRE_KEY) { ++ key_off = grehlen + offset; ++ grehlen += 4; + } + +- /* If only 8 bytes returned, keyed message will be dropped here */ +- if (!pskb_may_pull(skb, grehlen)) ++ if (!pskb_may_pull(skb, offset + grehlen)) + return; + ipv6h = (const struct ipv6hdr *)skb->data; +- p = (__be16 *)(skb->data + offset); ++ greh = (const struct gre_base_hdr *)(skb->data + offset); ++ key = key_off ? *(__be32 *)(skb->data + key_off) : 0; + + t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr, +- flags & GRE_KEY ? 
+- *(((__be32 *)p) + (grehlen / 4) - 1) : 0, +- p[1]); ++ key, greh->protocol); + if (t == NULL) + return; + +diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c +index 71b766ee821d..88a2e8827ef7 100644 +--- a/net/ipv6/ip6_offload.c ++++ b/net/ipv6/ip6_offload.c +@@ -174,6 +174,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, + ops = rcu_dereference(inet6_offloads[proto]); + if (!ops || !ops->callbacks.gro_receive) { + __pskb_pull(skb, skb_gro_offset(skb)); ++ skb_gro_frag0_invalidate(skb); + proto = ipv6_gso_pull_exthdrs(skb, proto); + skb_gro_pull(skb, -skb_transport_offset(skb)); + skb_reset_transport_header(skb); +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c +index 12984e6794b9..efc77acbe9e1 100644 +--- a/net/ipv6/ip6_tunnel.c ++++ b/net/ipv6/ip6_tunnel.c +@@ -103,16 +103,25 @@ struct ip6_tnl_net { + + static struct net_device_stats *ip6_get_stats(struct net_device *dev) + { +- struct pcpu_tstats sum = { 0 }; ++ struct pcpu_tstats tmp, sum = { 0 }; + int i; + + for_each_possible_cpu(i) { ++ unsigned int start; + const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); + +- sum.rx_packets += tstats->rx_packets; +- sum.rx_bytes += tstats->rx_bytes; +- sum.tx_packets += tstats->tx_packets; +- sum.tx_bytes += tstats->tx_bytes; ++ do { ++ start = u64_stats_fetch_begin_bh(&tstats->syncp); ++ tmp.rx_packets = tstats->rx_packets; ++ tmp.rx_bytes = tstats->rx_bytes; ++ tmp.tx_packets = tstats->tx_packets; ++ tmp.tx_bytes = tstats->tx_bytes; ++ } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); ++ ++ sum.rx_packets += tmp.rx_packets; ++ sum.rx_bytes += tmp.rx_bytes; ++ sum.tx_packets += tmp.tx_packets; ++ sum.tx_bytes += tmp.tx_bytes; + } + dev->stats.rx_packets = sum.rx_packets; + dev->stats.rx_bytes = sum.rx_bytes; +@@ -394,18 +403,19 @@ ip6_tnl_dev_uninit(struct net_device *dev) + + __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) + { +- const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw; +- __u8 nexthdr = ipv6h->nexthdr; +- __u16 off = sizeof (*ipv6h); ++ const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw; ++ unsigned int nhoff = raw - skb->data; ++ unsigned int off = nhoff + sizeof(*ipv6h); ++ u8 next, nexthdr = ipv6h->nexthdr; + + while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) { +- __u16 optlen = 0; + struct ipv6_opt_hdr *hdr; +- if (raw + off + sizeof (*hdr) > skb->data && +- !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr))) ++ u16 optlen; ++ ++ if (!pskb_may_pull(skb, off + sizeof(*hdr))) + break; + +- hdr = (struct ipv6_opt_hdr *) (raw + off); ++ hdr = (struct ipv6_opt_hdr *)(skb->data + off); + if (nexthdr == NEXTHDR_FRAGMENT) { + struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr; + if (frag_hdr->frag_off) +@@ -416,20 +426,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) + } else { + optlen = ipv6_optlen(hdr); + } ++ /* cache hdr->nexthdr, since pskb_may_pull() might ++ * invalidate hdr ++ */ ++ next = hdr->nexthdr; + if (nexthdr == NEXTHDR_DEST) { +- __u16 i = off + 2; ++ u16 i = 2; ++ ++ /* Remember : hdr is no longer valid at this point. 
*/ ++ if (!pskb_may_pull(skb, off + optlen)) ++ break; ++ + while (1) { + struct ipv6_tlv_tnl_enc_lim *tel; + + /* No more room for encapsulation limit */ +- if (i + sizeof (*tel) > off + optlen) ++ if (i + sizeof(*tel) > optlen) + break; + +- tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i]; ++ tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i); + /* return index of option if found and valid */ + if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT && + tel->length == 1) +- return i; ++ return i + off - nhoff; + /* else jump to next option */ + if (tel->type) + i += tel->length + 2; +@@ -437,7 +456,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) + i++; + } + } +- nexthdr = hdr->nexthdr; ++ nexthdr = next; + off += optlen; + } + return 0; +@@ -822,8 +841,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, + } + + tstats = this_cpu_ptr(t->dev->tstats); ++ u64_stats_update_begin(&tstats->syncp); + tstats->rx_packets++; + tstats->rx_bytes += skb->len; ++ u64_stats_update_end(&tstats->syncp); + + netif_rx(skb); + +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c +index 464b1c9c08e4..989bd7987985 100644 +--- a/net/ipv6/raw.c ++++ b/net/ipv6/raw.c +@@ -578,8 +578,11 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, + } + + offset += skb_transport_offset(skb); +- if (skb_copy_bits(skb, offset, &csum, 2)) +- BUG(); ++ err = skb_copy_bits(skb, offset, &csum, 2); ++ if (err < 0) { ++ ip6_flush_pending_frames(sk); ++ goto out; ++ } + + /* in case cksum was not initialized */ + if (unlikely(csum)) +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index 70b10ed169ae..ecbdc4b29f29 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -382,10 +382,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, + np = inet6_sk(sk); + + if (type == NDISC_REDIRECT) { +- struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); ++ if (!sock_owned_by_user(sk)) { ++ struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); + +- if (dst) +- dst->ops->redirect(dst, sk, skb); ++ if (dst) ++ dst->ops->redirect(dst, sk, skb); ++ } + goto out; + } + +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c +index 797ff373e486..787ac0ef1823 100644 +--- a/net/l2tp/l2tp_core.c ++++ b/net/l2tp/l2tp_core.c +@@ -280,7 +280,8 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn + } + EXPORT_SYMBOL_GPL(l2tp_session_find); + +-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) ++struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth, ++ bool do_ref) + { + int hash; + struct l2tp_session *session; +@@ -290,6 +291,9 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) + for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { + hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) { + if (++count > nth) { ++ l2tp_session_inc_refcount(session); ++ if (do_ref && session->ref) ++ session->ref(session); + read_unlock_bh(&tunnel->hlist_lock); + return session; + } +@@ -300,7 +304,7 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) + + return NULL; + } +-EXPORT_SYMBOL_GPL(l2tp_session_find_nth); ++EXPORT_SYMBOL_GPL(l2tp_session_get_nth); + + /* Lookup a session by interface name. + * This is very inefficient but is only used by management interfaces. 
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h +index 2f89d43877d7..54f89f38386e 100644 +--- a/net/l2tp/l2tp_core.h ++++ b/net/l2tp/l2tp_core.h +@@ -236,7 +236,8 @@ out: + extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel); + extern void l2tp_tunnel_sock_put(struct sock *sk); + extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); +-extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); ++extern struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth, ++ bool do_ref); + extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); + extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id); + extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); +@@ -256,6 +257,7 @@ extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int + + extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops); + extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type); ++int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg); + + /* Session reference counts. Incremented when code obtains a reference + * to a session. +diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c +index 072d7202e182..c6bd783cfb1b 100644 +--- a/net/l2tp/l2tp_debugfs.c ++++ b/net/l2tp/l2tp_debugfs.c +@@ -53,7 +53,7 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd) + + static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd) + { +- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); ++ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true); + pd->session_idx++; + + if (pd->session == NULL) { +@@ -237,10 +237,14 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v) + } + + /* Show the tunnel or session context */ +- if (pd->session == NULL) ++ if (!pd->session) { + l2tp_dfs_seq_tunnel_show(m, pd->tunnel); +- else ++ } else { + l2tp_dfs_seq_session_show(m, pd->session); ++ if (pd->session->deref) ++ pd->session->deref(pd->session); ++ l2tp_session_dec_refcount(pd->session); ++ } + + out: + return 0; +diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c +index 07f8b97f9ae9..f4d30b509cdf 100644 +--- a/net/l2tp/l2tp_ip.c ++++ b/net/l2tp/l2tp_ip.c +@@ -11,6 +11,7 @@ + + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + ++#include + #include + #include + #include +@@ -554,6 +555,30 @@ out: + return err ? err : copied; + } + ++int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg) ++{ ++ struct sk_buff *skb; ++ int amount; ++ ++ switch (cmd) { ++ case SIOCOUTQ: ++ amount = sk_wmem_alloc_get(sk); ++ break; ++ case SIOCINQ: ++ spin_lock_bh(&sk->sk_receive_queue.lock); ++ skb = skb_peek(&sk->sk_receive_queue); ++ amount = skb ? 
skb->len : 0; ++ spin_unlock_bh(&sk->sk_receive_queue.lock); ++ break; ++ ++ default: ++ return -ENOIOCTLCMD; ++ } ++ ++ return put_user(amount, (int __user *)arg); ++} ++EXPORT_SYMBOL(l2tp_ioctl); ++ + static struct proto l2tp_ip_prot = { + .name = "L2TP/IP", + .owner = THIS_MODULE, +@@ -562,7 +587,7 @@ static struct proto l2tp_ip_prot = { + .bind = l2tp_ip_bind, + .connect = l2tp_ip_connect, + .disconnect = l2tp_ip_disconnect, +- .ioctl = udp_ioctl, ++ .ioctl = l2tp_ioctl, + .destroy = l2tp_ip_destroy_sock, + .setsockopt = ip_setsockopt, + .getsockopt = ip_getsockopt, +diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c +index db96af978da5..15367918413c 100644 +--- a/net/l2tp/l2tp_ip6.c ++++ b/net/l2tp/l2tp_ip6.c +@@ -716,7 +716,7 @@ static struct proto l2tp_ip6_prot = { + .bind = l2tp_ip6_bind, + .connect = l2tp_ip6_connect, + .disconnect = l2tp_ip6_disconnect, +- .ioctl = udp_ioctl, ++ .ioctl = l2tp_ioctl, + .destroy = l2tp_ip6_destroy_sock, + .setsockopt = ipv6_setsockopt, + .getsockopt = ipv6_getsockopt, +diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c +index 0825ff26e113..490024eaece8 100644 +--- a/net/l2tp/l2tp_netlink.c ++++ b/net/l2tp/l2tp_netlink.c +@@ -719,7 +719,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback + goto out; + } + +- session = l2tp_session_find_nth(tunnel, si); ++ session = l2tp_session_get_nth(tunnel, si, false); + if (session == NULL) { + ti++; + tunnel = NULL; +@@ -729,8 +729,11 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback + + if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, +- session) <= 0) ++ session) <= 0) { ++ l2tp_session_dec_refcount(session); + break; ++ } ++ l2tp_session_dec_refcount(session); + + si++; + } +diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c +index c3ae2411650c..c06c7ed47b69 100644 +--- a/net/l2tp/l2tp_ppp.c ++++ b/net/l2tp/l2tp_ppp.c +@@ -1576,7 +1576,7 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd) + + static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd) + { +- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); ++ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true); + pd->session_idx++; + + if (pd->session == NULL) { +@@ -1703,10 +1703,14 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v) + + /* Show the tunnel or session context. 
+ */ +- if (pd->session == NULL) ++ if (!pd->session) { + pppol2tp_seq_tunnel_show(m, pd->tunnel); +- else ++ } else { + pppol2tp_seq_session_show(m, pd->session); ++ if (pd->session->deref) ++ pd->session->deref(pd->session); ++ l2tp_session_dec_refcount(pd->session); ++ } + + out: + return 0; +diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c +index f8765cc84e47..ddc63f92fa2a 100644 +--- a/net/mac80211/mesh.c ++++ b/net/mac80211/mesh.c +@@ -345,7 +345,7 @@ int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata, + /* fast-forward to vendor IEs */ + offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0); + +- if (offset) { ++ if (offset < ifmsh->ie_len) { + len = ifmsh->ie_len - offset; + data = ifmsh->ie + offset; + if (skb_tailroom(skb) < len) +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index 24f006623f7c..4b1734a14ff9 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -1257,6 +1257,8 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po) + f->arr[f->num_members] = sk; + smp_wmb(); + f->num_members++; ++ if (f->num_members == 1) ++ dev_add_pack(&f->prot_hook); + spin_unlock(&f->lock); + } + +@@ -1273,6 +1275,8 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po) + BUG_ON(i >= f->num_members); + f->arr[i] = f->arr[f->num_members - 1]; + f->num_members--; ++ if (f->num_members == 0) ++ __dev_remove_pack(&f->prot_hook); + spin_unlock(&f->lock); + } + +@@ -1304,13 +1308,16 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) + return -EINVAL; + } + ++ mutex_lock(&fanout_mutex); ++ ++ err = -EINVAL; + if (!po->running) +- return -EINVAL; ++ goto out; + ++ err = -EALREADY; + if (po->fanout) +- return -EALREADY; ++ goto out; + +- mutex_lock(&fanout_mutex); + match = NULL; + list_for_each_entry(f, &fanout_list, list) { + if (f->id == id && +@@ -1340,7 +1347,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) + match->prot_hook.func = packet_rcv_fanout; + match->prot_hook.af_packet_priv = match; + match->prot_hook.id_match = match_fanout_group; +- dev_add_pack(&match->prot_hook); + list_add(&match->list, &fanout_list); + } + err = -EINVAL; +@@ -1361,24 +1367,29 @@ out: + return err; + } + +-static void fanout_release(struct sock *sk) ++/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes ++ * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout. 
++ * It is the responsibility of the caller to call fanout_release_data() and ++ * free the returned packet_fanout (after synchronize_net()) ++ */ ++static struct packet_fanout *fanout_release(struct sock *sk) + { + struct packet_sock *po = pkt_sk(sk); + struct packet_fanout *f; + +- f = po->fanout; +- if (!f) +- return; +- + mutex_lock(&fanout_mutex); +- po->fanout = NULL; ++ f = po->fanout; ++ if (f) { ++ po->fanout = NULL; + +- if (atomic_dec_and_test(&f->sk_ref)) { +- list_del(&f->list); +- dev_remove_pack(&f->prot_hook); +- kfree(f); ++ if (atomic_dec_and_test(&f->sk_ref)) ++ list_del(&f->list); ++ else ++ f = NULL; + } + mutex_unlock(&fanout_mutex); ++ ++ return f; + } + + static const struct proto_ops packet_ops; +@@ -2428,6 +2439,7 @@ static int packet_release(struct socket *sock) + { + struct sock *sk = sock->sk; + struct packet_sock *po; ++ struct packet_fanout *f; + struct net *net; + union tpacket_req_u req_u; + +@@ -2467,9 +2479,13 @@ static int packet_release(struct socket *sock) + packet_set_ring(sk, &req_u, 1, 1); + } + +- fanout_release(sk); ++ f = fanout_release(sk); + + synchronize_net(); ++ ++ if (f) { ++ kfree(f); ++ } + /* + * Now the socket is dead. No more input will appear. + */ +@@ -3135,19 +3151,25 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv + + if (optlen != sizeof(val)) + return -EINVAL; +- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) +- return -EBUSY; + if (copy_from_user(&val, optval, sizeof(val))) + return -EFAULT; + switch (val) { + case TPACKET_V1: + case TPACKET_V2: + case TPACKET_V3: +- po->tp_version = val; +- return 0; ++ break; + default: + return -EINVAL; + } ++ lock_sock(sk); ++ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { ++ ret = -EBUSY; ++ } else { ++ po->tp_version = val; ++ ret = 0; ++ } ++ release_sock(sk); ++ return ret; + } + case PACKET_RESERVE: + { +@@ -3384,7 +3406,6 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void + } + if (msg == NETDEV_UNREGISTER) { + packet_cached_dev_reset(po); +- fanout_release(sk); + po->ifindex = -1; + if (po->prot_hook.dev) + dev_put(po->prot_hook.dev); +@@ -3603,6 +3624,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, + /* Added to avoid minimal code churn */ + struct tpacket_req *req = &req_u->req; + ++ lock_sock(sk); + /* Opening a Tx-ring is NOT supported in TPACKET_V3 */ + if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) { + WARN(1, "Tx-ring is not supported.\n"); +@@ -3684,7 +3706,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, + goto out; + } + +- lock_sock(sk); + + /* Detach socket from network */ + spin_lock(&po->bind_lock); +@@ -3733,11 +3754,11 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, + if (!tx_ring) + prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue); + } +- release_sock(sk); + + if (pg_vec) + free_pg_vec(pg_vec, order, req->tp_block_nr); + out: ++ release_sock(sk); + return err; + } + +diff --git a/net/sched/act_api.c b/net/sched/act_api.c +index 15d46b9166de..0a31f2c51e94 100644 +--- a/net/sched/act_api.c ++++ b/net/sched/act_api.c +@@ -814,10 +814,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, + goto out_module_put; + + err = a->ops->walk(skb, &dcb, RTM_DELACTION, a); +- if (err < 0) ++ if (err <= 0) + goto out_module_put; +- if (err == 0) +- goto noflush_out; + + nla_nest_end(skb, nest); + +@@ -835,7 +833,6 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, + out_module_put: + 
module_put(a->ops->owner); + err_out: +-noflush_out: + kfree_skb(skb); + kfree(a); + return err; +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c +index 2ea40d1877a6..042e5d839623 100644 +--- a/net/sched/cls_api.c ++++ b/net/sched/cls_api.c +@@ -136,12 +136,14 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) + unsigned long cl; + unsigned long fh; + int err; +- int tp_created = 0; ++ int tp_created; + + if ((n->nlmsg_type != RTM_GETTFILTER) && !netlink_capable(skb, CAP_NET_ADMIN)) + return -EPERM; + + replay: ++ tp_created = 0; ++ + err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL); + if (err < 0) + return err; +diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c +index 7c3de6ffa516..eba9d1e49faf 100644 +--- a/net/sched/em_meta.c ++++ b/net/sched/em_meta.c +@@ -176,11 +176,12 @@ META_COLLECTOR(int_vlan_tag) + { + unsigned short tag; + +- tag = vlan_tx_tag_get(skb); +- if (!tag && __vlan_get_tag(skb, &tag)) +- *err = -1; +- else ++ if (vlan_tx_tag_present(skb)) ++ dst->value = vlan_tx_tag_get(skb); ++ else if (!__vlan_get_tag(skb, &tag)) + dst->value = tag; ++ else ++ *err = -1; + } + + +diff --git a/net/sctp/associola.c b/net/sctp/associola.c +index 6360a14edeab..59ab0c40e15c 100644 +--- a/net/sctp/associola.c ++++ b/net/sctp/associola.c +@@ -1301,82 +1301,111 @@ void sctp_assoc_update(struct sctp_association *asoc, + } + + /* Update the retran path for sending a retransmitted packet. +- * Round-robin through the active transports, else round-robin +- * through the inactive transports as this is the next best thing +- * we can try. ++ * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints: ++ * ++ * When there is outbound data to send and the primary path ++ * becomes inactive (e.g., due to failures), or where the ++ * SCTP user explicitly requests to send data to an ++ * inactive destination transport address, before reporting ++ * an error to its ULP, the SCTP endpoint should try to send ++ * the data to an alternate active destination transport ++ * address if one exists. ++ * ++ * When retransmitting data that timed out, if the endpoint ++ * is multihomed, it should consider each source-destination ++ * address pair in its retransmission selection policy. ++ * When retransmitting timed-out data, the endpoint should ++ * attempt to pick the most divergent source-destination ++ * pair from the original source-destination pair to which ++ * the packet was transmitted. ++ * ++ * Note: Rules for picking the most divergent source-destination ++ * pair are an implementation decision and are not specified ++ * within this document. ++ * ++ * Our basic strategy is to round-robin transports in priorities ++ * according to sctp_state_prio_map[] e.g., if no such ++ * transport with state SCTP_ACTIVE exists, round-robin through ++ * SCTP_UNKNOWN, etc. You get the picture. 
+ */ +-void sctp_assoc_update_retran_path(struct sctp_association *asoc) ++static const u8 sctp_trans_state_to_prio_map[] = { ++ [SCTP_ACTIVE] = 3, /* best case */ ++ [SCTP_UNKNOWN] = 2, ++ [SCTP_PF] = 1, ++ [SCTP_INACTIVE] = 0, /* worst case */ ++}; ++ ++static u8 sctp_trans_score(const struct sctp_transport *trans) + { +- struct sctp_transport *t, *next; +- struct list_head *head = &asoc->peer.transport_addr_list; +- struct list_head *pos; ++ return sctp_trans_state_to_prio_map[trans->state]; ++} + +- if (asoc->peer.transport_count == 1) +- return; ++static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr, ++ struct sctp_transport *best) ++{ ++ if (best == NULL) ++ return curr; + +- /* Find the next transport in a round-robin fashion. */ +- t = asoc->peer.retran_path; +- pos = &t->transports; +- next = NULL; ++ return sctp_trans_score(curr) > sctp_trans_score(best) ? curr : best; ++} + +- while (1) { +- /* Skip the head. */ +- if (pos->next == head) +- pos = head->next; +- else +- pos = pos->next; ++void sctp_assoc_update_retran_path(struct sctp_association *asoc) ++{ ++ struct sctp_transport *trans = asoc->peer.retran_path; ++ struct sctp_transport *trans_next = NULL; + +- t = list_entry(pos, struct sctp_transport, transports); ++ /* We're done as we only have the one and only path. */ ++ if (asoc->peer.transport_count == 1) ++ return; ++ /* If active_path and retran_path are the same and active, ++ * then this is the only active path. Use it. ++ */ ++ if (asoc->peer.active_path == asoc->peer.retran_path && ++ asoc->peer.active_path->state == SCTP_ACTIVE) ++ return; + +- /* We have exhausted the list, but didn't find any +- * other active transports. If so, use the next +- * transport. +- */ +- if (t == asoc->peer.retran_path) { +- t = next; ++ /* Iterate from retran_path's successor back to retran_path. */ ++ for (trans = list_next_entry(trans, transports); 1; ++ trans = list_next_entry(trans, transports)) { ++ /* Manually skip the head element. */ ++ if (&trans->transports == &asoc->peer.transport_addr_list) ++ continue; ++ if (trans->state == SCTP_UNCONFIRMED) ++ continue; ++ trans_next = sctp_trans_elect_best(trans, trans_next); ++ /* Active is good enough for immediate return. */ ++ if (trans_next->state == SCTP_ACTIVE) + break; +- } +- +- /* Try to find an active transport. */ +- +- if ((t->state == SCTP_ACTIVE) || +- (t->state == SCTP_UNKNOWN)) { ++ /* We've reached the end, time to update path. */ ++ if (trans == asoc->peer.retran_path) + break; +- } else { +- /* Keep track of the next transport in case +- * we don't find any active transport. +- */ +- if (t->state != SCTP_UNCONFIRMED && !next) +- next = t; +- } + } + +- if (t) +- asoc->peer.retran_path = t; +- else +- t = asoc->peer.retran_path; ++ if (trans_next != NULL) ++ asoc->peer.retran_path = trans_next; + + SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association" +- " %p addr: ", ++ " %p updated new path to addr: ", + " port: %d\n", + asoc, +- (&t->ipaddr), +- ntohs(t->ipaddr.v4.sin_port)); ++ (&asoc->peer.retran_path->ipaddr), ++ ntohs(asoc->peer.retran_path->ipaddr.v4.sin_port)); + } + +-/* Choose the transport for sending retransmit packet. 
*/ +-struct sctp_transport *sctp_assoc_choose_alter_transport( +- struct sctp_association *asoc, struct sctp_transport *last_sent_to) ++struct sctp_transport * ++sctp_assoc_choose_alter_transport(struct sctp_association *asoc, ++ struct sctp_transport *last_sent_to) + { + /* If this is the first time packet is sent, use the active path, + * else use the retran path. If the last packet was sent over the + * retran path, update the retran path and use it. + */ +- if (!last_sent_to) ++ if (last_sent_to == NULL) { + return asoc->peer.active_path; +- else { ++ } else { + if (last_sent_to == asoc->peer.retran_path) + sctp_assoc_update_retran_path(asoc); ++ + return asoc->peer.retran_path; + } + } +diff --git a/net/sctp/socket.c b/net/sctp/socket.c +index ede7c540ea24..4178cf387d21 100644 +--- a/net/sctp/socket.c ++++ b/net/sctp/socket.c +@@ -4310,6 +4310,12 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) + if (!asoc) + return -EINVAL; + ++ /* If there is a thread waiting on more sndbuf space for ++ * sending on this asoc, it cannot be peeled. ++ */ ++ if (waitqueue_active(&asoc->wait)) ++ return -EBUSY; ++ + /* An association cannot be branched off from an already peeled-off + * socket, nor is this supported for tcp style sockets. + */ +@@ -6724,7 +6730,6 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, + */ + sctp_release_sock(sk); + current_timeo = schedule_timeout(current_timeo); +- BUG_ON(sk != asoc->base.sk); + sctp_lock_sock(sk); + + *timeo_p = current_timeo; +diff --git a/net/socket.c b/net/socket.c +index e91e8ed1b8df..773ba3abb10b 100644 +--- a/net/socket.c ++++ b/net/socket.c +@@ -2326,8 +2326,10 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, + return err; + + err = sock_error(sock->sk); +- if (err) ++ if (err) { ++ datagrams = err; + goto out_put; ++ } + + entry = mmsg; + compat_entry = (struct compat_mmsghdr __user *)mmsg; +diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c +index e0062c544ac8..a9ca70579eb9 100644 +--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c ++++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c +@@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr, + if (!oa->data) + return -ENOMEM; + +- creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL); ++ creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL); + if (!creds) { + kfree(oa->data); + return -ENOMEM; +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c +index 62663a08ffbd..e625efe0e035 100644 +--- a/net/sunrpc/auth_gss/svcauth_gss.c ++++ b/net/sunrpc/auth_gss/svcauth_gss.c +@@ -1518,7 +1518,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp) + case RPC_GSS_PROC_DESTROY: + if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq)) + goto auth_err; +- rsci->h.expiry_time = get_seconds(); ++ rsci->h.expiry_time = seconds_since_boot(); + set_bit(CACHE_NEGATIVE, &rsci->h.flags); + if (resv->iov_len + 4 > PAGE_SIZE) + goto drop; +diff --git a/net/unix/garbage.c b/net/unix/garbage.c +index a72182d6750f..58ba0e5f147b 100644 +--- a/net/unix/garbage.c ++++ b/net/unix/garbage.c +@@ -152,6 +152,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp) + if (s) { + struct unix_sock *u = unix_sk(s); + ++ BUG_ON(!atomic_long_read(&u->inflight)); + BUG_ON(list_empty(&u->link)); + if (atomic_long_dec_and_test(&u->inflight)) + list_del_init(&u->link); +@@ -358,6 +359,14 @@ void unix_gc(void) + } + list_del(&cursor); + ++ /* Now gc_candidates contains only 
garbage. Restore original ++ * inflight counters for these as well, and remove the skbuffs ++ * which are creating the cycle(s). ++ */ ++ skb_queue_head_init(&hitlist); ++ list_for_each_entry(u, &gc_candidates, link) ++ scan_children(&u->sk, inc_inflight, &hitlist); ++ + /* + * not_cycle_list contains those sockets which do not make up a + * cycle. Restore these to the inflight list. +@@ -368,15 +377,6 @@ void unix_gc(void) + list_move_tail(&u->link, &gc_inflight_list); + } + +- /* +- * Now gc_candidates contains only garbage. Restore original +- * inflight counters for these as well, and remove the skbuffs +- * which are creating the cycle(s). +- */ +- skb_queue_head_init(&hitlist); +- list_for_each_entry(u, &gc_candidates, link) +- scan_children(&u->sk, inc_inflight, &hitlist); +- + spin_unlock(&unix_gc_lock); + + /* Here we are. Hitlist is filled. Die. */ +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c +index 7a70a5a5671a..91a6a2903e8d 100644 +--- a/net/xfrm/xfrm_user.c ++++ b/net/xfrm/xfrm_user.c +@@ -390,7 +390,14 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es + up = nla_data(rp); + ulen = xfrm_replay_state_esn_len(up); + +- if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen) ++ /* Check the overall length and the internal bitmap length to avoid ++ * potential overflow. */ ++ if (nla_len(rp) < ulen || ++ xfrm_replay_state_esn_len(replay_esn) != ulen || ++ replay_esn->bmp_len != up->bmp_len) ++ return -EINVAL; ++ ++ if (up->replay_window > up->bmp_len * sizeof(__u32) * 8) + return -EINVAL; + + return 0; +diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c +index b980a6ce5c79..3db2bf1f0a6c 100644 +--- a/security/integrity/evm/evm_main.c ++++ b/security/integrity/evm/evm_main.c +@@ -20,6 +20,7 @@ + #include + #include + #include ++#include + #include "evm.h" + + int evm_initialized; +@@ -128,7 +129,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry, + xattr_value_len, calc.digest); + if (rc) + break; +- rc = memcmp(xattr_data->digest, calc.digest, ++ rc = crypto_memneq(xattr_data->digest, calc.digest, + sizeof(calc.digest)); + if (rc) + rc = -EINVAL; +diff --git a/security/keys/gc.c b/security/keys/gc.c +index de34c290bd6f..2e01e23295aa 100644 +--- a/security/keys/gc.c ++++ b/security/keys/gc.c +@@ -46,7 +46,7 @@ static unsigned long key_gc_flags; + * immediately unlinked. + */ + struct key_type key_type_dead = { +- .name = "dead", ++ .name = ".dead", + }; + + /* +diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c +index 3242195bfa95..066baa1926bb 100644 +--- a/security/keys/keyctl.c ++++ b/security/keys/keyctl.c +@@ -271,7 +271,8 @@ error: + * Create and join an anonymous session keyring or join a named session + * keyring, creating it if necessary. A named session keyring must have Search + * permission for it to be joined. Session keyrings without this permit will +- * be skipped over. ++ * be skipped over. It is not permitted for userspace to create or join ++ * keyrings whose name begin with a dot. + * + * If successful, the ID of the joined session keyring will be returned. 
+ */ +@@ -288,12 +289,16 @@ long keyctl_join_session_keyring(const char __user *_name) + ret = PTR_ERR(name); + goto error; + } ++ ++ ret = -EPERM; ++ if (name[0] == '.') ++ goto error_name; + } + + /* join the session */ + ret = join_session_keyring(name); ++error_name: + kfree(name); +- + error: + return ret; + } +@@ -1240,8 +1245,8 @@ error: + * Read or set the default keyring in which request_key() will cache keys and + * return the old setting. + * +- * If a process keyring is specified then this will be created if it doesn't +- * yet exist. The old setting will be returned if successful. ++ * If a thread or process keyring is specified then it will be created if it ++ * doesn't yet exist. The old setting will be returned if successful. + */ + long keyctl_set_reqkey_keyring(int reqkey_defl) + { +@@ -1266,11 +1271,8 @@ long keyctl_set_reqkey_keyring(int reqkey_defl) + + case KEY_REQKEY_DEFL_PROCESS_KEYRING: + ret = install_process_keyring_to_cred(new); +- if (ret < 0) { +- if (ret != -EEXIST) +- goto error; +- ret = 0; +- } ++ if (ret < 0) ++ goto error; + goto set; + + case KEY_REQKEY_DEFL_DEFAULT: +diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c +index cd871dc8b7c0..33384662fc82 100644 +--- a/security/keys/process_keys.c ++++ b/security/keys/process_keys.c +@@ -125,13 +125,18 @@ error: + } + + /* +- * Install a fresh thread keyring directly to new credentials. This keyring is +- * allowed to overrun the quota. ++ * Install a thread keyring to the given credentials struct if it didn't have ++ * one already. This is allowed to overrun the quota. ++ * ++ * Return: 0 if a thread keyring is now present; -errno on failure. + */ + int install_thread_keyring_to_cred(struct cred *new) + { + struct key *keyring; + ++ if (new->thread_keyring) ++ return 0; ++ + keyring = keyring_alloc("_tid", new->uid, new->gid, new, + KEY_POS_ALL | KEY_USR_VIEW, + KEY_ALLOC_QUOTA_OVERRUN, NULL); +@@ -143,7 +148,9 @@ int install_thread_keyring_to_cred(struct cred *new) + } + + /* +- * Install a fresh thread keyring, discarding the old one. ++ * Install a thread keyring to the current task if it didn't have one already. ++ * ++ * Return: 0 if a thread keyring is now present; -errno on failure. + */ + static int install_thread_keyring(void) + { +@@ -154,8 +161,6 @@ static int install_thread_keyring(void) + if (!new) + return -ENOMEM; + +- BUG_ON(new->thread_keyring); +- + ret = install_thread_keyring_to_cred(new); + if (ret < 0) { + abort_creds(new); +@@ -166,17 +171,17 @@ static int install_thread_keyring(void) + } + + /* +- * Install a process keyring directly to a credentials struct. ++ * Install a process keyring to the given credentials struct if it didn't have ++ * one already. This is allowed to overrun the quota. + * +- * Returns -EEXIST if there was already a process keyring, 0 if one installed, +- * and other value on any other error ++ * Return: 0 if a process keyring is now present; -errno on failure. + */ + int install_process_keyring_to_cred(struct cred *new) + { + struct key *keyring; + + if (new->process_keyring) +- return -EEXIST; ++ return 0; + + keyring = keyring_alloc("_pid", new->uid, new->gid, new, + KEY_POS_ALL | KEY_USR_VIEW, +@@ -189,11 +194,9 @@ int install_process_keyring_to_cred(struct cred *new) + } + + /* +- * Make sure a process keyring is installed for the current process. The +- * existing process keyring is not replaced. ++ * Install a process keyring to the current task if it didn't have one already. 
+ * +- * Returns 0 if there is a process keyring by the end of this function, some +- * error otherwise. ++ * Return: 0 if a process keyring is now present; -errno on failure. + */ + static int install_process_keyring(void) + { +@@ -207,14 +210,18 @@ static int install_process_keyring(void) + ret = install_process_keyring_to_cred(new); + if (ret < 0) { + abort_creds(new); +- return ret != -EEXIST ? ret : 0; ++ return ret; + } + + return commit_creds(new); + } + + /* +- * Install a session keyring directly to a credentials struct. ++ * Install the given keyring as the session keyring of the given credentials ++ * struct, replacing the existing one if any. If the given keyring is NULL, ++ * then install a new anonymous session keyring. ++ * ++ * Return: 0 on success; -errno on failure. + */ + int install_session_keyring_to_cred(struct cred *cred, struct key *keyring) + { +@@ -249,8 +256,11 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring) + } + + /* +- * Install a session keyring, discarding the old one. If a keyring is not +- * supplied, an empty one is invented. ++ * Install the given keyring as the session keyring of the current task, ++ * replacing the existing one if any. If the given keyring is NULL, then ++ * install a new anonymous session keyring. ++ * ++ * Return: 0 on success; -errno on failure. + */ + static int install_session_keyring(struct key *keyring) + { +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c +index fdd6e4f8be39..c08d4a10b07e 100644 +--- a/security/selinux/hooks.c ++++ b/security/selinux/hooks.c +@@ -5442,7 +5442,7 @@ static int selinux_setprocattr(struct task_struct *p, + return error; + + /* Obtain a SID for the context, if one was specified. */ +- if (size && str[1] && str[1] != '\n') { ++ if (size && str[0] && str[0] != '\n') { + if (str[size-1] == '\n') { + str[size-1] = 0; + size--; +diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c +index 6ac40beb49da..7f414b05644b 100644 +--- a/sound/pci/ctxfi/cthw20k1.c ++++ b/sound/pci/ctxfi/cthw20k1.c +@@ -27,12 +27,6 @@ + #include "cthw20k1.h" + #include "ct20k1reg.h" + +-#if BITS_PER_LONG == 32 +-#define CT_XFI_DMA_MASK DMA_BIT_MASK(32) /* 32 bit PTE */ +-#else +-#define CT_XFI_DMA_MASK DMA_BIT_MASK(64) /* 64 bit PTE */ +-#endif +- + struct hw20k1 { + struct hw hw; + spinlock_t reg_20k1_lock; +@@ -1903,19 +1897,18 @@ static int hw_card_start(struct hw *hw) + { + int err; + struct pci_dev *pci = hw->pci; ++ const unsigned int dma_bits = BITS_PER_LONG; + + err = pci_enable_device(pci); + if (err < 0) + return err; + + /* Set DMA transfer mask */ +- if (pci_set_dma_mask(pci, CT_XFI_DMA_MASK) < 0 || +- pci_set_consistent_dma_mask(pci, CT_XFI_DMA_MASK) < 0) { +- printk(KERN_ERR "architecture does not support PCI " +- "busmaster DMA with mask 0x%llx\n", +- CT_XFI_DMA_MASK); +- err = -ENXIO; +- goto error1; ++ if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) { ++ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits)); ++ } else { ++ dma_set_mask(&pci->dev, DMA_BIT_MASK(32)); ++ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32)); + } + + if (!hw->io_base) { +diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c +index b1438861d38a..5828a3ec58bb 100644 +--- a/sound/pci/ctxfi/cthw20k2.c ++++ b/sound/pci/ctxfi/cthw20k2.c +@@ -26,12 +26,6 @@ + #include "cthw20k2.h" + #include "ct20k2reg.h" + +-#if BITS_PER_LONG == 32 +-#define CT_XFI_DMA_MASK DMA_BIT_MASK(32) /* 32 bit PTE */ +-#else +-#define CT_XFI_DMA_MASK DMA_BIT_MASK(64) /* 64 bit PTE */ 
+-#endif +- + struct hw20k2 { + struct hw hw; + /* for i2c */ +@@ -2026,18 +2020,18 @@ static int hw_card_start(struct hw *hw) + int err = 0; + struct pci_dev *pci = hw->pci; + unsigned int gctl; ++ const unsigned int dma_bits = BITS_PER_LONG; + + err = pci_enable_device(pci); + if (err < 0) + return err; + + /* Set DMA transfer mask */ +- if (pci_set_dma_mask(pci, CT_XFI_DMA_MASK) < 0 || +- pci_set_consistent_dma_mask(pci, CT_XFI_DMA_MASK) < 0) { +- printk(KERN_ERR "ctxfi: architecture does not support PCI " +- "busmaster DMA with mask 0x%llx\n", CT_XFI_DMA_MASK); +- err = -ENXIO; +- goto error1; ++ if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) { ++ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits)); ++ } else { ++ dma_set_mask(&pci->dev, DMA_BIT_MASK(32)); ++ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32)); + } + + if (!hw->io_base) { +diff --git a/sound/usb/card.c b/sound/usb/card.c +index 64952e2d3ed1..7344ac083263 100644 +--- a/sound/usb/card.c ++++ b/sound/usb/card.c +@@ -205,7 +205,6 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int + if (! snd_usb_parse_audio_interface(chip, interface)) { + usb_set_interface(dev, interface, 0); /* reset the current interface */ + usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L); +- return -EINVAL; + } + + return 0; +diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c +index ab3ed4af1466..9f2afbd33702 100644 +--- a/tools/perf/builtin-trace.c ++++ b/tools/perf/builtin-trace.c +@@ -330,7 +330,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel, + + if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) { + if (!trace->duration_filter) { +- trace__fprintf_entry_head(trace, thread, 1, sample->time, stdout); ++ trace__fprintf_entry_head(trace, thread, 1, ttrace->entry_time, stdout); + printf("%-70s\n", ttrace->entry_str); + } + } else +@@ -364,7 +364,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel, + } else if (trace->duration_filter) + goto out; + +- trace__fprintf_entry_head(trace, thread, duration, sample->time, stdout); ++ trace__fprintf_entry_head(trace, thread, duration, ttrace->entry_time, stdout); + + if (ttrace->entry_pending) { + printf("%-70s", ttrace->entry_str); +diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c +index 8715a1006d00..ae061a45fa04 100644 +--- a/tools/perf/util/trace-event-scripting.c ++++ b/tools/perf/util/trace-event-scripting.c +@@ -90,7 +90,8 @@ static void register_python_scripting(struct scripting_ops *scripting_ops) + if (err) + die("error registering py script extension"); + +- scripting_context = malloc(sizeof(struct scripting_context)); ++ if (scripting_context == NULL) ++ scripting_context = malloc(sizeof(*scripting_context)); + } + + #ifdef NO_LIBPYTHON +@@ -153,7 +154,8 @@ static void register_perl_scripting(struct scripting_ops *scripting_ops) + if (err) + die("error registering pl script extension"); + +- scripting_context = malloc(sizeof(struct scripting_context)); ++ if (scripting_context == NULL) ++ scripting_context = malloc(sizeof(*scripting_context)); + } + + #ifdef NO_LIBPERL +diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl +index 0d7fd8b51544..a0a8314df4b9 100755 +--- a/tools/testing/ktest/ktest.pl ++++ b/tools/testing/ktest/ktest.pl +@@ -2375,7 +2375,7 @@ sub do_run_test { + } + + waitpid $child_pid, 0; +- $child_exit = $?; ++ $child_exit = $? 
>> 8; + + if (!$bug && $in_bisect) { + if (defined($bisect_ret_good)) { +diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c +index 64ee720b75c7..362908c5f6c3 100644 +--- a/virt/kvm/eventfd.c ++++ b/virt/kvm/eventfd.c +@@ -753,6 +753,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) + if (ret < 0) + goto unlock_fail; + ++ kvm->buses[bus_idx]->ioeventfd_count++; + list_add_tail(&p->list, &kvm->ioeventfds); + + mutex_unlock(&kvm->slots_lock); +@@ -798,6 +799,8 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) + continue; + + kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); ++ if (kvm->buses[bus_idx]) ++ kvm->buses[bus_idx]->ioeventfd_count--; + ioeventfd_release(p); + ret = 0; + break; +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c +index f71c4ad425c6..0715673b6965 100644 +--- a/virt/kvm/kvm_main.c ++++ b/virt/kvm/kvm_main.c +@@ -607,8 +607,11 @@ static void kvm_destroy_vm(struct kvm *kvm) + list_del(&kvm->vm_list); + raw_spin_unlock(&kvm_lock); + kvm_free_irq_routing(kvm); +- for (i = 0; i < KVM_NR_BUSES; i++) +- kvm_io_bus_destroy(kvm->buses[i]); ++ for (i = 0; i < KVM_NR_BUSES; i++) { ++ if (kvm->buses[i]) ++ kvm_io_bus_destroy(kvm->buses[i]); ++ kvm->buses[i] = NULL; ++ } + kvm_coalesced_mmio_free(kvm); + #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) + mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); +@@ -2885,6 +2888,8 @@ int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, + }; + + bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); ++ if (!bus) ++ return -ENOMEM; + idx = kvm_io_bus_get_first_dev(bus, addr, len); + if (idx < 0) + return -EOPNOTSUPP; +@@ -2913,6 +2918,8 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, + }; + + bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); ++ if (!bus) ++ return -ENOMEM; + idx = kvm_io_bus_get_first_dev(bus, addr, len); + if (idx < 0) + return -EOPNOTSUPP; +@@ -2934,7 +2941,11 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, + struct kvm_io_bus *new_bus, *bus; + + bus = kvm->buses[bus_idx]; +- if (bus->dev_count > NR_IOBUS_DEVS - 1) ++ if (!bus) ++ return -ENOMEM; ++ ++ /* exclude ioeventfd which is limited by maximum fd */ ++ if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) + return -ENOSPC; + + new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) * +@@ -2952,37 +2963,41 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, + } + + /* Caller must hold slots_lock. 
*/ +-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, +- struct kvm_io_device *dev) ++void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, ++ struct kvm_io_device *dev) + { +- int i, r; ++ int i; + struct kvm_io_bus *new_bus, *bus; + + bus = kvm->buses[bus_idx]; +- r = -ENOENT; ++ if (!bus) ++ return; ++ + for (i = 0; i < bus->dev_count; i++) + if (bus->range[i].dev == dev) { +- r = 0; + break; + } + +- if (r) +- return r; ++ if (i == bus->dev_count) ++ return; + + new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) * + sizeof(struct kvm_io_range)), GFP_KERNEL); +- if (!new_bus) +- return -ENOMEM; ++ if (!new_bus) { ++ pr_err("kvm: failed to shrink bus, removing it completely\n"); ++ goto broken; ++ } + + memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); + new_bus->dev_count--; + memcpy(new_bus->range + i, bus->range + i + 1, + (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); + ++broken: + rcu_assign_pointer(kvm->buses[bus_idx], new_bus); + synchronize_srcu_expedited(&kvm->srcu); + kfree(bus); +- return r; ++ return; + } + + static struct notifier_block kvm_cpu_notifier = { diff --git a/patch/kernel/odroidc1-default/patch-3.10.106-107.patch b/patch/kernel/odroidc1-default/patch-3.10.106-107.patch new file mode 100644 index 000000000..7f9ee2ab0 --- /dev/null +++ b/patch/kernel/odroidc1-default/patch-3.10.106-107.patch @@ -0,0 +1,8746 @@ +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt +index daf83824fda5..ed0c7e3ba8da 100644 +--- a/Documentation/kernel-parameters.txt ++++ b/Documentation/kernel-parameters.txt +@@ -2889,6 +2889,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted. + spia_pedr= + spia_peddr= + ++ stack_guard_gap= [MM] ++ override the default stack gap protection. The value ++ is in page units and it defines how many pages prior ++ to (for stacks growing down) resp. after (for stacks ++ growing up) the main stack are reserved for no other ++ mapping. Default value is 256 pages. ++ + stacktrace [FTRACE] + Enabled the stack tracer on boot up. 
+ +diff --git a/Makefile b/Makefile +index 2f87f67fb9f7..752b1c67daa0 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 10 +-SUBLEVEL = 106 ++SUBLEVEL = 107 + EXTRAVERSION = + NAME = TOSSUG Baby Fish + +diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c +index 116d3e09b5b5..e6b365d9e0ad 100644 +--- a/arch/arc/kernel/unaligned.c ++++ b/arch/arc/kernel/unaligned.c +@@ -228,8 +228,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs, + if (state.fault) + goto fault; + ++ /* clear any remanants of delay slot */ + if (delay_mode(regs)) { +- regs->ret = regs->bta; ++ regs->ret = regs->bta & ~1U; + regs->status32 &= ~STATUS_DE_MASK; + } else { + regs->ret += state.instr_len; +diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c +index 2e06d56e987b..cf4ae6958240 100644 +--- a/arch/arc/mm/mmap.c ++++ b/arch/arc/mm/mmap.c +@@ -64,7 +64,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts +index c914357c0d89..d3c206e78870 100644 +--- a/arch/arm/boot/dts/da850-evm.dts ++++ b/arch/arm/boot/dts/da850-evm.dts +@@ -59,6 +59,7 @@ + #size-cells = <1>; + compatible = "m25p64"; + spi-max-frequency = <30000000>; ++ m25p,fast-read; + reg = <0>; + partition@0 { + label = "U-Boot-SPL"; +diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h +index dba62cb1ad08..f389107947e0 100644 +--- a/arch/arm/include/asm/cputype.h ++++ b/arch/arm/include/asm/cputype.h +@@ -58,6 +58,9 @@ + #define ARM_CPU_XSCALE_ARCH_V2 0x4000 + #define ARM_CPU_XSCALE_ARCH_V3 0x6000 + ++/* Qualcomm implemented cores */ ++#define ARM_CPU_PART_SCORPION 0x510002d0 ++ + extern unsigned int processor_id; + + #ifdef CONFIG_CPU_CP15 +diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c +index 1fd749ee4a1b..b0b69e9ce660 100644 +--- a/arch/arm/kernel/hw_breakpoint.c ++++ b/arch/arm/kernel/hw_breakpoint.c +@@ -1066,6 +1066,22 @@ static int __init arch_hw_breakpoint_init(void) + return 0; + } + ++ /* ++ * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD ++ * whenever a WFI is issued, even if the core is not powered down, in ++ * violation of the architecture. When DBGPRSR.SPD is set, accesses to ++ * breakpoint and watchpoint registers are treated as undefined, so ++ * this results in boot time and runtime failures when these are ++ * accessed and we unexpectedly take a trap. ++ * ++ * It's not clear if/how this can be worked around, so we blacklist ++ * Scorpion CPUs to avoid these issues. ++ */ ++ if ((read_cpuid_id() & 0xff00fff0) == ARM_CPU_PART_SCORPION) { ++ pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n"); ++ return 0; ++ } ++ + has_ossr = core_has_os_save_restore(); + + /* Determine how many BRPs/WRPs are available. 
*/ +diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c +index 5ef506c6f492..984509eb44b9 100644 +--- a/arch/arm/mm/mmap.c ++++ b/arch/arm/mm/mmap.c +@@ -89,7 +89,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +@@ -140,7 +140,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c +index 81edd31bb4ac..810ae2db40ef 100644 +--- a/arch/arm/xen/enlighten.c ++++ b/arch/arm/xen/enlighten.c +@@ -258,8 +258,7 @@ static int __init xen_guest_init(void) + * for secondary CPUs as they are brought up. + * For uniformity we use VCPUOP_register_vcpu_info even on cpu0. + */ +- xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info), +- sizeof(struct vcpu_info)); ++ xen_vcpu_info = alloc_percpu(struct vcpu_info); + if (xen_vcpu_info == NULL) + return -ENOMEM; + +diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c +index 3c494e84444d..a511ac16a8e3 100644 +--- a/arch/c6x/kernel/ptrace.c ++++ b/arch/c6x/kernel/ptrace.c +@@ -69,46 +69,6 @@ static int gpr_get(struct task_struct *target, + 0, sizeof(*regs)); + } + +-static int gpr_set(struct task_struct *target, +- const struct user_regset *regset, +- unsigned int pos, unsigned int count, +- const void *kbuf, const void __user *ubuf) +-{ +- int ret; +- struct pt_regs *regs = task_pt_regs(target); +- +- /* Don't copyin TSR or CSR */ +- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, +- ®s, +- 0, PT_TSR * sizeof(long)); +- if (ret) +- return ret; +- +- ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, +- PT_TSR * sizeof(long), +- (PT_TSR + 1) * sizeof(long)); +- if (ret) +- return ret; +- +- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, +- ®s, +- (PT_TSR + 1) * sizeof(long), +- PT_CSR * sizeof(long)); +- if (ret) +- return ret; +- +- ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, +- PT_CSR * sizeof(long), +- (PT_CSR + 1) * sizeof(long)); +- if (ret) +- return ret; +- +- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, +- ®s, +- (PT_CSR + 1) * sizeof(long), -1); +- return ret; +-} +- + enum c6x_regset { + REGSET_GPR, + }; +@@ -120,7 +80,6 @@ static const struct user_regset c6x_regsets[] = { + .size = sizeof(u32), + .align = sizeof(u32), + .get = gpr_get, +- .set = gpr_set + }, + }; + +diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c +index 836f14707a62..efa59f1f8022 100644 +--- a/arch/frv/mm/elf-fdpic.c ++++ b/arch/frv/mm/elf-fdpic.c +@@ -74,7 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + addr = PAGE_ALIGN(addr); + vma = find_vma(current->mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + goto success; + } + +diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h +index 7841f2290385..9d523375f68a 100644 +--- a/arch/metag/include/asm/uaccess.h ++++ b/arch/metag/include/asm/uaccess.h +@@ -192,20 +192,21 @@ extern long __must_check strnlen_user(const char __user *src, long count); + + #define strlen_user(str) strnlen_user(str, 32767) + +-extern unsigned long __must_check 
__copy_user_zeroing(void *to, +- const void __user *from, +- unsigned long n); ++extern unsigned long raw_copy_from_user(void *to, const void __user *from, ++ unsigned long n); + + static inline unsigned long + copy_from_user(void *to, const void __user *from, unsigned long n) + { ++ unsigned long res = n; + if (likely(access_ok(VERIFY_READ, from, n))) +- return __copy_user_zeroing(to, from, n); +- memset(to, 0, n); +- return n; ++ res = raw_copy_from_user(to, from, n); ++ if (unlikely(res)) ++ memset(to + (n - res), 0, res); ++ return res; + } + +-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n) ++#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n) + #define __copy_from_user_inatomic __copy_from_user + + extern unsigned long __must_check __copy_user(void __user *to, +diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c +index 7563628822bd..5e2dc7defd2c 100644 +--- a/arch/metag/kernel/ptrace.c ++++ b/arch/metag/kernel/ptrace.c +@@ -24,6 +24,16 @@ + * user_regset definitions. + */ + ++static unsigned long user_txstatus(const struct pt_regs *regs) ++{ ++ unsigned long data = (unsigned long)regs->ctx.Flags; ++ ++ if (regs->ctx.SaveMask & TBICTX_CBUF_BIT) ++ data |= USER_GP_REGS_STATUS_CATCH_BIT; ++ ++ return data; ++} ++ + int metag_gp_regs_copyout(const struct pt_regs *regs, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +@@ -62,9 +72,7 @@ int metag_gp_regs_copyout(const struct pt_regs *regs, + if (ret) + goto out; + /* TXSTATUS */ +- data = (unsigned long)regs->ctx.Flags; +- if (regs->ctx.SaveMask & TBICTX_CBUF_BIT) +- data |= USER_GP_REGS_STATUS_CATCH_BIT; ++ data = user_txstatus(regs); + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &data, 4*25, 4*26); + if (ret) +@@ -119,6 +127,7 @@ int metag_gp_regs_copyin(struct pt_regs *regs, + if (ret) + goto out; + /* TXSTATUS */ ++ data = user_txstatus(regs); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &data, 4*25, 4*26); + if (ret) +@@ -244,6 +253,8 @@ int metag_rp_state_copyin(struct pt_regs *regs, + unsigned long long *ptr; + int ret, i; + ++ if (count < 4*13) ++ return -EINVAL; + /* Read the entire pipeline before making any changes */ + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &rp, 0, 4*13); +@@ -303,7 +314,7 @@ static int metag_tls_set(struct task_struct *target, + const void *kbuf, const void __user *ubuf) + { + int ret; +- void __user *tls; ++ void __user *tls = target->thread.tls_ptr; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); + if (ret) +diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c +index dfe77b26beaa..2792fc621088 100644 +--- a/arch/metag/lib/usercopy.c ++++ b/arch/metag/lib/usercopy.c +@@ -29,7 +29,6 @@ + COPY \ + "1:\n" \ + " .section .fixup,\"ax\"\n" \ +- " MOV D1Ar1,#0\n" \ + FIXUP \ + " MOVT D1Ar1,#HI(1b)\n" \ + " JUMP D1Ar1,#LO(1b)\n" \ +@@ -661,16 +660,14 @@ EXPORT_SYMBOL(__copy_user); + __asm_copy_user_cont(to, from, ret, \ + " GETB D1Ar1,[%1++]\n" \ + "2: SETB [%0++],D1Ar1\n", \ +- "3: ADD %2,%2,#1\n" \ +- " SETB [%0++],D1Ar1\n", \ ++ "3: ADD %2,%2,#1\n", \ + " .long 2b,3b\n") + + #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ + __asm_copy_user_cont(to, from, ret, \ + " GETW D1Ar1,[%1++]\n" \ + "2: SETW [%0++],D1Ar1\n" COPY, \ +- "3: ADD %2,%2,#2\n" \ +- " SETW [%0++],D1Ar1\n" FIXUP, \ ++ "3: ADD %2,%2,#2\n" FIXUP, \ + " .long 2b,3b\n" TENTRY) + + #define __asm_copy_from_user_2(to, from, ret) \ +@@ -680,145 +677,26 @@ 
EXPORT_SYMBOL(__copy_user); + __asm_copy_from_user_2x_cont(to, from, ret, \ + " GETB D1Ar1,[%1++]\n" \ + "4: SETB [%0++],D1Ar1\n", \ +- "5: ADD %2,%2,#1\n" \ +- " SETB [%0++],D1Ar1\n", \ ++ "5: ADD %2,%2,#1\n", \ + " .long 4b,5b\n") + + #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ + __asm_copy_user_cont(to, from, ret, \ + " GETD D1Ar1,[%1++]\n" \ + "2: SETD [%0++],D1Ar1\n" COPY, \ +- "3: ADD %2,%2,#4\n" \ +- " SETD [%0++],D1Ar1\n" FIXUP, \ ++ "3: ADD %2,%2,#4\n" FIXUP, \ + " .long 2b,3b\n" TENTRY) + + #define __asm_copy_from_user_4(to, from, ret) \ + __asm_copy_from_user_4x_cont(to, from, ret, "", "", "") + +-#define __asm_copy_from_user_5(to, from, ret) \ +- __asm_copy_from_user_4x_cont(to, from, ret, \ +- " GETB D1Ar1,[%1++]\n" \ +- "4: SETB [%0++],D1Ar1\n", \ +- "5: ADD %2,%2,#1\n" \ +- " SETB [%0++],D1Ar1\n", \ +- " .long 4b,5b\n") +- +-#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ +- __asm_copy_from_user_4x_cont(to, from, ret, \ +- " GETW D1Ar1,[%1++]\n" \ +- "4: SETW [%0++],D1Ar1\n" COPY, \ +- "5: ADD %2,%2,#2\n" \ +- " SETW [%0++],D1Ar1\n" FIXUP, \ +- " .long 4b,5b\n" TENTRY) +- +-#define __asm_copy_from_user_6(to, from, ret) \ +- __asm_copy_from_user_6x_cont(to, from, ret, "", "", "") +- +-#define __asm_copy_from_user_7(to, from, ret) \ +- __asm_copy_from_user_6x_cont(to, from, ret, \ +- " GETB D1Ar1,[%1++]\n" \ +- "6: SETB [%0++],D1Ar1\n", \ +- "7: ADD %2,%2,#1\n" \ +- " SETB [%0++],D1Ar1\n", \ +- " .long 6b,7b\n") +- +-#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ +- __asm_copy_from_user_4x_cont(to, from, ret, \ +- " GETD D1Ar1,[%1++]\n" \ +- "4: SETD [%0++],D1Ar1\n" COPY, \ +- "5: ADD %2,%2,#4\n" \ +- " SETD [%0++],D1Ar1\n" FIXUP, \ +- " .long 4b,5b\n" TENTRY) +- +-#define __asm_copy_from_user_8(to, from, ret) \ +- __asm_copy_from_user_8x_cont(to, from, ret, "", "", "") +- +-#define __asm_copy_from_user_9(to, from, ret) \ +- __asm_copy_from_user_8x_cont(to, from, ret, \ +- " GETB D1Ar1,[%1++]\n" \ +- "6: SETB [%0++],D1Ar1\n", \ +- "7: ADD %2,%2,#1\n" \ +- " SETB [%0++],D1Ar1\n", \ +- " .long 6b,7b\n") +- +-#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ +- __asm_copy_from_user_8x_cont(to, from, ret, \ +- " GETW D1Ar1,[%1++]\n" \ +- "6: SETW [%0++],D1Ar1\n" COPY, \ +- "7: ADD %2,%2,#2\n" \ +- " SETW [%0++],D1Ar1\n" FIXUP, \ +- " .long 6b,7b\n" TENTRY) +- +-#define __asm_copy_from_user_10(to, from, ret) \ +- __asm_copy_from_user_10x_cont(to, from, ret, "", "", "") +- +-#define __asm_copy_from_user_11(to, from, ret) \ +- __asm_copy_from_user_10x_cont(to, from, ret, \ +- " GETB D1Ar1,[%1++]\n" \ +- "8: SETB [%0++],D1Ar1\n", \ +- "9: ADD %2,%2,#1\n" \ +- " SETB [%0++],D1Ar1\n", \ +- " .long 8b,9b\n") +- +-#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ +- __asm_copy_from_user_8x_cont(to, from, ret, \ +- " GETD D1Ar1,[%1++]\n" \ +- "6: SETD [%0++],D1Ar1\n" COPY, \ +- "7: ADD %2,%2,#4\n" \ +- " SETD [%0++],D1Ar1\n" FIXUP, \ +- " .long 6b,7b\n" TENTRY) +- +-#define __asm_copy_from_user_12(to, from, ret) \ +- __asm_copy_from_user_12x_cont(to, from, ret, "", "", "") +- +-#define __asm_copy_from_user_13(to, from, ret) \ +- __asm_copy_from_user_12x_cont(to, from, ret, \ +- " GETB D1Ar1,[%1++]\n" \ +- "8: SETB [%0++],D1Ar1\n", \ +- "9: ADD %2,%2,#1\n" \ +- " SETB [%0++],D1Ar1\n", \ +- " .long 8b,9b\n") +- +-#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ +- __asm_copy_from_user_12x_cont(to, from, ret, \ +- " GETW 
D1Ar1,[%1++]\n" \ +- "8: SETW [%0++],D1Ar1\n" COPY, \ +- "9: ADD %2,%2,#2\n" \ +- " SETW [%0++],D1Ar1\n" FIXUP, \ +- " .long 8b,9b\n" TENTRY) +- +-#define __asm_copy_from_user_14(to, from, ret) \ +- __asm_copy_from_user_14x_cont(to, from, ret, "", "", "") +- +-#define __asm_copy_from_user_15(to, from, ret) \ +- __asm_copy_from_user_14x_cont(to, from, ret, \ +- " GETB D1Ar1,[%1++]\n" \ +- "10: SETB [%0++],D1Ar1\n", \ +- "11: ADD %2,%2,#1\n" \ +- " SETB [%0++],D1Ar1\n", \ +- " .long 10b,11b\n") +- +-#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ +- __asm_copy_from_user_12x_cont(to, from, ret, \ +- " GETD D1Ar1,[%1++]\n" \ +- "8: SETD [%0++],D1Ar1\n" COPY, \ +- "9: ADD %2,%2,#4\n" \ +- " SETD [%0++],D1Ar1\n" FIXUP, \ +- " .long 8b,9b\n" TENTRY) +- +-#define __asm_copy_from_user_16(to, from, ret) \ +- __asm_copy_from_user_16x_cont(to, from, ret, "", "", "") +- + #define __asm_copy_from_user_8x64(to, from, ret) \ + asm volatile ( \ + " GETL D0Ar2,D1Ar1,[%1++]\n" \ + "2: SETL [%0++],D0Ar2,D1Ar1\n" \ + "1:\n" \ + " .section .fixup,\"ax\"\n" \ +- " MOV D1Ar1,#0\n" \ +- " MOV D0Ar2,#0\n" \ + "3: ADD %2,%2,#8\n" \ +- " SETL [%0++],D0Ar2,D1Ar1\n" \ + " MOVT D0Ar2,#HI(1b)\n" \ + " JUMP D0Ar2,#LO(1b)\n" \ + " .previous\n" \ +@@ -878,11 +756,12 @@ EXPORT_SYMBOL(__copy_user); + "SUB %1, %1, D0Ar2\n") + + +-/* Copy from user to kernel, zeroing the bytes that were inaccessible in +- userland. The return-value is the number of bytes that were +- inaccessible. */ +-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, +- unsigned long n) ++/* ++ * Copy from user to kernel. The return-value is the number of bytes that were ++ * inaccessible. ++ */ ++unsigned long raw_copy_from_user(void *pdst, const void __user *psrc, ++ unsigned long n) + { + register char *dst asm ("A0.2") = pdst; + register const char __user *src asm ("A1.2") = psrc; +@@ -895,7 +774,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, + __asm_copy_from_user_1(dst, src, retn); + n--; + if (retn) +- goto copy_exception_bytes; ++ return retn + n; + } + if ((unsigned long) dst & 1) { + /* Worst case - byte copy */ +@@ -903,14 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, + __asm_copy_from_user_1(dst, src, retn); + n--; + if (retn) +- goto copy_exception_bytes; ++ return retn + n; + } + } + if (((unsigned long) src & 2) && n >= 2) { + __asm_copy_from_user_2(dst, src, retn); + n -= 2; + if (retn) +- goto copy_exception_bytes; ++ return retn + n; + } + if ((unsigned long) dst & 2) { + /* Second worst case - word copy */ +@@ -918,7 +797,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, + __asm_copy_from_user_2(dst, src, retn); + n -= 2; + if (retn) +- goto copy_exception_bytes; ++ return retn + n; + } + } + +@@ -934,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, + __asm_copy_from_user_8x64(dst, src, retn); + n -= 8; + if (retn) +- goto copy_exception_bytes; ++ return retn + n; + } + } + +@@ -950,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, + __asm_copy_from_user_8x64(dst, src, retn); + n -= 8; + if (retn) +- goto copy_exception_bytes; ++ return retn + n; + } + } + #endif +@@ -960,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, + n -= 4; + + if (retn) +- goto copy_exception_bytes; ++ return retn + n; + } + + /* If we get here, there were no memory read faults. 
*/ +@@ -986,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, + /* If we get here, retn correctly reflects the number of failing + bytes. */ + return retn; +- +- copy_exception_bytes: +- /* We already have "retn" bytes cleared, and need to clear the +- remaining "n" bytes. A non-optimized simple byte-for-byte in-line +- memset is preferred here, since this isn't speed-critical code and +- we'd rather have this a leaf-function than calling memset. */ +- { +- char *endp; +- for (endp = dst + n; dst < endp; dst++) +- *dst = 0; +- } +- +- return retn + n; + } +-EXPORT_SYMBOL(__copy_user_zeroing); ++EXPORT_SYMBOL(raw_copy_from_user); + + #define __asm_clear_8x64(to, ret) \ + asm volatile ( \ +diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h +index ac3d2b8a20d4..d48cf440010c 100644 +--- a/arch/mips/include/asm/checksum.h ++++ b/arch/mips/include/asm/checksum.h +@@ -155,7 +155,9 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, + " daddu %0, %4 \n" + " dsll32 $1, %0, 0 \n" + " daddu %0, $1 \n" ++ " sltu $1, %0, $1 \n" + " dsra32 %0, %0, 0 \n" ++ " addu %0, $1 \n" + #endif + " .set pop" + : "=r" (sum) +diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c +index 93aa302948d7..c68312947ed9 100644 +--- a/arch/mips/kernel/crash.c ++++ b/arch/mips/kernel/crash.c +@@ -15,12 +15,22 @@ static int crashing_cpu = -1; + static cpumask_t cpus_in_crash = CPU_MASK_NONE; + + #ifdef CONFIG_SMP +-static void crash_shutdown_secondary(void *ignore) ++static void crash_shutdown_secondary(void *passed_regs) + { +- struct pt_regs *regs; ++ struct pt_regs *regs = passed_regs; + int cpu = smp_processor_id(); + +- regs = task_pt_regs(current); ++ /* ++ * If we are passed registers, use those. Otherwise get the ++ * regs from the last interrupt, which should be correct, as ++ * we are in an interrupt. But if the regs are not there, ++ * pull them from the top of the stack. They are probably ++ * wrong, but we need something to keep from crashing again. 
++ */ ++ if (!regs) ++ regs = get_irq_regs(); ++ if (!regs) ++ regs = task_pt_regs(current); + + if (!cpu_online(cpu)) + return; +diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c +index 7e5fe2790d8a..0bb42959948e 100644 +--- a/arch/mips/mm/mmap.c ++++ b/arch/mips/mm/mmap.c +@@ -92,7 +92,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h +index 8c9b631d2a78..8c00e6c06266 100644 +--- a/arch/parisc/include/asm/bitops.h ++++ b/arch/parisc/include/asm/bitops.h +@@ -6,7 +6,7 @@ + #endif + + #include +-#include /* for BITS_PER_LONG/SHIFT_PER_LONG */ ++#include + #include + #include + +@@ -16,6 +16,12 @@ + * to include/asm-i386/bitops.h or kerneldoc + */ + ++#if __BITS_PER_LONG == 64 ++#define SHIFT_PER_LONG 6 ++#else ++#define SHIFT_PER_LONG 5 ++#endif ++ + #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1)) + + +diff --git a/arch/parisc/include/uapi/asm/bitsperlong.h b/arch/parisc/include/uapi/asm/bitsperlong.h +index 75196b415d3f..540c94de4427 100644 +--- a/arch/parisc/include/uapi/asm/bitsperlong.h ++++ b/arch/parisc/include/uapi/asm/bitsperlong.h +@@ -9,10 +9,8 @@ + */ + #if (defined(__KERNEL__) && defined(CONFIG_64BIT)) || defined (__LP64__) + #define __BITS_PER_LONG 64 +-#define SHIFT_PER_LONG 6 + #else + #define __BITS_PER_LONG 32 +-#define SHIFT_PER_LONG 5 + #endif + + #include +diff --git a/arch/parisc/include/uapi/asm/swab.h b/arch/parisc/include/uapi/asm/swab.h +index e78403b129ef..928e1bbac98f 100644 +--- a/arch/parisc/include/uapi/asm/swab.h ++++ b/arch/parisc/include/uapi/asm/swab.h +@@ -1,6 +1,7 @@ + #ifndef _PARISC_SWAB_H + #define _PARISC_SWAB_H + ++#include + #include + #include + +@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x) + } + #define __arch_swab32 __arch_swab32 + +-#if BITS_PER_LONG > 32 ++#if __BITS_PER_LONG > 32 + /* + ** From "PA-RISC 2.0 Architecture", HP Professional Books. + ** See Appendix I page 8 , "Endian Byte Swapping". +@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x) + return x; + } + #define __arch_swab64 __arch_swab64 +-#endif /* BITS_PER_LONG > 32 */ ++#endif /* __BITS_PER_LONG > 32 */ + + #endif /* _PARISC_SWAB_H */ +diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile +index 56a4a5d205af..a008b872d4b7 100644 +--- a/arch/powerpc/Makefile ++++ b/arch/powerpc/Makefile +@@ -273,6 +273,14 @@ checkbin: + echo 'disable kernel modules' ; \ + false ; \ + fi ++ @if test "x${CONFIG_CPU_LITTLE_ENDIAN}" = "xy" \ ++ && $(LD) --version | head -1 | grep ' 2\.24$$' >/dev/null ; then \ ++ echo -n '*** binutils 2.24 miscompiles weak symbols ' ; \ ++ echo 'in some circumstances.' ; \ ++ echo -n '*** Please use a different binutils version.' ; \ ++ false ; \ ++ fi ++ + + CLEAN_FILES += $(TOUT) + +diff --git a/arch/powerpc/boot/ps3-head.S b/arch/powerpc/boot/ps3-head.S +index b6fcbaf5027b..3dc44b05fb97 100644 +--- a/arch/powerpc/boot/ps3-head.S ++++ b/arch/powerpc/boot/ps3-head.S +@@ -57,11 +57,6 @@ __system_reset_overlay: + bctr + + 1: +- /* Save the value at addr zero for a null pointer write check later. */ +- +- li r4, 0 +- lwz r3, 0(r4) +- + /* Primary delays then goes to _zimage_start in wrapper. 
*/ + + or 31, 31, 31 /* db16cyc */ +diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c +index 9954d98871d0..029ea3ce1588 100644 +--- a/arch/powerpc/boot/ps3.c ++++ b/arch/powerpc/boot/ps3.c +@@ -119,13 +119,12 @@ void ps3_copy_vectors(void) + flush_cache((void *)0x100, 512); + } + +-void platform_init(unsigned long null_check) ++void platform_init(void) + { + const u32 heapsize = 0x1000000 - (u32)_end; /* 16MiB */ + void *chosen; + unsigned long ft_addr; + u64 rm_size; +- unsigned long val; + + console_ops.write = ps3_console_write; + platform_ops.exit = ps3_exit; +@@ -153,11 +152,6 @@ void platform_init(unsigned long null_check) + + printf(" flat tree at 0x%lx\n\r", ft_addr); + +- val = *(unsigned long *)0; +- +- if (val != null_check) +- printf("null check failed: %lx != %lx\n\r", val, null_check); +- + ((kernel_entry_t)0)(ft_addr, 0, NULL); + + ps3_exit(); +diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c +index 52e5758ea368..b3bab9575d31 100644 +--- a/arch/powerpc/kernel/align.c ++++ b/arch/powerpc/kernel/align.c +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + + struct aligninfo { + unsigned char len; +@@ -764,14 +765,25 @@ int fix_alignment(struct pt_regs *regs) + nb = aligninfo[instr].len; + flags = aligninfo[instr].flags; + +- /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */ +- if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) { +- nb = 8; +- flags = LD+SW; +- } else if (IS_XFORM(instruction) && +- ((instruction >> 1) & 0x3ff) == 660) { +- nb = 8; +- flags = ST+SW; ++ /* ++ * Handle some cases which give overlaps in the DSISR values. ++ */ ++ if (IS_XFORM(instruction)) { ++ switch (get_xop(instruction)) { ++ case 532: /* ldbrx */ ++ nb = 8; ++ flags = LD+SW; ++ break; ++ case 660: /* stdbrx */ ++ nb = 8; ++ flags = ST+SW; ++ break; ++ case 20: /* lwarx */ ++ case 84: /* ldarx */ ++ case 116: /* lharx */ ++ case 276: /* lqarx */ ++ return 0; /* not emulated ever */ ++ } + } + + /* Byteswap little endian loads and stores */ +diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c +index f0b47d1a6b0e..7531f9abf10d 100644 +--- a/arch/powerpc/kernel/hw_breakpoint.c ++++ b/arch/powerpc/kernel/hw_breakpoint.c +@@ -228,8 +228,10 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) + rcu_read_lock(); + + bp = __get_cpu_var(bp_per_reg); +- if (!bp) ++ if (!bp) { ++ rc = NOTIFY_DONE; + goto out; ++ } + info = counter_arch_bp(bp); + + /* +diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c +index 631a2650e4e4..50b482bcbea2 100644 +--- a/arch/powerpc/kvm/emulate.c ++++ b/arch/powerpc/kvm/emulate.c +@@ -511,7 +511,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) + advance = 0; + printk(KERN_ERR "Couldn't emulate instruction 0x%08x " + "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); +- kvmppc_core_queue_program(vcpu, 0); + } + } + +diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c +index 7ce9cf3b6988..887365a82c01 100644 +--- a/arch/powerpc/mm/slice.c ++++ b/arch/powerpc/mm/slice.c +@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, + if ((mm->task_size - len) < addr) + return 0; + vma = find_vma(mm, addr); +- return (!vma || (addr + len) <= vma->vm_start); ++ return (!vma || (addr + len) <= vm_start_gap(vma)); + } + + static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) +diff --git a/arch/s390/include/asm/processor.h 
b/arch/s390/include/asm/processor.h +index 6b499870662f..52ef30cfedf0 100644 +--- a/arch/s390/include/asm/processor.h ++++ b/arch/s390/include/asm/processor.h +@@ -43,14 +43,17 @@ extern void execve_tail(void); + #ifndef CONFIG_64BIT + + #define TASK_SIZE (1UL << 31) ++#define TASK_MAX_SIZE (1UL << 31) + #define TASK_UNMAPPED_BASE (1UL << 30) + + #else /* CONFIG_64BIT */ + +-#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit) ++#define TASK_SIZE_OF(tsk) ((tsk)->mm ? \ ++ (tsk)->mm->context.asce_limit : TASK_MAX_SIZE) + #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ + (1UL << 30) : (1UL << 41)) + #define TASK_SIZE TASK_SIZE_OF(current) ++#define TASK_MAX_SIZE (1UL << 53) + + #endif /* CONFIG_64BIT */ + +diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c +index a938b548f07e..14a77e6d8fc7 100644 +--- a/arch/s390/mm/pgtable.c ++++ b/arch/s390/mm/pgtable.c +@@ -335,7 +335,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from, + + if ((from | to | len) & (PMD_SIZE - 1)) + return -EINVAL; +- if (len == 0 || from + len > PGDIR_SIZE || ++ if (len == 0 || from + len > TASK_MAX_SIZE || + from + len < from || to + len < to) + return -EINVAL; + +diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c +index f8e69d5bc0a9..aae199b3e046 100644 +--- a/arch/s390/pci/pci_dma.c ++++ b/arch/s390/pci/pci_dma.c +@@ -416,7 +416,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev) + zdev->dma_table = dma_alloc_cpu_table(); + if (!zdev->dma_table) { + rc = -ENOMEM; +- goto out_clean; ++ goto out; + } + + zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET; +@@ -429,7 +429,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev) + bitmap_order); + if (!zdev->iommu_bitmap) { + rc = -ENOMEM; +- goto out_reg; ++ goto free_dma_table; + } + + rc = zpci_register_ioat(zdev, +@@ -438,12 +438,16 @@ int zpci_dma_init_device(struct zpci_dev *zdev) + zdev->start_dma + zdev->iommu_size - 1, + (u64) zdev->dma_table); + if (rc) +- goto out_reg; +- return 0; ++ goto free_bitmap; + +-out_reg: ++ return 0; ++free_bitmap: ++ vfree(zdev->iommu_bitmap); ++ zdev->iommu_bitmap = NULL; ++free_dma_table: + dma_free_cpu_table(zdev->dma_table); +-out_clean: ++ zdev->dma_table = NULL; ++out: + return rc; + } + +diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c +index 6777177807c2..7df7d5944188 100644 +--- a/arch/sh/mm/mmap.c ++++ b/arch/sh/mm/mmap.c +@@ -63,7 +63,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +@@ -113,7 +113,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c +index 7ff45e4ba681..875ddf00dab4 100644 +--- a/arch/sparc/kernel/ptrace_64.c ++++ b/arch/sparc/kernel/ptrace_64.c +@@ -308,7 +308,7 @@ static int genregs64_set(struct task_struct *target, + } + + if (!ret) { +- unsigned long y; ++ unsigned long y = regs->y; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &y, +diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c +index 666510b39870..66e1047c835f 100644 +--- a/arch/sparc/kernel/sys_sparc_64.c ++++ b/arch/sparc/kernel/sys_sparc_64.c +@@ -119,7 +119,7 @@ 
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + + vma = find_vma(mm, addr); + if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +@@ -182,7 +182,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + + vma = find_vma(mm, addr); + if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c +index d2b59441ebdd..ce4937025e97 100644 +--- a/arch/sparc/mm/hugetlbpage.c ++++ b/arch/sparc/mm/hugetlbpage.c +@@ -118,7 +118,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + addr = ALIGN(addr, HPAGE_SIZE); + vma = find_vma(mm, addr); + if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) +diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c +index 0f83ed4602b2..d0dac73a2d80 100644 +--- a/arch/tile/kernel/ptrace.c ++++ b/arch/tile/kernel/ptrace.c +@@ -110,7 +110,7 @@ static int tile_gpr_set(struct task_struct *target, + const void *kbuf, const void __user *ubuf) + { + int ret; +- struct pt_regs regs; ++ struct pt_regs regs = *task_pt_regs(target); + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ®s, 0, + sizeof(regs)); +diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c +index 650ccff8378c..c75eac7a2316 100644 +--- a/arch/tile/mm/hugetlbpage.c ++++ b/arch/tile/mm/hugetlbpage.c +@@ -297,7 +297,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + if (current->mm->get_unmapped_area == arch_get_unmapped_area) +diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c +index 4bcf841e4701..3deb8e533359 100644 +--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c ++++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c +@@ -218,6 +218,29 @@ static int ghash_async_final(struct ahash_request *req) + } + } + ++static int ghash_async_import(struct ahash_request *req, const void *in) ++{ ++ struct ahash_request *cryptd_req = ahash_request_ctx(req); ++ struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ++ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); ++ ++ ghash_async_init(req); ++ memcpy(dctx, in, sizeof(*dctx)); ++ return 0; ++ ++} ++ ++static int ghash_async_export(struct ahash_request *req, void *out) ++{ ++ struct ahash_request *cryptd_req = ahash_request_ctx(req); ++ struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ++ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); ++ ++ memcpy(out, dctx, sizeof(*dctx)); ++ return 0; ++ ++} ++ + static int ghash_async_digest(struct ahash_request *req) + { + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); +@@ -285,8 +308,11 @@ static struct ahash_alg ghash_async_alg = { + .final = ghash_async_final, + .setkey = ghash_async_setkey, + .digest = ghash_async_digest, ++ .export = ghash_async_export, ++ .import = ghash_async_import, + .halg = { + .digestsize = GHASH_DIGEST_SIZE, ++ .statesize = sizeof(struct ghash_desc_ctx), + .base = { + .cra_name = "ghash", + .cra_driver_name = 
"ghash-clmulni", +diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h +index 01f15b227d7e..2fa7f4f6ecb3 100644 +--- a/arch/x86/include/asm/elf.h ++++ b/arch/x86/include/asm/elf.h +@@ -272,7 +272,7 @@ struct task_struct; + + #define ARCH_DLINFO_IA32(vdso_enabled) \ + do { \ +- if (vdso_enabled) { \ ++ if (VDSO_CURRENT_BASE) { \ + NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ + } \ +diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c +index 9cb52767999a..338a4ae486bc 100644 +--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c ++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c +@@ -51,7 +51,7 @@ static const char * const th_names[] = { + "load_store", + "insn_fetch", + "combined_unit", +- "", ++ "decode_unit", + "northbridge", + "execution_unit", + }; +diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S +index 6ed8f16fd61b..cc89b36e556a 100644 +--- a/arch/x86/kernel/entry_64.S ++++ b/arch/x86/kernel/entry_64.S +@@ -122,7 +122,8 @@ GLOBAL(ftrace_graph_call) + jmp ftrace_stub + #endif + +-GLOBAL(ftrace_stub) ++/* This is weak to keep gas from relaxing the jumps */ ++WEAK(ftrace_stub) + retq + END(ftrace_caller) + +diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c +index 1ffc32dbe450..8c43930ce1a7 100644 +--- a/arch/x86/kernel/ftrace.c ++++ b/arch/x86/kernel/ftrace.c +@@ -744,6 +744,18 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, + unsigned long return_hooker = (unsigned long) + &return_to_handler; + ++ /* ++ * When resuming from suspend-to-ram, this function can be indirectly ++ * called from early CPU startup code while the CPU is in real mode, ++ * which would fail miserably. Make sure the stack pointer is a ++ * virtual address. ++ * ++ * This check isn't as accurate as virt_addr_valid(), but it should be ++ * good enough for this purpose, and it's fast. 
++ */ ++ if (unlikely((long)__builtin_frame_address(0) >= 0)) ++ return; ++ + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + return; + +diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c +index 30277e27431a..d050393d3be2 100644 +--- a/arch/x86/kernel/sys_x86_64.c ++++ b/arch/x86/kernel/sys_x86_64.c +@@ -127,7 +127,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); + if (end - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +@@ -166,7 +166,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 7e9ca58ae875..d9016e4a80f9 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -1047,10 +1047,10 @@ static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12, + return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS; + } + +-static inline bool is_exception(u32 intr_info) ++static inline bool is_nmi(u32 intr_info) + { + return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) +- == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK); ++ == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK); + } + + static void nested_vmx_vmexit(struct kvm_vcpu *vcpu); +@@ -3074,7 +3074,7 @@ static void fix_rmode_seg(int seg, struct kvm_segment *save) + } + + vmcs_write16(sf->selector, var.selector); +- vmcs_write32(sf->base, var.base); ++ vmcs_writel(sf->base, var.base); + vmcs_write32(sf->limit, var.limit); + vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var)); + } +@@ -4716,7 +4716,7 @@ static int handle_exception(struct kvm_vcpu *vcpu) + if (is_machine_check(intr_info)) + return handle_machine_check(vcpu); + +- if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR) ++ if (is_nmi(intr_info)) + return 1; /* already handled by vmx_vcpu_run() */ + + if (is_no_device(intr_info)) { +@@ -6507,7 +6507,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) + + switch (exit_reason) { + case EXIT_REASON_EXCEPTION_NMI: +- if (!is_exception(intr_info)) ++ if (is_nmi(intr_info)) + return 0; + else if (is_page_fault(intr_info)) + return enable_ept; +@@ -6803,8 +6803,7 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) + kvm_machine_check(); + + /* We need to handle NMIs before interrupts are enabled */ +- if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR && +- (exit_intr_info & INTR_INFO_VALID_MASK)) { ++ if (is_nmi(exit_intr_info)) { + kvm_before_handle_nmi(&vmx->vcpu); + asm("int $2"); + kvm_after_handle_nmi(&vmx->vcpu); +diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c +index ae1aa71d0115..6adf3d963320 100644 +--- a/arch/x86/mm/hugetlbpage.c ++++ b/arch/x86/mm/hugetlbpage.c +@@ -341,7 +341,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c +index 7a5bf1b76e2f..40940780c665 100644 +--- a/arch/x86/mm/init.c ++++ b/arch/x86/mm/init.c 
+@@ -475,21 +475,40 @@ void __init init_mem_mapping(void) + * devmem_is_allowed() checks to see if /dev/mem access to a certain address + * is valid. The argument is a physical page number. + * +- * +- * On x86, access has to be given to the first megabyte of ram because that area +- * contains bios code and data regions used by X and dosemu and similar apps. +- * Access has to be given to non-kernel-ram areas as well, these contain the PCI +- * mmio resources as well as potential bios/acpi data regions. ++ * On x86, access has to be given to the first megabyte of RAM because that ++ * area traditionally contains BIOS code and data regions used by X, dosemu, ++ * and similar apps. Since they map the entire memory range, the whole range ++ * must be allowed (for mapping), but any areas that would otherwise be ++ * disallowed are flagged as being "zero filled" instead of rejected. ++ * Access has to be given to non-kernel-ram areas as well, these contain the ++ * PCI mmio resources as well as potential bios/acpi data regions. + */ + int devmem_is_allowed(unsigned long pagenr) + { +- if (pagenr < 256) +- return 1; +- if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) ++ if (page_is_ram(pagenr)) { ++ /* ++ * For disallowed memory regions in the low 1MB range, ++ * request that the page be shown as all zeros. ++ */ ++ if (pagenr < 256) ++ return 2; ++ ++ return 0; ++ } ++ ++ /* ++ * This must follow RAM test, since System RAM is considered a ++ * restricted resource under CONFIG_STRICT_IOMEM. ++ */ ++ if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) { ++ /* Low 1MB bypasses iomem restrictions. */ ++ if (pagenr < 256) ++ return 1; ++ + return 0; +- if (!page_is_ram(pagenr)) +- return 1; +- return 0; ++ } ++ ++ return 1; + } + + void free_init_pages(char *what, unsigned long begin, unsigned long end) +diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c +index 75f9e5d80d02..7da1b9a234a6 100644 +--- a/arch/x86/mm/mmap.c ++++ b/arch/x86/mm/mmap.c +@@ -67,22 +67,21 @@ static int mmap_is_legacy(void) + + static unsigned long mmap_rnd(void) + { +- unsigned long rnd = 0; ++ unsigned long rnd; + + /* +- * 8 bits of randomness in 32bit mmaps, 20 address space bits +- * 28 bits of randomness in 64bit mmaps, 40 address space bits +- */ +- if (current->flags & PF_RANDOMIZE) { +- if (mmap_is_ia32()) +- rnd = get_random_int() % (1<<8); +- else +- rnd = get_random_int() % (1<<28); +- } ++ * 8 bits of randomness in 32bit mmaps, 20 address space bits ++ * 28 bits of randomness in 64bit mmaps, 40 address space bits ++ */ ++ if (mmap_is_ia32()) ++ rnd = (unsigned long)get_random_int() % (1<<8); ++ else ++ rnd = (unsigned long)get_random_int() % (1<<28); ++ + return rnd << PAGE_SHIFT; + } + +-static unsigned long mmap_base(void) ++static unsigned long mmap_base(unsigned long rnd) + { + unsigned long gap = rlimit(RLIMIT_STACK); + +@@ -91,19 +90,7 @@ static unsigned long mmap_base(void) + else if (gap > MAX_GAP) + gap = MAX_GAP; + +- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd()); +-} +- +-/* +- * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64 +- * does, but not when emulating X86_32 +- */ +-static unsigned long mmap_legacy_base(void) +-{ +- if (mmap_is_ia32()) +- return TASK_UNMAPPED_BASE; +- else +- return TASK_UNMAPPED_BASE + mmap_rnd(); ++ return PAGE_ALIGN(TASK_SIZE - gap - rnd); + } + + /* +@@ -112,14 +99,19 @@ static unsigned long mmap_legacy_base(void) + */ + void arch_pick_mmap_layout(struct mm_struct *mm) + { +- mm->mmap_legacy_base = mmap_legacy_base(); +- mm->mmap_base = mmap_base(); 
++ unsigned long random_factor = 0UL; ++ ++ if (current->flags & PF_RANDOMIZE) ++ random_factor = mmap_rnd(); ++ ++ mm->mmap_legacy_base = TASK_UNMAPPED_BASE + random_factor; + + if (mmap_is_legacy()) { + mm->mmap_base = mm->mmap_legacy_base; + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { ++ mm->mmap_base = mmap_base(random_factor); + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } +diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c +index a3b0265c2ca7..63462c8db802 100644 +--- a/arch/x86/pci/acpi.c ++++ b/arch/x86/pci/acpi.c +@@ -118,6 +118,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = { + DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"), + }, + }, ++ /* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */ ++ { ++ .callback = set_nouse_crs, ++ .ident = "Supermicro X8DTH", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"), ++ DMI_MATCH(DMI_BIOS_VERSION, "2.0a"), ++ }, ++ }, + + /* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */ + { +diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c +index 13e8935e2eab..e3600eb618c1 100644 +--- a/arch/x86/xen/time.c ++++ b/arch/x86/xen/time.c +@@ -338,11 +338,11 @@ static int xen_vcpuop_set_next_event(unsigned long delta, + WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT); + + single.timeout_abs_ns = get_abs_timeout(delta); +- single.flags = VCPU_SSHOTTMR_future; ++ /* Get an event anyway, even if the timeout is already expired */ ++ single.flags = 0; + + ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single); +- +- BUG_ON(ret != 0 && ret != -ETIME); ++ BUG_ON(ret != 0); + + return ret; + } +diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c +index 14c6c3a6f04b..8164f05d2372 100644 +--- a/arch/xtensa/kernel/setup.c ++++ b/arch/xtensa/kernel/setup.c +@@ -160,6 +160,8 @@ static int __init parse_tag_initrd(const bp_tag_t* tag) + + __tagtable(BP_TAG_INITRD, parse_tag_initrd); + ++#endif /* CONFIG_BLK_DEV_INITRD */ ++ + #ifdef CONFIG_OF + + static int __init parse_tag_fdt(const bp_tag_t *tag) +@@ -180,8 +182,6 @@ void __init early_init_dt_setup_initrd_arch(unsigned long start, + + #endif /* CONFIG_OF */ + +-#endif /* CONFIG_BLK_DEV_INITRD */ +- + static int __init parse_tag_cmdline(const bp_tag_t* tag) + { + strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE); +diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c +index 5d3f7a119ed1..1ff0b92eeae7 100644 +--- a/arch/xtensa/kernel/syscall.c ++++ b/arch/xtensa/kernel/syscall.c +@@ -86,7 +86,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + /* At this point: (!vmm || addr < vmm->vm_end). 
*/ + if (TASK_SIZE - len < addr) + return -ENOMEM; +- if (!vmm || addr + len <= vmm->vm_start) ++ if (!vmm || addr + len <= vm_start_gap(vmm)) + return addr; + addr = vmm->vm_end; + if (flags & MAP_SHARED) +diff --git a/block/bsg.c b/block/bsg.c +index 420a5a9f1b23..76801e57f556 100644 +--- a/block/bsg.c ++++ b/block/bsg.c +@@ -675,6 +675,9 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) + + dprintk("%s: write %Zd bytes\n", bd->name, count); + ++ if (unlikely(segment_eq(get_fs(), KERNEL_DS))) ++ return -EINVAL; ++ + bsg_set_block(bd, file); + + bytes_written = 0; +diff --git a/block/genhd.c b/block/genhd.c +index 7af2f6a18d9b..afd8206e530d 100644 +--- a/block/genhd.c ++++ b/block/genhd.c +@@ -662,7 +662,6 @@ void del_gendisk(struct gendisk *disk) + + kobject_put(disk->part0.holder_dir); + kobject_put(disk->slave_dir); +- disk->driverfs_dev = NULL; + if (!sysfs_deprecated) + sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk))); + pm_runtime_set_memalloc_noio(disk_to_dev(disk), false); +diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c +index 1b4988b4bc11..9bfbb51aa75e 100644 +--- a/block/scsi_ioctl.c ++++ b/block/scsi_ioctl.c +@@ -175,6 +175,9 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter) + __set_bit(WRITE_16, filter->write_ok); + __set_bit(WRITE_LONG, filter->write_ok); + __set_bit(WRITE_LONG_2, filter->write_ok); ++ __set_bit(WRITE_SAME, filter->write_ok); ++ __set_bit(WRITE_SAME_16, filter->write_ok); ++ __set_bit(WRITE_SAME_32, filter->write_ok); + __set_bit(ERASE, filter->write_ok); + __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok); + __set_bit(MODE_SELECT, filter->write_ok); +diff --git a/crypto/Makefile b/crypto/Makefile +index b54916590d3a..139e7e0a06b4 100644 +--- a/crypto/Makefile ++++ b/crypto/Makefile +@@ -52,6 +52,7 @@ obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o + obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o + obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o + obj-$(CONFIG_CRYPTO_WP512) += wp512.o ++CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 + obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o + obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o + obj-$(CONFIG_CRYPTO_ECB) += ecb.o +@@ -72,6 +73,7 @@ obj-$(CONFIG_CRYPTO_BLOWFISH_COMMON) += blowfish_common.o + obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o + obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o + obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o ++CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 + obj-$(CONFIG_CRYPTO_AES) += aes_generic.o + obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o + obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o +diff --git a/crypto/algapi.c b/crypto/algapi.c +index daf2f653b131..8ea7a5dc3839 100644 +--- a/crypto/algapi.c ++++ b/crypto/algapi.c +@@ -337,6 +337,7 @@ int crypto_register_alg(struct crypto_alg *alg) + struct crypto_larval *larval; + int err; + ++ alg->cra_flags &= ~CRYPTO_ALG_DEAD; + err = crypto_check_alg(alg); + if (err) + return err; +diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c +index d11d431251f7..63e154017f53 100644 +--- a/crypto/algif_hash.c ++++ b/crypto/algif_hash.c +@@ -195,7 +195,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags) + struct alg_sock *ask = alg_sk(sk); + struct hash_ctx *ctx = ask->private; + struct ahash_request *req = &ctx->req; +- char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))]; ++ char 
state[crypto_ahash_statesize(crypto_ahash_reqtfm(req)) ? : 1]; + struct sock *sk2; + struct alg_sock *ask2; + struct hash_ctx *ctx2; +diff --git a/crypto/cryptd.c b/crypto/cryptd.c +index d85fab975514..acbe1b978431 100644 +--- a/crypto/cryptd.c ++++ b/crypto/cryptd.c +@@ -606,6 +606,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, + inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC; + + inst->alg.halg.digestsize = salg->digestsize; ++ inst->alg.halg.statesize = salg->statesize; + inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); + + inst->alg.halg.base.cra_init = cryptd_hash_init_tfm; +diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile +index 97c949abfabb..2af5b5a7d7e2 100644 +--- a/drivers/acpi/Makefile ++++ b/drivers/acpi/Makefile +@@ -2,7 +2,6 @@ + # Makefile for the Linux ACPI interpreter + # + +-ccflags-y := -Os + ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT + + # +diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c +index 288bb270f8ed..9954200c32d0 100644 +--- a/drivers/acpi/power.c ++++ b/drivers/acpi/power.c +@@ -211,6 +211,7 @@ static int acpi_power_get_list_state(struct list_head *list, int *state) + return -EINVAL; + + /* The state of the list is 'on' IFF all resources are 'on'. */ ++ cur_state = 0; + list_for_each_entry(entry, list, node) { + struct acpi_power_resource *resource = entry->resource; + acpi_handle handle = resource->device.handle; +diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c +index 0dc9ff61d7c2..e3ecaf4d64f4 100644 +--- a/drivers/acpi/video.c ++++ b/drivers/acpi/video.c +@@ -1263,6 +1263,9 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video) + union acpi_object *dod = NULL; + union acpi_object *obj; + ++ if (!video->cap._DOD) ++ return AE_NOT_EXIST; ++ + status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer); + if (!ACPI_SUCCESS(status)) { + ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD")); +diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c +index b256ff5b6579..d9f45c821ac4 100644 +--- a/drivers/ata/sata_mv.c ++++ b/drivers/ata/sata_mv.c +@@ -4097,6 +4097,9 @@ static int mv_platform_probe(struct platform_device *pdev) + host->iomap = NULL; + hpriv->base = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); ++ if (!hpriv->base) ++ return -ENOMEM; ++ + hpriv->base -= SATAHC0_REG_BASE; + + #if defined(CONFIG_HAVE_CLK) +diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c +index f72f52b4b1dd..65e36873656b 100644 +--- a/drivers/bcma/main.c ++++ b/drivers/bcma/main.c +@@ -432,8 +432,11 @@ static int bcma_device_probe(struct device *dev) + drv); + int err = 0; + ++ get_device(dev); + if (adrv->probe) + err = adrv->probe(core); ++ if (err) ++ put_device(dev); + + return err; + } +@@ -446,6 +449,7 @@ static int bcma_device_remove(struct device *dev) + + if (adrv->remove) + adrv->remove(core); ++ put_device(dev); + + return 0; + } +diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig +index 30878924e65b..8ccb96ae3fd9 100644 +--- a/drivers/char/Kconfig ++++ b/drivers/char/Kconfig +@@ -579,9 +579,12 @@ config TELCLOCK + controlling the behavior of this hardware. + + config DEVPORT +- bool ++ bool "/dev/port character device" + depends on ISA || PCI + default y ++ help ++ Say Y here if you want to support the /dev/port device. The /dev/port ++ device is similar to /dev/mem, but for I/O ports. 
+ + source "drivers/s390/char/Kconfig" + +diff --git a/drivers/char/mem.c b/drivers/char/mem.c +index 598ece77ee9e..40d2e99c6ba7 100644 +--- a/drivers/char/mem.c ++++ b/drivers/char/mem.c +@@ -61,6 +61,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) + #endif + + #ifdef CONFIG_STRICT_DEVMEM ++static inline int page_is_allowed(unsigned long pfn) ++{ ++ return devmem_is_allowed(pfn); ++} + static inline int range_is_allowed(unsigned long pfn, unsigned long size) + { + u64 from = ((u64)pfn) << PAGE_SHIFT; +@@ -76,6 +80,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) + return 1; + } + #else ++static inline int page_is_allowed(unsigned long pfn) ++{ ++ return 1; ++} + static inline int range_is_allowed(unsigned long pfn, unsigned long size) + { + return 1; +@@ -117,23 +125,31 @@ static ssize_t read_mem(struct file *file, char __user *buf, + + while (count > 0) { + unsigned long remaining; ++ int allowed; + + sz = size_inside_page(p, count); + +- if (!range_is_allowed(p >> PAGE_SHIFT, count)) ++ allowed = page_is_allowed(p >> PAGE_SHIFT); ++ if (!allowed) + return -EPERM; ++ if (allowed == 2) { ++ /* Show zeros for restricted memory. */ ++ remaining = clear_user(buf, sz); ++ } else { ++ /* ++ * On ia64 if a page has been mapped somewhere as ++ * uncached, then it must also be accessed uncached ++ * by the kernel or data corruption may occur. ++ */ ++ ptr = xlate_dev_mem_ptr(p); ++ if (!ptr) ++ return -EFAULT; + +- /* +- * On ia64 if a page has been mapped somewhere as uncached, then +- * it must also be accessed uncached by the kernel or data +- * corruption may occur. +- */ +- ptr = xlate_dev_mem_ptr(p); +- if (!ptr) +- return -EFAULT; ++ remaining = copy_to_user(buf, ptr, sz); ++ ++ unxlate_dev_mem_ptr(p, ptr); ++ } + +- remaining = copy_to_user(buf, ptr, sz); +- unxlate_dev_mem_ptr(p, ptr); + if (remaining) + return -EFAULT; + +@@ -173,30 +189,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf, + #endif + + while (count > 0) { ++ int allowed; ++ + sz = size_inside_page(p, count); + +- if (!range_is_allowed(p >> PAGE_SHIFT, sz)) ++ allowed = page_is_allowed(p >> PAGE_SHIFT); ++ if (!allowed) + return -EPERM; + +- /* +- * On ia64 if a page has been mapped somewhere as uncached, then +- * it must also be accessed uncached by the kernel or data +- * corruption may occur. +- */ +- ptr = xlate_dev_mem_ptr(p); +- if (!ptr) { +- if (written) +- break; +- return -EFAULT; +- } ++ /* Skip actual writing when a page is marked as restricted. */ ++ if (allowed == 1) { ++ /* ++ * On ia64 if a page has been mapped somewhere as ++ * uncached, then it must also be accessed uncached ++ * by the kernel or data corruption may occur. 
++ */ ++ ptr = xlate_dev_mem_ptr(p); ++ if (!ptr) { ++ if (written) ++ break; ++ return -EFAULT; ++ } + +- copied = copy_from_user(ptr, buf, sz); +- unxlate_dev_mem_ptr(p, ptr); +- if (copied) { +- written += sz - copied; +- if (written) +- break; +- return -EFAULT; ++ copied = copy_from_user(ptr, buf, sz); ++ unxlate_dev_mem_ptr(p, ptr); ++ if (copied) { ++ written += sz - copied; ++ if (written) ++ break; ++ return -EFAULT; ++ } + } + + buf += sz; +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c +index ec3bd62eeaf6..d69c63fdae67 100644 +--- a/drivers/char/virtio_console.c ++++ b/drivers/char/virtio_console.c +@@ -1129,6 +1129,8 @@ static int put_chars(u32 vtermno, const char *buf, int count) + { + struct port *port; + struct scatterlist sg[1]; ++ void *data; ++ int ret; + + if (unlikely(early_put_chars)) + return early_put_chars(vtermno, buf, count); +@@ -1137,8 +1139,14 @@ static int put_chars(u32 vtermno, const char *buf, int count) + if (!port) + return -EPIPE; + +- sg_init_one(sg, buf, count); +- return __send_to_port(port, sg, 1, count, (void *)buf, false); ++ data = kmemdup(buf, count, GFP_ATOMIC); ++ if (!data) ++ return -ENOMEM; ++ ++ sg_init_one(sg, data, count); ++ ret = __send_to_port(port, sg, 1, count, data, false); ++ kfree(data); ++ return ret; + } + + /* +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c +index 66f6cf5064ec..d85c41800952 100644 +--- a/drivers/cpufreq/cpufreq.c ++++ b/drivers/cpufreq/cpufreq.c +@@ -437,9 +437,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, + char *buf) + { + unsigned int cur_freq = __cpufreq_get(policy->cpu); +- if (!cur_freq) +- return sprintf(buf, ""); +- return sprintf(buf, "%u\n", cur_freq); ++ ++ if (cur_freq) ++ return sprintf(buf, "%u\n", cur_freq); ++ ++ return sprintf(buf, "\n"); + } + + +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c +index 62834322b337..4e7c97aa9e59 100644 +--- a/drivers/gpu/drm/ast/ast_main.c ++++ b/drivers/gpu/drm/ast/ast_main.c +@@ -120,7 +120,8 @@ static int ast_get_dram_info(struct drm_device *dev) + ast_write32(ast, 0x10000, 0xfc600309); + + do { +- ; ++ if (pci_channel_offline(dev->pdev)) ++ return -EIO; + } while (ast_read32(ast, 0x10000) != 0x01); + data = ast_read32(ast, 0x10004); + +@@ -343,7 +344,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags) + ast_detect_chip(dev); + + if (ast->chip != AST1180) { +- ast_get_dram_info(dev); ++ ret = ast_get_dram_info(dev); ++ if (ret) ++ goto out_free; + ast->vram_size = ast_get_vram_info(dev); + DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size); + } +diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c +index 977cfb35837a..d3464f35f427 100644 +--- a/drivers/gpu/drm/ast/ast_post.c ++++ b/drivers/gpu/drm/ast/ast_post.c +@@ -53,13 +53,9 @@ ast_is_vga_enabled(struct drm_device *dev) + /* TODO 1180 */ + } else { + ch = ast_io_read8(ast, 0x43); +- if (ch) { +- ast_open_key(ast); +- ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff); +- return ch & 0x04; +- } ++ return !!(ch & 0x01); + } +- return 0; ++ return false; + } + #endif + +diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c +index 973056b86207..b16e051e48f0 100644 +--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c ++++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c +@@ -224,6 +224,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) + uint32_t mpllP; + + 
pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); ++ mpllP = (mpllP >> 8) & 0xf; + if (!mpllP) + mpllP = 4; + +@@ -234,7 +235,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) + uint32_t clock; + + pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); +- return clock; ++ return clock / 1000; + } + + ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals); +diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c +index 0ac0a88860a4..f1672f388983 100644 +--- a/drivers/gpu/drm/ttm/ttm_bo.c ++++ b/drivers/gpu/drm/ttm/ttm_bo.c +@@ -1866,7 +1866,6 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) + struct ttm_buffer_object *bo; + int ret = -EBUSY; + int put_count; +- uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM); + + spin_lock(&glob->lru_lock); + list_for_each_entry(bo, &glob->swap_lru, swap) { +@@ -1904,7 +1903,8 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) + if (unlikely(ret != 0)) + goto out; + +- if ((bo->mem.placement & swap_placement) != swap_placement) { ++ if (bo->mem.mem_type != TTM_PL_SYSTEM || ++ bo->ttm->caching_state != tt_cached) { + struct ttm_mem_reg evict_mem; + + evict_mem = bo->mem; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +index c509d40c4897..17a503ff260f 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +@@ -69,8 +69,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, + break; + } + default: +- DRM_ERROR("Illegal vmwgfx get param request: %d\n", +- param->param); + return -EINVAL; + } + +@@ -90,7 +88,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, + void *bounce; + int ret; + +- if (unlikely(arg->pad64 != 0)) { ++ if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) { + DRM_ERROR("Illegal GET_3D_CAP argument.\n"); + return -EINVAL; + } +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +index 582814339748..12969378c06e 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +@@ -677,11 +677,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, + 128; + + num_sizes = 0; +- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) ++ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { ++ if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS) ++ return -EINVAL; + num_sizes += req->mip_levels[i]; ++ } + +- if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * +- DRM_VMW_MAX_MIP_LEVELS) ++ if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS || ++ num_sizes == 0) + return -EINVAL; + + size = vmw_user_surface_size + 128 + +diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c +index c4ef3bc726e3..e299576004ce 100644 +--- a/drivers/hid/hid-cypress.c ++++ b/drivers/hid/hid-cypress.c +@@ -39,6 +39,9 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc, + if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX)) + return rdesc; + ++ if (*rsize < 4) ++ return rdesc; ++ + for (i = 0; i < *rsize - 4; i++) + if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) { + __u8 tmp; +diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c +index 12fc48c968e6..34dbb9d6d852 100644 +--- a/drivers/hid/hid-lg.c ++++ b/drivers/hid/hid-lg.c +@@ -790,7 +790,7 @@ static const struct hid_device_id lg_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG), + .driver_data = LG_FF }, + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 
USB_DEVICE_ID_LOGITECH_RUMBLEPAD2), +- .driver_data = LG_FF2 }, ++ .driver_data = LG_NOGET | LG_FF2 }, + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940), + .driver_data = LG_FF3 }, + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR), +diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c +index ccc2f36bb334..6584a4d6b880 100644 +--- a/drivers/hid/i2c-hid/i2c-hid.c ++++ b/drivers/hid/i2c-hid/i2c-hid.c +@@ -326,6 +326,15 @@ static int i2c_hid_hwreset(struct i2c_client *client) + if (ret) + return ret; + ++ /* ++ * The HID over I2C specification states that if a DEVICE needs time ++ * after the PWR_ON request, it should utilise CLOCK stretching. ++ * However, it has been observered that the Windows driver provides a ++ * 1ms sleep between the PWR_ON and RESET requests and that some devices ++ * rely on this. ++ */ ++ usleep_range(1000, 5000); ++ + i2c_hid_dbg(ihid, "resetting...\n"); + + ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0); +diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c +index 05e6a7d13d4e..50e6ba9548b4 100644 +--- a/drivers/hv/channel.c ++++ b/drivers/hv/channel.c +@@ -114,7 +114,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, + struct vmbus_channel_msginfo *open_info = NULL; + void *in, *out; + unsigned long flags; +- int ret, t, err = 0; ++ int ret, err = 0; + + newchannel->onchannel_callback = onchannelcallback; + newchannel->channel_callback_context = context; +@@ -204,11 +204,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, + goto error1; + } + +- t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ); +- if (t == 0) { +- err = -ETIMEDOUT; +- goto error1; +- } ++ wait_for_completion(&open_info->waitevent); + + + if (open_info->response.open_result.status) +@@ -391,7 +387,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, + struct vmbus_channel_gpadl_header *gpadlmsg; + struct vmbus_channel_gpadl_body *gpadl_body; + struct vmbus_channel_msginfo *msginfo = NULL; +- struct vmbus_channel_msginfo *submsginfo; ++ struct vmbus_channel_msginfo *submsginfo, *tmp; + u32 msgcount; + struct list_head *curr; + u32 next_gpadl_handle; +@@ -453,6 +449,13 @@ cleanup: + list_del(&msginfo->msglistentry); + spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); + ++ if (msgcount > 1) { ++ list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist, ++ msglistentry) { ++ kfree(submsginfo); ++ } ++ } ++ + kfree(msginfo); + return ret; + } +diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c +index b1039552b623..4e4cb3db3239 100644 +--- a/drivers/hv/hv.c ++++ b/drivers/hv/hv.c +@@ -154,7 +154,7 @@ int hv_init(void) + /* See if the hypercall page is already set */ + rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); + +- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC); ++ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX); + + if (!virtaddr) + goto cleanup; +diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c +index 694173f662d1..d285165435d7 100644 +--- a/drivers/hv/hv_balloon.c ++++ b/drivers/hv/hv_balloon.c +@@ -673,7 +673,7 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt) + * If the pfn range we are dealing with is not in the current + * "hot add block", move on. 
+ */ +- if ((start_pfn >= has->end_pfn)) ++ if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn) + continue; + /* + * If the current hot add-request extends beyond +@@ -728,7 +728,7 @@ static unsigned long handle_pg_range(unsigned long pg_start, + * If the pfn range we are dealing with is not in the current + * "hot add block", move on. + */ +- if ((start_pfn >= has->end_pfn)) ++ if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn) + continue; + + old_covered_state = has->covered_end_pfn; +diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c +index c3ccdea3d180..fa3ecec524fa 100644 +--- a/drivers/i2c/i2c-dev.c ++++ b/drivers/i2c/i2c-dev.c +@@ -328,7 +328,7 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client, + unsigned long arg) + { + struct i2c_smbus_ioctl_data data_arg; +- union i2c_smbus_data temp; ++ union i2c_smbus_data temp = {}; + int datasize, res; + + if (copy_from_user(&data_arg, +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c +index 71c2c7116802..818cac9bbd8a 100644 +--- a/drivers/infiniband/core/cma.c ++++ b/drivers/infiniband/core/cma.c +@@ -2772,6 +2772,9 @@ static int cma_accept_iw(struct rdma_id_private *id_priv, + struct iw_cm_conn_param iw_param; + int ret; + ++ if (!conn_param) ++ return -EINVAL; ++ + ret = cma_modify_qp_rtr(id_priv, conn_param); + if (ret) + return ret; +diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c +index f362883c94e3..3736c1759524 100644 +--- a/drivers/input/joydev.c ++++ b/drivers/input/joydev.c +@@ -188,6 +188,17 @@ static void joydev_detach_client(struct joydev *joydev, + synchronize_rcu(); + } + ++static void joydev_refresh_state(struct joydev *joydev) ++{ ++ struct input_dev *dev = joydev->handle.dev; ++ int i, val; ++ ++ for (i = 0; i < joydev->nabs; i++) { ++ val = input_abs_get_val(dev, joydev->abspam[i]); ++ joydev->abs[i] = joydev_correct(val, &joydev->corr[i]); ++ } ++} ++ + static int joydev_open_device(struct joydev *joydev) + { + int retval; +@@ -202,6 +213,8 @@ static int joydev_open_device(struct joydev *joydev) + retval = input_open_device(&joydev->handle); + if (retval) + joydev->open--; ++ else ++ joydev_refresh_state(joydev); + } + + mutex_unlock(&joydev->mutex); +@@ -823,7 +836,6 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev, + j = joydev->abspam[i]; + if (input_abs_get_max(dev, j) == input_abs_get_min(dev, j)) { + joydev->corr[i].type = JS_CORR_NONE; +- joydev->abs[i] = input_abs_get_val(dev, j); + continue; + } + joydev->corr[i].type = JS_CORR_BROKEN; +@@ -838,10 +850,6 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev, + if (t) { + joydev->corr[i].coef[2] = (1 << 29) / t; + joydev->corr[i].coef[3] = (1 << 29) / t; +- +- joydev->abs[i] = +- joydev_correct(input_abs_get_val(dev, j), +- joydev->corr + i); + } + } + +diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c +index d96aa27dfcdc..db64adfbe1af 100644 +--- a/drivers/input/joystick/iforce/iforce-usb.c ++++ b/drivers/input/joystick/iforce/iforce-usb.c +@@ -141,6 +141,9 @@ static int iforce_usb_probe(struct usb_interface *intf, + + interface = intf->cur_altsetting; + ++ if (interface->desc.bNumEndpoints < 2) ++ return -ENODEV; ++ + epirq = &interface->endpoint[0].desc; + epout = &interface->endpoint[1].desc; + +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index 685e125d6366..24e5683d6c91 100644 +--- a/drivers/input/joystick/xpad.c ++++ 
b/drivers/input/joystick/xpad.c +@@ -901,6 +901,12 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id + input_dev->name = xpad_device[i].name; + input_dev->phys = xpad->phys; + usb_to_input_id(udev, &input_dev->id); ++ ++ if (xpad->xtype == XTYPE_XBOX360W) { ++ /* x360w controllers and the receiver have different ids */ ++ input_dev->id.product = 0x02a1; ++ } ++ + input_dev->dev.parent = &intf->dev; + + input_set_drvdata(input_dev, xpad); +diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c +index f7f3e9a9fd3f..e13713b7658c 100644 +--- a/drivers/input/keyboard/mpr121_touchkey.c ++++ b/drivers/input/keyboard/mpr121_touchkey.c +@@ -88,7 +88,8 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id) + struct mpr121_touchkey *mpr121 = dev_id; + struct i2c_client *client = mpr121->client; + struct input_dev *input = mpr121->input_dev; +- unsigned int key_num, key_val, pressed; ++ unsigned long bit_changed; ++ unsigned int key_num; + int reg; + + reg = i2c_smbus_read_byte_data(client, ELE_TOUCH_STATUS_1_ADDR); +@@ -106,18 +107,22 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id) + + reg &= TOUCH_STATUS_MASK; + /* use old press bit to figure out which bit changed */ +- key_num = ffs(reg ^ mpr121->statusbits) - 1; +- pressed = reg & (1 << key_num); ++ bit_changed = reg ^ mpr121->statusbits; + mpr121->statusbits = reg; ++ for_each_set_bit(key_num, &bit_changed, mpr121->keycount) { ++ unsigned int key_val, pressed; + +- key_val = mpr121->keycodes[key_num]; ++ pressed = reg & BIT(key_num); ++ key_val = mpr121->keycodes[key_num]; + +- input_event(input, EV_MSC, MSC_SCAN, key_num); +- input_report_key(input, key_val, pressed); +- input_sync(input); ++ input_event(input, EV_MSC, MSC_SCAN, key_num); ++ input_report_key(input, key_val, pressed); ++ ++ dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val, ++ pressed ? "pressed" : "released"); + +- dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val, +- pressed ? 
"pressed" : "released"); ++ } ++ input_sync(input); + + out: + return IRQ_HANDLED; +@@ -230,6 +235,7 @@ static int mpr_touchkey_probe(struct i2c_client *client, + input_dev->id.bustype = BUS_I2C; + input_dev->dev.parent = &client->dev; + input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); ++ input_set_capability(input_dev, EV_MSC, MSC_SCAN); + + input_dev->keycode = mpr121->keycodes; + input_dev->keycodesize = sizeof(mpr121->keycodes[0]); +diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c +index 55c15304ddbc..92c742420e20 100644 +--- a/drivers/input/keyboard/tca8418_keypad.c ++++ b/drivers/input/keyboard/tca8418_keypad.c +@@ -274,6 +274,7 @@ static int tca8418_keypad_probe(struct i2c_client *client, + bool irq_is_gpio = false; + int irq; + int error, row_shift, max_keys; ++ unsigned long trigger = 0; + + /* Copy the platform data */ + if (pdata) { +@@ -286,6 +287,7 @@ static int tca8418_keypad_probe(struct i2c_client *client, + cols = pdata->cols; + rep = pdata->rep; + irq_is_gpio = pdata->irq_is_gpio; ++ trigger = IRQF_TRIGGER_FALLING; + } else { + struct device_node *np = dev->of_node; + int err; +@@ -360,9 +362,7 @@ static int tca8418_keypad_probe(struct i2c_client *client, + irq = gpio_to_irq(irq); + + error = devm_request_threaded_irq(dev, irq, NULL, tca8418_irq_handler, +- IRQF_TRIGGER_FALLING | +- IRQF_SHARED | +- IRQF_ONESHOT, ++ trigger | IRQF_SHARED | IRQF_ONESHOT, + client->name, keypad_data); + if (error) { + dev_err(dev, "Unable to claim irq %d; error %d\n", +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index 875e680e90c2..566ced8b3bb7 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -120,6 +120,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { + }, + }, + { ++ /* Dell Embedded Box PC 3000 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"), ++ }, ++ }, ++ { + /* OQO Model 01 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "OQO"), +@@ -580,6 +587,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "20046"), + }, + }, ++ { ++ /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"), ++ }, ++ }, + { } + }; + +diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c +index 3fba74b9b602..f0d532684afd 100644 +--- a/drivers/input/tablet/kbtab.c ++++ b/drivers/input/tablet/kbtab.c +@@ -123,6 +123,9 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i + struct input_dev *input_dev; + int error = -ENOMEM; + ++ if (intf->cur_altsetting->desc.bNumEndpoints < 1) ++ return -ENODEV; ++ + kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL); + input_dev = input_allocate_device(); + if (!kbtab || !input_dev) +diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c +index a82e542ffc21..fecbf1d2f60b 100644 +--- a/drivers/isdn/hardware/eicon/message.c ++++ b/drivers/isdn/hardware/eicon/message.c +@@ -11304,7 +11304,8 @@ static void mixer_notify_update(PLCI *plci, byte others) + ((CAPI_MSG *) msg)->header.ncci = 0; + ((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT; + ((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3; +- PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), 
LI_REQ_SILENT_UPDATE); ++ ((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff; ++ ((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8; + ((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0; + w = api_put(notify_plci->appl, (CAPI_MSG *) msg); + if (w != _QUEUE_FULL) +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c +index 7409d79729ee..53ce281e4129 100644 +--- a/drivers/md/dm-crypt.c ++++ b/drivers/md/dm-crypt.c +@@ -1283,12 +1283,15 @@ static int crypt_set_key(struct crypt_config *cc, char *key) + if (!cc->key_size && strcmp(key, "-")) + goto out; + ++ /* clear the flag since following operations may invalidate previously valid key */ ++ clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); ++ + if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0) + goto out; + +- set_bit(DM_CRYPT_KEY_VALID, &cc->flags); +- + r = crypt_setkey_allcpus(cc); ++ if (!r) ++ set_bit(DM_CRYPT_KEY_VALID, &cc->flags); + + out: + /* Hex key string not needed after here, so wipe it. */ +diff --git a/drivers/md/linear.c b/drivers/md/linear.c +index f03fabd2b37b..f169afac0266 100644 +--- a/drivers/md/linear.c ++++ b/drivers/md/linear.c +@@ -97,6 +97,12 @@ static int linear_mergeable_bvec(struct request_queue *q, + return maxsectors << 9; + } + ++/* ++ * In linear_congested() conf->raid_disks is used as a copy of ++ * mddev->raid_disks to iterate conf->disks[]; because conf->raid_disks ++ * and conf->disks[] are created in linear_conf(), they are always ++ * consistent with each other, but mddev->raid_disks may not be. ++ */ + static int linear_congested(void *data, int bits) + { + struct mddev *mddev = data; +@@ -109,7 +115,7 @@ static int linear_congested(void *data, int bits) + rcu_read_lock(); + conf = rcu_dereference(mddev->private); + +- for (i = 0; i < mddev->raid_disks && !ret ; i++) { ++ for (i = 0; i < conf->raid_disks && !ret ; i++) { + struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); + ret |= bdi_congested(&q->backing_dev_info, bits); + } +@@ -196,6 +202,19 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) + conf->disks[i-1].end_sector + + conf->disks[i].rdev->sectors; + ++ /* ++ * conf->raid_disks is a copy of mddev->raid_disks. The reason to ++ * keep a copy of mddev->raid_disks in struct linear_conf is that ++ * mddev->raid_disks may not be consistent with the number of ++ * pointers in conf->disks[] when it is updated in linear_add() and ++ * used to iterate the old conf->disks[] array in linear_congested(). ++ * Here conf->raid_disks is always consistent with the number of ++ * pointers in the conf->disks[] array, and mddev->private is updated ++ * with rcu_assign_pointer() in linear_add(), so such a race can be ++ * avoided. ++ */ ++ conf->raid_disks = raid_disks; ++ + return conf; + + out: +@@ -252,10 +271,18 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev) + if (!newconf) + return -ENOMEM; + ++ /* newconf->raid_disks already keeps a copy of the increased ++ * value of mddev->raid_disks, WARN_ONCE() is just used to make ++ * sure of this. It is possible that oldconf is still referenced ++ * in linear_congested(), therefore kfree_rcu() is used to free ++ * oldconf until no one uses it anymore.
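The md/linear change here rests on a general pattern: an RCU-published object carries its own element count, so a reader can never pair a stale array with a fresh length. A rough userspace analogue using C11 atomics in place of RCU (all names invented):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct conf {
            int raid_disks;   /* length snapshot, published with the array */
            int disks[];      /* flexible array, sized by raid_disks */
    };

    static _Atomic(struct conf *) private_ptr;

    void reader(void)
    {
            /* Load the pointer once and trust only the embedded count; a
             * separately updated global could already be ahead of (or
             * behind) the array this pointer refers to. */
            struct conf *conf = atomic_load_explicit(&private_ptr,
                                                     memory_order_acquire);
            for (int i = 0; i < conf->raid_disks; i++)
                    printf("disk[%d] = %d\n", i, conf->disks[i]);
    }

    int main(void)
    {
            struct conf *c = malloc(sizeof(*c) + 2 * sizeof(int));
            c->raid_disks = 2;
            c->disks[0] = 10;
            c->disks[1] = 20;
            atomic_store_explicit(&private_ptr, c, memory_order_release);
            reader();
            /* The kernel defers the free with kfree_rcu() until all
             * readers are done; plain free() suffices single-threaded. */
            free(c);
            return 0;
    }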
++ */ + oldconf = rcu_dereference_protected(mddev->private, + lockdep_is_held( + &mddev->reconfig_mutex)); + mddev->raid_disks++; ++ WARN_ONCE(mddev->raid_disks != newconf->raid_disks, ++ "copied raid_disks doesn't match mddev->raid_disks"); + rcu_assign_pointer(mddev->private, newconf); + md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); + set_capacity(mddev->gendisk, mddev->array_sectors); +diff --git a/drivers/md/linear.h b/drivers/md/linear.h +index b685ddd7d7f7..8d392e6098b3 100644 +--- a/drivers/md/linear.h ++++ b/drivers/md/linear.h +@@ -10,6 +10,7 @@ struct linear_conf + { + struct rcu_head rcu; + sector_t array_sectors; ++ int raid_disks; /* a copy of mddev->raid_disks */ + struct dev_info disks[0]; + }; + #endif +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c +index 056d09c33af1..c79d6480fbed 100644 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c +@@ -679,15 +679,13 @@ int dm_sm_metadata_create(struct dm_space_map *sm, + memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); + + r = sm_ll_new_metadata(&smm->ll, tm); ++ if (!r) { ++ r = sm_ll_extend(&smm->ll, nr_blocks); ++ } ++ memcpy(&smm->sm, &ops, sizeof(smm->sm)); + if (r) + return r; + +- r = sm_ll_extend(&smm->ll, nr_blocks); +- if (r) +- return r; +- +- memcpy(&smm->sm, &ops, sizeof(smm->sm)); +- + /* + * Now we need to update the newly created data structures with the + * allocated blocks that they were built from. +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c +index 63d42ae56a1c..a8315aaba9fe 100644 +--- a/drivers/md/raid1.c ++++ b/drivers/md/raid1.c +@@ -560,7 +560,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect + if (best_dist_disk < 0) { + if (is_badblock(rdev, this_sector, sectors, + &first_bad, &bad_sectors)) { +- if (first_bad < this_sector) ++ if (first_bad <= this_sector) + /* Cannot use this */ + continue; + best_good_sectors = first_bad - this_sector; +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 9ee3c460fa37..8f5c890f9983 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -5616,6 +5616,15 @@ static int run(struct mddev *mddev) + stripe = (stripe | (stripe-1)) + 1; + mddev->queue->limits.discard_alignment = stripe; + mddev->queue->limits.discard_granularity = stripe; ++ ++ /* ++ * We use 16-bit counter of active stripes in bi_phys_segments ++ * (minus one for over-loaded initialization) ++ */ ++ blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS); ++ blk_queue_max_discard_sectors(mddev->queue, ++ 0xfffe * STRIPE_SECTORS); ++ + /* + * unaligned part of discard request will be ignored, so can't + * guarantee discard_zeroes_data +diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c +index 03761c6f472f..8e7c78567138 100644 +--- a/drivers/media/usb/siano/smsusb.c ++++ b/drivers/media/usb/siano/smsusb.c +@@ -206,20 +206,28 @@ static int smsusb_start_streaming(struct smsusb_device_t *dev) + static int smsusb_sendrequest(void *context, void *buffer, size_t size) + { + struct smsusb_device_t *dev = (struct smsusb_device_t *) context; +- struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer; +- int dummy; ++ struct sms_msg_hdr *phdr; ++ int dummy, ret; + + if (dev->state != SMSUSB_ACTIVE) + return -ENOENT; + ++ phdr = kmalloc(size, GFP_KERNEL); ++ if (!phdr) ++ return -ENOMEM; ++ memcpy(phdr, buffer, size); ++ + sms_debug("sending %s(%d) size: %d", + 
smscore_translate_msg(phdr->msg_type), phdr->msg_type, + phdr->msg_length); + + smsendian_handle_tx_message((struct sms_msg_data *) phdr); +- smsendian_handle_message_header((struct sms_msg_hdr *)buffer); +- return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2), +- buffer, size, &dummy, 1000); ++ smsendian_handle_message_header((struct sms_msg_hdr *)phdr); ++ ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2), ++ phdr, size, &dummy, 1000); ++ ++ kfree(phdr); ++ return ret; + } + + static char *smsusb1_fw_lkup[] = { +diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c +index 363cdbf4ac8d..5422093d135c 100644 +--- a/drivers/media/usb/uvc/uvc_driver.c ++++ b/drivers/media/usb/uvc/uvc_driver.c +@@ -1533,6 +1533,114 @@ static const char *uvc_print_chain(struct uvc_video_chain *chain) + return buffer; + } + ++static struct uvc_video_chain *uvc_alloc_chain(struct uvc_device *dev) ++{ ++ struct uvc_video_chain *chain; ++ ++ chain = kzalloc(sizeof(*chain), GFP_KERNEL); ++ if (chain == NULL) ++ return NULL; ++ ++ INIT_LIST_HEAD(&chain->entities); ++ mutex_init(&chain->ctrl_mutex); ++ chain->dev = dev; ++ v4l2_prio_init(&chain->prio); ++ ++ return chain; ++} ++ ++/* ++ * Fallback heuristic for devices that don't connect units and terminals in a ++ * valid chain. ++ * ++ * Some devices have invalid baSourceID references, causing uvc_scan_chain() ++ * to fail, but if we just take the entities we can find and put them together ++ * in the most sensible chain we can think of, turns out they do work anyway. ++ * Note: This heuristic assumes there is a single chain. ++ * ++ * At the time of writing, devices known to have such a broken chain are ++ * - Acer Integrated Camera (5986:055a) ++ * - Realtek rtl157a7 (0bda:57a7) ++ */ ++static int uvc_scan_fallback(struct uvc_device *dev) ++{ ++ struct uvc_video_chain *chain; ++ struct uvc_entity *iterm = NULL; ++ struct uvc_entity *oterm = NULL; ++ struct uvc_entity *entity; ++ struct uvc_entity *prev; ++ ++ /* ++ * Start by locating the input and output terminals. We only support ++ * devices with exactly one of each for now. ++ */ ++ list_for_each_entry(entity, &dev->entities, list) { ++ if (UVC_ENTITY_IS_ITERM(entity)) { ++ if (iterm) ++ return -EINVAL; ++ iterm = entity; ++ } ++ ++ if (UVC_ENTITY_IS_OTERM(entity)) { ++ if (oterm) ++ return -EINVAL; ++ oterm = entity; ++ } ++ } ++ ++ if (iterm == NULL || oterm == NULL) ++ return -EINVAL; ++ ++ /* Allocate the chain and fill it. */ ++ chain = uvc_alloc_chain(dev); ++ if (chain == NULL) ++ return -ENOMEM; ++ ++ if (uvc_scan_chain_entity(chain, oterm) < 0) ++ goto error; ++ ++ prev = oterm; ++ ++ /* ++ * Add all Processing and Extension Units with two pads. The order ++ * doesn't matter much, use reverse list traversal to connect units in ++ * UVC descriptor order as we build the chain from output to input. This ++ * leads to units appearing in the order meant by the manufacturer for ++ * the cameras known to require this heuristic. 
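The linking step described above compresses to a small loop; a sketch with invented flat structures rather than the driver's entity lists: the output terminal is linked first, units are visited in reverse descriptor order, and each node's source id is patched to point at the unit linked after it, ending at the input terminal.

    /* Invented types; the real driver walks struct uvc_entity lists. */
    struct ent { int id; int source_id; };

    void link_fallback_chain(struct ent *oterm, struct ent *units,
                             int nunits, struct ent *iterm)
    {
            struct ent *prev = oterm;

            /* Reverse traversal: restores descriptor order even though
             * the chain itself grows from output back toward input. */
            for (int i = nunits - 1; i >= 0; i--) {
                    prev->source_id = units[i].id;
                    prev = &units[i];
            }
            prev->source_id = iterm->id;   /* chain terminates at the IT */
    }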
++ */ ++ list_for_each_entry_reverse(entity, &dev->entities, list) { ++ if (entity->type != UVC_VC_PROCESSING_UNIT && ++ entity->type != UVC_VC_EXTENSION_UNIT) ++ continue; ++ ++ if (entity->num_pads != 2) ++ continue; ++ ++ if (uvc_scan_chain_entity(chain, entity) < 0) ++ goto error; ++ ++ prev->baSourceID[0] = entity->id; ++ prev = entity; ++ } ++ ++ if (uvc_scan_chain_entity(chain, iterm) < 0) ++ goto error; ++ ++ prev->baSourceID[0] = iterm->id; ++ ++ list_add_tail(&chain->list, &dev->chains); ++ ++ uvc_trace(UVC_TRACE_PROBE, ++ "Found a video chain by fallback heuristic (%s).\n", ++ uvc_print_chain(chain)); ++ ++ return 0; ++ ++error: ++ kfree(chain); ++ return -EINVAL; ++} ++ + /* + * Scan the device for video chains and register video devices. + * +@@ -1555,15 +1663,10 @@ static int uvc_scan_device(struct uvc_device *dev) + if (term->chain.next || term->chain.prev) + continue; + +- chain = kzalloc(sizeof(*chain), GFP_KERNEL); ++ chain = uvc_alloc_chain(dev); + if (chain == NULL) + return -ENOMEM; + +- INIT_LIST_HEAD(&chain->entities); +- mutex_init(&chain->ctrl_mutex); +- chain->dev = dev; +- v4l2_prio_init(&chain->prio); +- + term->flags |= UVC_ENTITY_FLAG_DEFAULT; + + if (uvc_scan_chain(chain, term) < 0) { +@@ -1577,6 +1680,9 @@ static int uvc_scan_device(struct uvc_device *dev) + list_add_tail(&chain->list, &dev->chains); + } + ++ if (list_empty(&dev->chains)) ++ uvc_scan_fallback(dev); ++ + if (list_empty(&dev->chains)) { + uvc_printk(KERN_INFO, "No valid video chain found.\n"); + return -1; +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c +index 836e2ac36a0d..16d7f939a747 100644 +--- a/drivers/mmc/host/sdhci.c ++++ b/drivers/mmc/host/sdhci.c +@@ -1220,7 +1220,9 @@ clock_set: + return; + } + timeout--; +- mdelay(1); ++ spin_unlock_irq(&host->lock); ++ usleep_range(900, 1100); ++ spin_lock_irq(&host->lock); + } + + clk |= SDHCI_CLOCK_CARD_EN; +diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c +index 9279a9174f84..04e2e4308890 100644 +--- a/drivers/mtd/bcm47xxpart.c ++++ b/drivers/mtd/bcm47xxpart.c +@@ -159,12 +159,10 @@ static int bcm47xxpart_parse(struct mtd_info *master, + + last_trx_part = curr_part - 1; + +- /* +- * We have whole TRX scanned, skip to the next part. Use +- * roundown (not roundup), as the loop will increase +- * offset in next step. 
+- */ +- offset = rounddown(offset + trx->length, blocksize); ++ /* Jump to the end of TRX */ ++ offset = roundup(offset + trx->length, blocksize); ++ /* Next loop iteration will increase the offset */ ++ offset -= blocksize; + continue; + } + } +diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c +index f9fa3fad728e..2051f28ddac6 100644 +--- a/drivers/mtd/maps/pmcmsp-flash.c ++++ b/drivers/mtd/maps/pmcmsp-flash.c +@@ -139,15 +139,13 @@ static int __init init_msp_flash(void) + } + + msp_maps[i].bankwidth = 1; +- msp_maps[i].name = kmalloc(7, GFP_KERNEL); ++ msp_maps[i].name = kstrndup(flash_name, 7, GFP_KERNEL); + if (!msp_maps[i].name) { + iounmap(msp_maps[i].virt); + kfree(msp_parts[i]); + goto cleanup_loop; + } + +- msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7); +- + for (j = 0; j < pcnt; j++) { + part_name[5] = '0' + i; + part_name[7] = '0' + j; +diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c +index 0134ba32a057..39712560b4c1 100644 +--- a/drivers/mtd/ubi/upd.c ++++ b/drivers/mtd/ubi/upd.c +@@ -148,11 +148,11 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, + return err; + } + +- if (bytes == 0) { +- err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL); +- if (err) +- return err; ++ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL); ++ if (err) ++ return err; + ++ if (bytes == 0) { + err = clear_update_marker(ubi, vol, 0); + if (err) + return err; +diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c +index b374be7891a2..b905e5e840f7 100644 +--- a/drivers/net/can/c_can/c_can_pci.c ++++ b/drivers/net/can/c_can/c_can_pci.c +@@ -109,6 +109,7 @@ static int c_can_pci_probe(struct pci_dev *pdev, + + dev->irq = pdev->irq; + priv->base = addr; ++ priv->device = &pdev->dev; + + if (!c_can_pci_data->freq) { + dev_err(&pdev->dev, "no clock frequency defined\n"); +diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c +index f21fc37ec578..2c19b4ffe823 100644 +--- a/drivers/net/can/ti_hecc.c ++++ b/drivers/net/can/ti_hecc.c +@@ -962,7 +962,12 @@ static int ti_hecc_probe(struct platform_device *pdev) + netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll, + HECC_DEF_NAPI_WEIGHT); + +- clk_enable(priv->clk); ++ err = clk_prepare_enable(priv->clk); ++ if (err) { ++ dev_err(&pdev->dev, "clk_prepare_enable() failed\n"); ++ goto probe_exit_clk; ++ } ++ + err = register_candev(ndev); + if (err) { + dev_err(&pdev->dev, "register_candev() failed\n"); +@@ -995,7 +1000,7 @@ static int ti_hecc_remove(struct platform_device *pdev) + struct ti_hecc_priv *priv = netdev_priv(ndev); + + unregister_candev(ndev); +- clk_disable(priv->clk); ++ clk_disable_unprepare(priv->clk); + clk_put(priv->clk); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + iounmap(priv->base); +@@ -1021,7 +1026,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state) + hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR); + priv->can.state = CAN_STATE_SLEEPING; + +- clk_disable(priv->clk); ++ clk_disable_unprepare(priv->clk); + + return 0; + } +@@ -1030,8 +1035,11 @@ static int ti_hecc_resume(struct platform_device *pdev) + { + struct net_device *dev = platform_get_drvdata(pdev); + struct ti_hecc_priv *priv = netdev_priv(dev); ++ int err; + +- clk_enable(priv->clk); ++ err = clk_prepare_enable(priv->clk); ++ if (err) ++ return err; + + hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR); + priv->can.state = CAN_STATE_ERROR_ACTIVE; +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c 
b/drivers/net/can/usb/peak_usb/pcan_usb_core.c +index 3a220d2f2ee1..9a82890f64e5 100644 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c +@@ -817,23 +817,25 @@ lbl_free_candev: + static void peak_usb_disconnect(struct usb_interface *intf) + { + struct peak_usb_device *dev; ++ struct peak_usb_device *dev_prev_siblings; + + /* unregister as many netdev devices as siblings */ +- for (dev = usb_get_intfdata(intf); dev; dev = dev->prev_siblings) { ++ for (dev = usb_get_intfdata(intf); dev; dev = dev_prev_siblings) { + struct net_device *netdev = dev->netdev; + char name[IFNAMSIZ]; + ++ dev_prev_siblings = dev->prev_siblings; + dev->state &= ~PCAN_USB_STATE_CONNECTED; + strncpy(name, netdev->name, IFNAMSIZ); + + unregister_netdev(netdev); +- free_candev(netdev); + + kfree(dev->cmd_buf); + dev->next_siblings = NULL; + if (dev->adapter->dev_free) + dev->adapter->dev_free(dev); + ++ free_candev(netdev); + dev_info(&intf->dev, "%s removed\n", name); + } + +diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c +index cbd388eea682..f8b84fed537b 100644 +--- a/drivers/net/can/usb/usb_8dev.c ++++ b/drivers/net/can/usb/usb_8dev.c +@@ -956,8 +956,8 @@ static int usb_8dev_probe(struct usb_interface *intf, + for (i = 0; i < MAX_TX_URBS; i++) + priv->tx_contexts[i].echo_index = MAX_TX_URBS; + +- priv->cmd_msg_buffer = kzalloc(sizeof(struct usb_8dev_cmd_msg), +- GFP_KERNEL); ++ priv->cmd_msg_buffer = devm_kzalloc(&intf->dev, sizeof(struct usb_8dev_cmd_msg), ++ GFP_KERNEL); + if (!priv->cmd_msg_buffer) + goto cleanup_candev; + +@@ -971,7 +971,7 @@ static int usb_8dev_probe(struct usb_interface *intf, + if (err) { + netdev_err(netdev, + "couldn't register CAN device: %d\n", err); +- goto cleanup_cmd_msg_buffer; ++ goto cleanup_candev; + } + + err = usb_8dev_cmd_version(priv, &version); +@@ -992,9 +992,6 @@ static int usb_8dev_probe(struct usb_interface *intf, + cleanup_unregister_candev: + unregister_netdev(priv->netdev); + +-cleanup_cmd_msg_buffer: +- kfree(priv->cmd_msg_buffer); +- + cleanup_candev: + free_candev(netdev); + +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +index ce1a91618677..9c19f49f0f54 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +@@ -1792,8 +1792,16 @@ static void bnx2x_get_ringparam(struct net_device *dev, + + ering->rx_max_pending = MAX_RX_AVAIL; + ++ /* If size isn't already set, we give an estimation of the number ++ * of buffers we'll have. We're neglecting some possible conditions ++ * [we couldn't know for certain at this point if number of queues ++ * might shrink] but the number would be correct for the likely ++ * scenario. 
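The estimate described above reduces to a three-way choice, shown just below in the patch; a sketch with an invented buffer budget (the driver's MAX_RX_AVAIL differs):

    #define MAX_RX_AVAIL 4096U   /* invented budget for illustration */

    unsigned int estimate_rx_pending(unsigned int rx_ring_size,
                                     unsigned int num_rx_queues)
    {
            if (rx_ring_size)
                    return rx_ring_size;                 /* user-configured */
            if (num_rx_queues)
                    return MAX_RX_AVAIL / num_rx_queues; /* even per-queue split */
            return MAX_RX_AVAIL;                         /* queue count unknown */
    }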
++ */ + if (bp->rx_ring_size) + ering->rx_pending = bp->rx_ring_size; ++ else if (BNX2X_NUM_RX_QUEUES(bp)) ++ ering->rx_pending = MAX_RX_AVAIL / BNX2X_NUM_RX_QUEUES(bp); + else + ering->rx_pending = MAX_RX_AVAIL; + +diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c +index 07f7ef05c3f2..d18ee75bdd54 100644 +--- a/drivers/net/ethernet/brocade/bna/bnad.c ++++ b/drivers/net/ethernet/brocade/bna/bnad.c +@@ -193,6 +193,7 @@ bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb) + return 0; + + hw_cons = *(tcb->hw_consumer_index); ++ rmb(); + cons = tcb->consumer_index; + q_depth = tcb->q_depth; + +@@ -2903,13 +2904,12 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) + BNA_QE_INDX_INC(prod, q_depth); + tcb->producer_index = prod; + +- smp_mb(); ++ wmb(); + + if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) + return NETDEV_TX_OK; + + bna_txq_prod_indx_doorbell(tcb); +- smp_mb(); + + return NETDEV_TX_OK; + } +diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c +index 5dec66a96793..583ebff31160 100644 +--- a/drivers/net/ethernet/intel/igb/e1000_phy.c ++++ b/drivers/net/ethernet/intel/igb/e1000_phy.c +@@ -87,6 +87,10 @@ s32 igb_get_phy_id(struct e1000_hw *hw) + s32 ret_val = 0; + u16 phy_id; + ++ /* ensure PHY page selection to fix misconfigured i210 */ ++ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) ++ phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0); ++ + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; +diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c +index 31bbbca341a7..922f7dd6028e 100644 +--- a/drivers/net/ethernet/ti/cpmac.c ++++ b/drivers/net/ethernet/ti/cpmac.c +@@ -557,7 +557,8 @@ fatal_error: + + static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) + { +- int queue, len; ++ int queue; ++ unsigned int len; + struct cpmac_desc *desc; + struct cpmac_priv *priv = netdev_priv(dev); + +@@ -567,7 +568,7 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) + if (unlikely(skb_padto(skb, ETH_ZLEN))) + return NETDEV_TX_OK; + +- len = max(skb->len, ETH_ZLEN); ++ len = max_t(unsigned int, skb->len, ETH_ZLEN); + queue = skb_get_queue_mapping(skb); + netif_stop_subqueue(dev, queue); + +@@ -1241,7 +1242,7 @@ int cpmac_init(void) + goto fail_alloc; + } + +-#warning FIXME: unhardcode gpio&reset bits ++ /* FIXME: unhardcode gpio&reset bits */ + ar7_gpio_disable(26); + ar7_gpio_disable(27); + ar7_device_reset(AR7_RESET_BIT_CPMAC_LO); +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c +index 59e9c56e5b8a..493460424a00 100644 +--- a/drivers/net/hyperv/netvsc_drv.c ++++ b/drivers/net/hyperv/netvsc_drv.c +@@ -48,6 +48,9 @@ struct net_device_context { + struct work_struct work; + }; + ++/* Restrict GSO size to account for NVGRE */ ++#define NETVSC_GSO_MAX_SIZE 62768 ++ + #define RING_SIZE_MIN 64 + static int ring_size = 128; + module_param(ring_size, int, S_IRUGO); +@@ -436,6 +439,7 @@ static int netvsc_probe(struct hv_device *dev, + + SET_ETHTOOL_OPS(net, ðtool_ops); + SET_NETDEV_DEV(net, &dev->device); ++ netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE); + + ret = register_netdev(net); + if (ret != 0) { +diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c +index 8fc46fcaee54..1c51abbecedb 100644 +--- a/drivers/net/macvtap.c ++++ b/drivers/net/macvtap.c +@@ -678,7 +678,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, 
struct msghdr *m, + size_t linear; + + if (q->flags & IFF_VNET_HDR) { +- vnet_hdr_len = q->vnet_hdr_sz; ++ vnet_hdr_len = ACCESS_ONCE(q->vnet_hdr_sz); + + err = -EINVAL; + if (len < vnet_hdr_len) +@@ -809,7 +809,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, + + if (q->flags & IFF_VNET_HDR) { + struct virtio_net_hdr vnet_hdr; +- vnet_hdr_len = q->vnet_hdr_sz; ++ vnet_hdr_len = ACCESS_ONCE(q->vnet_hdr_sz); + if ((len -= vnet_hdr_len) < 0) + return -EINVAL; + +diff --git a/drivers/net/tun.c b/drivers/net/tun.c +index ea6ada39db15..7bbc43fbb720 100644 +--- a/drivers/net/tun.c ++++ b/drivers/net/tun.c +@@ -1087,9 +1087,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, + } + + if (tun->flags & TUN_VNET_HDR) { +- if (len < tun->vnet_hdr_sz) ++ int vnet_hdr_sz = ACCESS_ONCE(tun->vnet_hdr_sz); ++ ++ if (len < vnet_hdr_sz) + return -EINVAL; +- len -= tun->vnet_hdr_sz; ++ len -= vnet_hdr_sz; + + if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) + return -EFAULT; +@@ -1100,7 +1102,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, + + if (gso.hdr_len > len) + return -EINVAL; +- offset += tun->vnet_hdr_sz; ++ offset += vnet_hdr_sz; + } + + if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) { +@@ -1275,7 +1277,7 @@ static ssize_t tun_put_user(struct tun_struct *tun, + int vnet_hdr_sz = 0; + + if (tun->flags & TUN_VNET_HDR) +- vnet_hdr_sz = tun->vnet_hdr_sz; ++ vnet_hdr_sz = ACCESS_ONCE(tun->vnet_hdr_sz); + + if (!(tun->flags & TUN_NO_PI)) { + if ((len -= sizeof(pi)) < 0) +diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c +index 8d5cac2d8e33..57da4c10c695 100644 +--- a/drivers/net/usb/catc.c ++++ b/drivers/net/usb/catc.c +@@ -779,7 +779,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id + struct net_device *netdev; + struct catc *catc; + u8 broadcast[6]; +- int i, pktsz; ++ int pktsz, ret; + + if (usb_set_interface(usbdev, + intf->altsetting->desc.bInterfaceNumber, 1)) { +@@ -814,12 +814,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id + if ((!catc->ctrl_urb) || (!catc->tx_urb) || + (!catc->rx_urb) || (!catc->irq_urb)) { + dev_err(&intf->dev, "No free urbs available.\n"); +- usb_free_urb(catc->ctrl_urb); +- usb_free_urb(catc->tx_urb); +- usb_free_urb(catc->rx_urb); +- usb_free_urb(catc->irq_urb); +- free_netdev(netdev); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto fail_free; + } + + /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */ +@@ -847,15 +843,24 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id + catc->irq_buf, 2, catc_irq_done, catc, 1); + + if (!catc->is_f5u011) { ++ u32 *buf; ++ int i; ++ + dev_dbg(dev, "Checking memory size\n"); + +- i = 0x12345678; +- catc_write_mem(catc, 0x7a80, &i, 4); +- i = 0x87654321; +- catc_write_mem(catc, 0xfa80, &i, 4); +- catc_read_mem(catc, 0x7a80, &i, 4); ++ buf = kmalloc(4, GFP_KERNEL); ++ if (!buf) { ++ ret = -ENOMEM; ++ goto fail_free; ++ } ++ ++ *buf = 0x12345678; ++ catc_write_mem(catc, 0x7a80, buf, 4); ++ *buf = 0x87654321; ++ catc_write_mem(catc, 0xfa80, buf, 4); ++ catc_read_mem(catc, 0x7a80, buf, 4); + +- switch (i) { ++ switch (*buf) { + case 0x12345678: + catc_set_reg(catc, TxBufCount, 8); + catc_set_reg(catc, RxBufCount, 32); +@@ -870,6 +875,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id + dev_dbg(dev, "32k Memory\n"); + break; + } ++ ++ kfree(buf); + + dev_dbg(dev, 
"Getting MAC from SEEROM.\n"); + +@@ -916,16 +923,21 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id + usb_set_intfdata(intf, catc); + + SET_NETDEV_DEV(netdev, &intf->dev); +- if (register_netdev(netdev) != 0) { +- usb_set_intfdata(intf, NULL); +- usb_free_urb(catc->ctrl_urb); +- usb_free_urb(catc->tx_urb); +- usb_free_urb(catc->rx_urb); +- usb_free_urb(catc->irq_urb); +- free_netdev(netdev); +- return -EIO; +- } ++ ret = register_netdev(netdev); ++ if (ret) ++ goto fail_clear_intfdata; ++ + return 0; ++ ++fail_clear_intfdata: ++ usb_set_intfdata(intf, NULL); ++fail_free: ++ usb_free_urb(catc->ctrl_urb); ++ usb_free_urb(catc->tx_urb); ++ usb_free_urb(catc->rx_urb); ++ usb_free_urb(catc->irq_urb); ++ free_netdev(netdev); ++ return ret; + } + + static void catc_disconnect(struct usb_interface *intf) +diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c +index d0815855d877..e782dd7183db 100644 +--- a/drivers/net/vmxnet3/vmxnet3_drv.c ++++ b/drivers/net/vmxnet3/vmxnet3_drv.c +@@ -2862,7 +2862,6 @@ vmxnet3_tx_timeout(struct net_device *netdev) + + netdev_err(adapter->netdev, "tx hang\n"); + schedule_work(&adapter->work); +- netif_wake_queue(adapter->netdev); + } + + +@@ -2889,6 +2888,7 @@ vmxnet3_reset_work(struct work_struct *data) + } + rtnl_unlock(); + ++ netif_wake_queue(adapter->netdev); + clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); + } + +diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c +index 06f86f435711..1b8422c4ef9b 100644 +--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c ++++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c +@@ -511,8 +511,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + break; + return -EOPNOTSUPP; + default: +- WARN_ON(1); +- return -EINVAL; ++ return -EOPNOTSUPP; + } + + mutex_lock(&ah->lock); +diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c +index 6307a4e36c85..f8639003da95 100644 +--- a/drivers/net/wireless/hostap/hostap_hw.c ++++ b/drivers/net/wireless/hostap/hostap_hw.c +@@ -836,25 +836,30 @@ static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len, + spin_lock_bh(&local->baplock); + + res = hfa384x_setup_bap(dev, BAP0, rid, 0); +- if (!res) +- res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec)); ++ if (res) ++ goto unlock; ++ ++ res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec)); ++ if (res) ++ goto unlock; + + if (le16_to_cpu(rec.len) == 0) { + /* RID not available */ + res = -ENODATA; ++ goto unlock; + } + + rlen = (le16_to_cpu(rec.len) - 1) * 2; +- if (!res && exact_len && rlen != len) { ++ if (exact_len && rlen != len) { + printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: " + "rid=0x%04x, len=%d (expected %d)\n", + dev->name, rid, rlen, len); + res = -ENODATA; + } + +- if (!res) +- res = hfa384x_from_bap(dev, BAP0, buf, len); ++ res = hfa384x_from_bap(dev, BAP0, buf, len); + ++unlock: + spin_unlock_bh(&local->baplock); + mutex_unlock(&local->rid_bap_mtx); + +diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c +index 3ad79736b255..3fc7d0845480 100644 +--- a/drivers/net/wireless/rtlwifi/usb.c ++++ b/drivers/net/wireless/rtlwifi/usb.c +@@ -823,6 +823,7 @@ static void rtl_usb_stop(struct ieee80211_hw *hw) + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); ++ struct urb *urb; + + /* should after 
adapter start and interrupt enable. */ + set_hal_stop(rtlhal); +@@ -830,6 +831,23 @@ static void rtl_usb_stop(struct ieee80211_hw *hw) + /* Enable software */ + SET_USB_STOP(rtlusb); + rtl_usb_deinit(hw); ++ ++ /* free pre-allocated URBs from rtl_usb_start() */ ++ usb_kill_anchored_urbs(&rtlusb->rx_submitted); ++ ++ tasklet_kill(&rtlusb->rx_work_tasklet); ++ cancel_work_sync(&rtlpriv->works.lps_change_work); ++ ++ flush_workqueue(rtlpriv->works.rtl_wq); ++ ++ skb_queue_purge(&rtlusb->rx_queue); ++ ++ while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) { ++ usb_free_coherent(urb->dev, urb->transfer_buffer_length, ++ urb->transfer_buffer, urb->transfer_dma); ++ usb_free_urb(urb); ++ } ++ + rtlpriv->cfg->ops->hw_disable(hw); + } + +diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c +index 3492ec9a33b7..a7d64f94c3cd 100644 +--- a/drivers/pinctrl/sh-pfc/pinctrl.c ++++ b/drivers/pinctrl/sh-pfc/pinctrl.c +@@ -274,7 +274,8 @@ static bool sh_pfc_pinconf_validate(struct sh_pfc *pfc, unsigned int _pin, + + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: +- return true; ++ return pin->configs & ++ (SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN); + + case PIN_CONFIG_BIAS_PULL_UP: + return pin->configs & SH_PFC_PIN_CFG_PULL_UP; +diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c +index 59a8d325a697..e4d9a903ca3c 100644 +--- a/drivers/platform/x86/acer-wmi.c ++++ b/drivers/platform/x86/acer-wmi.c +@@ -1860,11 +1860,24 @@ static int acer_wmi_enable_lm(void) + return status; + } + ++#define ACER_WMID_ACCEL_HID "BST0001" ++ + static acpi_status __init acer_wmi_get_handle_cb(acpi_handle ah, u32 level, + void *ctx, void **retval) + { ++ struct acpi_device *dev; ++ ++ if (!strcmp(ctx, "SENR")) { ++ if (acpi_bus_get_device(ah, &dev)) ++ return AE_OK; ++ if (strcmp(ACER_WMID_ACCEL_HID, acpi_device_hid(dev))) ++ return AE_OK; ++ } else ++ return AE_OK; ++ + *(acpi_handle *)retval = ah; +- return AE_OK; ++ ++ return AE_CTRL_TERMINATE; + } + + static int __init acer_wmi_get_handle(const char *name, const char *prop, +@@ -1878,8 +1891,7 @@ static int __init acer_wmi_get_handle(const char *name, const char *prop, + handle = NULL; + status = acpi_get_devices(prop, acer_wmi_get_handle_cb, + (void *)name, &handle); +- +- if (ACPI_SUCCESS(status)) { ++ if (ACPI_SUCCESS(status) && handle) { + *ah = handle; + return 0; + } else { +@@ -1891,7 +1903,7 @@ static int __init acer_wmi_accel_setup(void) + { + int err; + +- err = acer_wmi_get_handle("SENR", "BST0001", &gsensor_handle); ++ err = acer_wmi_get_handle("SENR", ACER_WMID_ACCEL_HID, &gsensor_handle); + if (err) + return err; + +@@ -2262,10 +2274,11 @@ static int __init acer_wmi_init(void) + err = acer_wmi_input_setup(); + if (err) + return err; ++ err = acer_wmi_accel_setup(); ++ if (err && err != -ENODEV) ++ pr_warn("Cannot enable accelerometer\n"); + } + +- acer_wmi_accel_setup(); +- + err = platform_driver_register(&acer_platform_driver); + if (err) { + pr_err("Unable to register platform driver\n"); +diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c +index 42bd57da239d..09198941ee22 100644 +--- a/drivers/rtc/interface.c ++++ b/drivers/rtc/interface.c +@@ -763,9 +763,23 @@ EXPORT_SYMBOL_GPL(rtc_irq_set_freq); + */ + static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) + { ++ struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue); ++ struct rtc_time tm; ++ ktime_t now; ++ + timer->enabled = 1; ++ __rtc_read_time(rtc, &tm); ++ now = 
rtc_tm_to_ktime(tm); ++ ++ /* Skip over expired timers */ ++ while (next) { ++ if (next->expires.tv64 >= now.tv64) ++ break; ++ next = timerqueue_iterate_next(next); ++ } ++ + timerqueue_add(&rtc->timerqueue, &timer->node); +- if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) { ++ if (!next) { + struct rtc_wkalrm alarm; + int err; + alarm.time = rtc_ktime_to_tm(timer->node.expires); +diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c +index f40afdd0e5f5..b6e220f5963d 100644 +--- a/drivers/rtc/rtc-s35390a.c ++++ b/drivers/rtc/rtc-s35390a.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + + #define S35390A_CMD_STATUS1 0 + #define S35390A_CMD_STATUS2 1 +@@ -34,10 +35,14 @@ + #define S35390A_ALRM_BYTE_HOURS 1 + #define S35390A_ALRM_BYTE_MINS 2 + ++/* flags for STATUS1 */ + #define S35390A_FLAG_POC 0x01 + #define S35390A_FLAG_BLD 0x02 ++#define S35390A_FLAG_INT2 0x04 + #define S35390A_FLAG_24H 0x40 + #define S35390A_FLAG_RESET 0x80 ++ ++/* flag for STATUS2 */ + #define S35390A_FLAG_TEST 0x01 + + #define S35390A_INT2_MODE_MASK 0xF0 +@@ -94,19 +99,63 @@ static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len) + return 0; + } + +-static int s35390a_reset(struct s35390a *s35390a) ++/* ++ * Returns <0 on error, 0 if rtc is setup fine and 1 if the chip was reset. ++ * To keep the information if an irq is pending, pass the value read from ++ * STATUS1 to the caller. ++ */ ++static int s35390a_reset(struct s35390a *s35390a, char *status1) + { +- char buf[1]; +- +- if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)) < 0) +- return -EIO; +- +- if (!(buf[0] & (S35390A_FLAG_POC | S35390A_FLAG_BLD))) ++ char buf; ++ int ret; ++ unsigned initcount = 0; ++ ++ ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, status1, 1); ++ if (ret < 0) ++ return ret; ++ ++ if (*status1 & S35390A_FLAG_POC) ++ /* ++ * Do not communicate for 0.5 seconds since the power-on ++ * detection circuit is in operation. ++ */ ++ msleep(500); ++ else if (!(*status1 & S35390A_FLAG_BLD)) ++ /* ++ * If both POC and BLD are unset everything is fine. ++ */ + return 0; + +- buf[0] |= (S35390A_FLAG_RESET | S35390A_FLAG_24H); +- buf[0] &= 0xf0; +- return s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)); ++ /* ++ * At least one of POC and BLD are set, so reinitialise chip. Keeping ++ * this information in the hardware to know later that the time isn't ++ * valid is unfortunately not possible because POC and BLD are cleared ++ * on read. So the reset is best done now. ++ * ++ * The 24H bit is kept over reset, so set it already here. ++ */ ++initialize: ++ *status1 = S35390A_FLAG_24H; ++ buf = S35390A_FLAG_RESET | S35390A_FLAG_24H; ++ ret = s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1); ++ ++ if (ret < 0) ++ return ret; ++ ++ ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1); ++ if (ret < 0) ++ return ret; ++ ++ if (buf & (S35390A_FLAG_POC | S35390A_FLAG_BLD)) { ++ /* Try up to five times to reset the chip */ ++ if (initcount < 5) { ++ ++initcount; ++ goto initialize; ++ } else ++ return -EIO; ++ } ++ ++ return 1; + } + + static int s35390a_disable_test_mode(struct s35390a *s35390a) +@@ -265,6 +314,20 @@ static int s35390a_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alm) + char buf[3], sts; + int i, err; + ++ /* ++ * initialize all members to -1 to signal the core that they are not ++ * defined by the hardware. 
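The nine per-field assignments just below can also be expressed as a single fill; a sketch of the same "field not supplied" convention (struct name invented, and this assumes two's-complement int, which Linux requires):

    #include <string.h>

    struct rtc_time_like {
            int tm_sec, tm_min, tm_hour, tm_mday, tm_mon,
                tm_year, tm_wday, tm_yday, tm_isdst;
    };

    void mark_all_undefined(struct rtc_time_like *t)
    {
            /* 0xff in every byte reads back as -1 in each int field,
             * the value the RTC core takes to mean "not provided". */
            memset(t, 0xff, sizeof(*t));
    }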
++ */ ++ alm->time.tm_sec = -1; ++ alm->time.tm_min = -1; ++ alm->time.tm_hour = -1; ++ alm->time.tm_mday = -1; ++ alm->time.tm_mon = -1; ++ alm->time.tm_year = -1; ++ alm->time.tm_wday = -1; ++ alm->time.tm_yday = -1; ++ alm->time.tm_isdst = -1; ++ + err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS2, &sts, sizeof(sts)); + if (err < 0) + return err; +@@ -327,11 +390,11 @@ static struct i2c_driver s35390a_driver; + static int s35390a_probe(struct i2c_client *client, + const struct i2c_device_id *id) + { +- int err; ++ int err, err_reset; + unsigned int i; + struct s35390a *s35390a; + struct rtc_time tm; +- char buf[1]; ++ char buf, status1; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + err = -ENODEV; +@@ -360,29 +423,35 @@ static int s35390a_probe(struct i2c_client *client, + } + } + +- err = s35390a_reset(s35390a); +- if (err < 0) { ++ err_reset = s35390a_reset(s35390a, &status1); ++ if (err_reset < 0) { ++ err = err_reset; + dev_err(&client->dev, "error resetting chip\n"); + goto exit_dummy; + } + +- err = s35390a_disable_test_mode(s35390a); +- if (err < 0) { +- dev_err(&client->dev, "error disabling test mode\n"); +- goto exit_dummy; +- } +- +- err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)); +- if (err < 0) { +- dev_err(&client->dev, "error checking 12/24 hour mode\n"); +- goto exit_dummy; +- } +- if (buf[0] & S35390A_FLAG_24H) ++ if (status1 & S35390A_FLAG_24H) + s35390a->twentyfourhour = 1; + else + s35390a->twentyfourhour = 0; + +- if (s35390a_get_datetime(client, &tm) < 0) ++ if (status1 & S35390A_FLAG_INT2) { ++ /* disable alarm (and maybe test mode) */ ++ buf = 0; ++ err = s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, &buf, 1); ++ if (err < 0) { ++ dev_err(&client->dev, "error disabling alarm"); ++ goto exit_dummy; ++ } ++ } else { ++ err = s35390a_disable_test_mode(s35390a); ++ if (err < 0) { ++ dev_err(&client->dev, "error disabling test mode\n"); ++ goto exit_dummy; ++ } ++ } ++ ++ if (err_reset > 0 || s35390a_get_datetime(client, &tm) < 0) + dev_warn(&client->dev, "clock needs to be set\n"); + + device_set_wakeup_capable(&client->dev, 1); +@@ -395,6 +464,10 @@ static int s35390a_probe(struct i2c_client *client, + err = PTR_ERR(s35390a->rtc); + goto exit_dummy; + } ++ ++ if (status1 & S35390A_FLAG_INT2) ++ rtc_update_irq(s35390a->rtc, 1, RTC_AF); ++ + return 0; + + exit_dummy: +diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c +index 9b3a24e8d3a0..5e41e8453acd 100644 +--- a/drivers/s390/char/vmlogrdr.c ++++ b/drivers/s390/char/vmlogrdr.c +@@ -873,7 +873,7 @@ static int __init vmlogrdr_init(void) + goto cleanup; + + for (i=0; i < MAXMINOR; ++i ) { +- sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL); ++ sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sys_ser[i].buffer) { + rc = -ENOMEM; + break; +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c +index e6e0679ec882..b08b1e1a45e5 100644 +--- a/drivers/scsi/lpfc/lpfc_init.c ++++ b/drivers/scsi/lpfc/lpfc_init.c +@@ -10909,6 +10909,7 @@ static struct pci_driver lpfc_driver = { + .id_table = lpfc_id_table, + .probe = lpfc_pci_probe_one, + .remove = lpfc_pci_remove_one, ++ .shutdown = lpfc_pci_remove_one, + .suspend = lpfc_pci_suspend_one, + .resume = lpfc_pci_resume_one, + .err_handler = &lpfc_err_handler, +diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c +index 2da1959ff2f6..03c8783180a8 100644 +--- a/drivers/scsi/mvsas/mv_sas.c ++++ b/drivers/scsi/mvsas/mv_sas.c +@@ -736,8 +736,8 @@ 
static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf + mv_dprintk("device %016llx not ready.\n", + SAS_ADDR(dev->sas_addr)); + +- rc = SAS_PHY_DOWN; +- return rc; ++ rc = SAS_PHY_DOWN; ++ return rc; + } + tei.port = dev->port->lldd_port; + if (tei.port && !tei.port->port_attached && !tmf) { +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c +index 66c495d21016..40fe8a77236a 100644 +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -3301,7 +3301,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, + sizeof(struct ct6_dsd), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!ctx_cachep) +- goto fail_free_gid_list; ++ goto fail_free_srb_mempool; + } + ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, + ctx_cachep); +@@ -3454,7 +3454,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, + ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long), + GFP_KERNEL); + if (!ha->loop_id_map) +- goto fail_async_pd; ++ goto fail_loop_id_map; + else { + qla2x00_set_reserved_loop_ids(ha); + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, +@@ -3463,6 +3463,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, + + return 0; + ++fail_loop_id_map: ++ dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); + fail_async_pd: + dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); + fail_ex_init_cb: +@@ -3490,6 +3492,10 @@ fail_free_ms_iocb: + dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); + ha->ms_iocb = NULL; + ha->ms_iocb_dma = 0; ++ ++ if (ha->sns_cmd) ++ dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), ++ ha->sns_cmd, ha->sns_cmd_dma); + fail_dma_pool: + if (IS_QLA82XX(ha) || ql2xenabledif) { + dma_pool_destroy(ha->fcp_cmnd_dma_pool); +@@ -3507,10 +3513,12 @@ fail_free_nvram: + kfree(ha->nvram); + ha->nvram = NULL; + fail_free_ctx_mempool: +- mempool_destroy(ha->ctx_mempool); ++ if (ha->ctx_mempool) ++ mempool_destroy(ha->ctx_mempool); + ha->ctx_mempool = NULL; + fail_free_srb_mempool: +- mempool_destroy(ha->srb_mempool); ++ if (ha->srb_mempool) ++ mempool_destroy(ha->srb_mempool); + ha->srb_mempool = NULL; + fail_free_gid_list: + dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c +index 60031e15d562..dc1c2f4520f2 100644 +--- a/drivers/scsi/scsi_lib.c ++++ b/drivers/scsi/scsi_lib.c +@@ -1009,8 +1009,12 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, + int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) + { + struct request *rq = cmd->request; ++ int error; + +- int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask); ++ if (WARN_ON_ONCE(!rq->nr_phys_segments)) ++ return -EINVAL; ++ ++ error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask); + if (error) + goto err_exit; + +@@ -1102,11 +1106,7 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) + * submit a request without an attached bio. + */ + if (req->bio) { +- int ret; +- +- BUG_ON(!req->nr_phys_segments); +- +- ret = scsi_init_io(cmd, GFP_ATOMIC); ++ int ret = scsi_init_io(cmd, GFP_ATOMIC); + if (unlikely(ret)) + return ret; + } else { +@@ -1150,11 +1150,6 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) + return ret; + } + +- /* +- * Filesystem requests must transfer data. 
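The BUG_ON removals in this hunk are safe because the check moved into scsi_init_io() as a WARN_ON_ONCE that fails the request instead of crashing the machine; a userspace sketch of that warn-once-and-refuse pattern (macro simplified from the kernel's, using a GNU C statement expression):

    #include <stdio.h>
    #include <errno.h>

    /* Simplified stand-in for the kernel macro: warn on the first
     * violation only, and report whether the condition held. */
    #define WARN_ON_ONCE(cond) ({                                   \
            static int warned;                                      \
            int c = !!(cond);                                       \
            if (c && !warned) {                                     \
                    warned = 1;                                     \
                    fprintf(stderr, "warning: %s\n", #cond);        \
            }                                                       \
            c;                                                      \
    })

    int init_io(unsigned int nr_phys_segments)
    {
            if (WARN_ON_ONCE(nr_phys_segments == 0))
                    return -EINVAL;   /* refuse the request, don't panic */
            return 0;
    }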
+- */ +- BUG_ON(!req->nr_phys_segments); +- + cmd = scsi_get_cmd_from_req(sdev, req); + if (unlikely(!cmd)) + return BLKPREP_DEFER; +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c +index 135d7b56fbe6..53da653988ec 100644 +--- a/drivers/scsi/scsi_sysfs.c ++++ b/drivers/scsi/scsi_sysfs.c +@@ -865,10 +865,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) + struct request_queue *rq = sdev->request_queue; + struct scsi_target *starget = sdev->sdev_target; + +- error = scsi_device_set_state(sdev, SDEV_RUNNING); +- if (error) +- return error; +- + error = scsi_target_add(starget); + if (error) + return error; +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c +index 4afce0e838a2..880a300baf7a 100644 +--- a/drivers/scsi/sd.c ++++ b/drivers/scsi/sd.c +@@ -1354,11 +1354,15 @@ static int media_not_present(struct scsi_disk *sdkp, + **/ + static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) + { +- struct scsi_disk *sdkp = scsi_disk(disk); +- struct scsi_device *sdp = sdkp->device; ++ struct scsi_disk *sdkp = scsi_disk_get(disk); ++ struct scsi_device *sdp; + struct scsi_sense_hdr *sshdr = NULL; + int retval; + ++ if (!sdkp) ++ return 0; ++ ++ sdp = sdkp->device; + SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n")); + + /* +@@ -1415,6 +1419,7 @@ out: + kfree(sshdr); + retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0; + sdp->changed = 0; ++ scsi_disk_put(sdkp); + return retval; + } + +@@ -1919,6 +1924,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, + + #define READ_CAPACITY_RETRIES_ON_RESET 10 + ++/* ++ * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set ++ * and the reported logical block size is bigger than 512 bytes. Note ++ * that last_sector is a u64 and therefore logical_to_sectors() is not ++ * applicable. ++ */ ++static bool sd_addressable_capacity(u64 lba, unsigned int sector_size) ++{ ++ u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9); ++ ++ if (sizeof(sector_t) == 4 && last_sector > U32_MAX) ++ return false; ++ ++ return true; ++} ++ + static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, + unsigned char *buffer) + { +@@ -1984,7 +2005,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, + return -ENODEV; + } + +- if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) { ++ if (!sd_addressable_capacity(lba, sector_size)) { + sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a " + "kernel compiled with support for large block " + "devices.\n"); +@@ -2070,7 +2091,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp, + return sector_size; + } + +- if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) { ++ if (!sd_addressable_capacity(lba, sector_size)) { + sd_printk(KERN_ERR, sdkp, "Too big for this kernel. 
Use a " + "kernel compiled with support for large block " + "devices.\n"); +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index 1f65e32db285..0b27d293dd83 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -568,6 +568,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) + sg_io_hdr_t *hp; + unsigned char cmnd[MAX_COMMAND_SIZE]; + ++ if (unlikely(segment_eq(get_fs(), KERNEL_DS))) ++ return -EINVAL; ++ + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) + return -ENXIO; + SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n", +@@ -766,8 +769,14 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, + return k; /* probably out of space --> ENOMEM */ + } + if (sdp->detached) { +- if (srp->bio) ++ if (srp->bio) { ++ if (srp->rq->cmd != srp->rq->__cmd) ++ kfree(srp->rq->cmd); ++ + blk_end_request_all(srp->rq, -EIO); ++ srp->rq = NULL; ++ } ++ + sg_finish_rem_req(srp); + return -ENODEV; + } +diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c +index 1ac9943cbb93..c1f23abd754a 100644 +--- a/drivers/scsi/sr.c ++++ b/drivers/scsi/sr.c +@@ -855,6 +855,7 @@ static void get_capabilities(struct scsi_cd *cd) + unsigned char *buffer; + struct scsi_mode_data data; + struct scsi_sense_hdr sshdr; ++ unsigned int ms_len = 128; + int rc, n; + + static const char *loadmech[] = +@@ -881,10 +882,11 @@ static void get_capabilities(struct scsi_cd *cd) + scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr); + + /* ask for mode page 0x2a */ +- rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128, ++ rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len, + SR_TIMEOUT, 3, &data, NULL); + +- if (!scsi_status_is_good(rc)) { ++ if (!scsi_status_is_good(rc) || data.length > ms_len || ++ data.header_length + data.block_descriptor_length > data.length) { + /* failed, drive doesn't have capabilities mode page */ + cd->cdi.speed = 1; + cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R | +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c +index 913b91c78a22..58d898cdff0a 100644 +--- a/drivers/scsi/storvsc_drv.c ++++ b/drivers/scsi/storvsc_drv.c +@@ -204,6 +204,7 @@ enum storvsc_request_type { + #define SRB_STATUS_SUCCESS 0x01 + #define SRB_STATUS_ABORTED 0x02 + #define SRB_STATUS_ERROR 0x04 ++#define SRB_STATUS_DATA_OVERRUN 0x12 + + /* + * This is the end of Protocol specific defines. +@@ -795,6 +796,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, + switch (vm_srb->srb_status) { + case SRB_STATUS_ERROR: + /* ++ * Let upper layer deal with error when ++ * sense message is present. ++ */ ++ ++ if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) ++ break; ++ /* + * If there is an error; offline the device since all + * error recovery strategies would have already been + * deployed on the host side. 
However, if the command +@@ -859,6 +867,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) + struct scsi_sense_hdr sense_hdr; + struct vmscsi_request *vm_srb; + struct stor_mem_pools *memp = scmnd->device->hostdata; ++ u32 data_transfer_length; + struct Scsi_Host *host; + struct storvsc_device *stor_dev; + struct hv_device *dev = host_dev->dev; +@@ -867,6 +876,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) + host = stor_dev->host; + + vm_srb = &cmd_request->vstor_packet.vm_srb; ++ data_transfer_length = vm_srb->data_transfer_length; + if (cmd_request->bounce_sgl_count) { + if (vm_srb->data_in == READ_TYPE) + copy_from_bounce_buffer(scsi_sglist(scmnd), +@@ -885,13 +895,20 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) + scsi_print_sense_hdr("storvsc", &sense_hdr); + } + +- if (vm_srb->srb_status != SRB_STATUS_SUCCESS) ++ if (vm_srb->srb_status != SRB_STATUS_SUCCESS) { + storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc, + sense_hdr.ascq); ++ /* ++ * The Windows driver sets data_transfer_length on ++ * SRB_STATUS_DATA_OVERRUN. On other errors, this value ++ * is untouched. In these cases we set it to 0. ++ */ ++ if (vm_srb->srb_status != SRB_STATUS_DATA_OVERRUN) ++ data_transfer_length = 0; ++ } + + scsi_set_resid(scmnd, +- cmd_request->data_buffer.len - +- vm_srb->data_transfer_length); ++ cmd_request->data_buffer.len - data_transfer_length); + + scsi_done_fn = scmnd->scsi_done; + +diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c +index a8dc95ebf2d6..7700cef5e177 100644 +--- a/drivers/ssb/pci.c ++++ b/drivers/ssb/pci.c +@@ -846,6 +846,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, + if (err) { + ssb_warn("WARNING: Using fallback SPROM failed (err %d)\n", + err); ++ goto out_free; + } else { + ssb_dbg("Using SPROM revision %d provided by platform\n", + sprom->revision); +diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c +index 30be6c9bdbc6..ff3ca598e539 100644 +--- a/drivers/target/iscsi/iscsi_target_parameters.c ++++ b/drivers/target/iscsi/iscsi_target_parameters.c +@@ -806,22 +806,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param) + if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) + SET_PSTATE_REPLY_OPTIONAL(param); + /* +- * The GlobalSAN iSCSI Initiator for MacOSX does +- * not respond to MaxBurstLength, FirstBurstLength, +- * DefaultTime2Wait or DefaultTime2Retain parameter keys. +- * So, we set them to 'reply optional' here, and assume the +- * the defaults from iscsi_parameters.h if the initiator +- * is not RFC compliant and the keys are not negotiated.
+- */ +- if (!strcmp(param->name, MAXBURSTLENGTH)) +- SET_PSTATE_REPLY_OPTIONAL(param); +- if (!strcmp(param->name, FIRSTBURSTLENGTH)) +- SET_PSTATE_REPLY_OPTIONAL(param); +- if (!strcmp(param->name, DEFAULTTIME2WAIT)) +- SET_PSTATE_REPLY_OPTIONAL(param); +- if (!strcmp(param->name, DEFAULTTIME2RETAIN)) +- SET_PSTATE_REPLY_OPTIONAL(param); +- /* + * Required for gPXE iSCSI boot client + */ + if (!strcmp(param->name, MAXCONNECTIONS)) +diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c +index 016e882356d6..eeeea38d4b2e 100644 +--- a/drivers/target/iscsi/iscsi_target_util.c ++++ b/drivers/target/iscsi/iscsi_target_util.c +@@ -722,21 +722,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) + { + struct se_cmd *se_cmd = NULL; + int rc; ++ bool op_scsi = false; + /* + * Determine if a struct se_cmd is associated with + * this struct iscsi_cmd. + */ + switch (cmd->iscsi_opcode) { + case ISCSI_OP_SCSI_CMD: +- se_cmd = &cmd->se_cmd; +- __iscsit_free_cmd(cmd, true, shutdown); ++ op_scsi = true; + /* + * Fallthrough + */ + case ISCSI_OP_SCSI_TMFUNC: +- rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); +- if (!rc && shutdown && se_cmd && se_cmd->se_sess) { +- __iscsit_free_cmd(cmd, true, shutdown); ++ se_cmd = &cmd->se_cmd; ++ __iscsit_free_cmd(cmd, op_scsi, shutdown); ++ rc = transport_generic_free_cmd(se_cmd, shutdown); ++ if (!rc && shutdown && se_cmd->se_sess) { ++ __iscsit_free_cmd(cmd, op_scsi, shutdown); + target_put_sess_cmd(se_cmd->se_sess, se_cmd); + } + break; +diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c +index 244776bec1c7..79fed114a2e0 100644 +--- a/drivers/target/target_core_pscsi.c ++++ b/drivers/target/target_core_pscsi.c +@@ -157,7 +157,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, + + buf = kzalloc(12, GFP_KERNEL); + if (!buf) +- return; ++ goto out_free; + + memset(cdb, 0, MAX_COMMAND_SIZE); + cdb[0] = MODE_SENSE; +@@ -172,9 +172,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, + * If MODE_SENSE still returns zero, set the default value to 1024. + */ + sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); ++out_free: + if (!sdev->sector_size) + sdev->sector_size = 1024; +-out_free: ++ + kfree(buf); + } + +@@ -317,9 +318,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, + sd->lun, sd->queue_depth); + } + +- dev->dev_attrib.hw_block_size = sd->sector_size; ++ dev->dev_attrib.hw_block_size = ++ min_not_zero((int)sd->sector_size, 512); + dev->dev_attrib.hw_max_sectors = +- min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); ++ min_not_zero((unsigned)sd->host->max_sectors, queue_max_hw_sectors(q)); + dev->dev_attrib.hw_queue_depth = sd->queue_depth; + + /* +@@ -342,8 +344,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, + /* + * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE. + */ +- if (sd->type == TYPE_TAPE) ++ if (sd->type == TYPE_TAPE) { + pscsi_tape_read_blocksize(dev, sd); ++ dev->dev_attrib.hw_block_size = sd->sector_size; ++ } + return 0; + } + +@@ -409,7 +413,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) + /* + * Called with struct Scsi_Host->host_lock called. 
+ */ +-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) ++static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd) + __releases(sh->host_lock) + { + struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; +@@ -436,28 +440,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) + return 0; + } + +-/* +- * Called with struct Scsi_Host->host_lock called. +- */ +-static int pscsi_create_type_other(struct se_device *dev, +- struct scsi_device *sd) +- __releases(sh->host_lock) +-{ +- struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; +- struct Scsi_Host *sh = sd->host; +- int ret; +- +- spin_unlock_irq(sh->host_lock); +- ret = pscsi_add_device_to_list(dev, sd); +- if (ret) +- return ret; +- +- pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", +- phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, +- sd->channel, sd->id, sd->lun); +- return 0; +-} +- + static int pscsi_configure_device(struct se_device *dev) + { + struct se_hba *hba = dev->se_hba; +@@ -545,11 +527,8 @@ static int pscsi_configure_device(struct se_device *dev) + case TYPE_DISK: + ret = pscsi_create_type_disk(dev, sd); + break; +- case TYPE_ROM: +- ret = pscsi_create_type_rom(dev, sd); +- break; + default: +- ret = pscsi_create_type_other(dev, sd); ++ ret = pscsi_create_type_nondisk(dev, sd); + break; + } + +@@ -606,8 +585,7 @@ static void pscsi_free_device(struct se_device *dev) + else if (pdv->pdv_lld_host) + scsi_host_put(pdv->pdv_lld_host); + +- if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) +- scsi_device_put(sd); ++ scsi_device_put(sd); + + pdv->pdv_sd = NULL; + } +@@ -1125,7 +1103,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev) + if (pdv->pdv_bd && pdv->pdv_bd->bd_part) + return pdv->pdv_bd->bd_part->nr_sects; + +- dump_stack(); + return 0; + } + +diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c +index d6080c3831ef..ce2e5d508fe7 100644 +--- a/drivers/tty/nozomi.c ++++ b/drivers/tty/nozomi.c +@@ -823,7 +823,7 @@ static int receive_data(enum port_type index, struct nozomi *dc) + struct tty_struct *tty = tty_port_tty_get(&port->port); + int i, ret; + +- read_mem32((u32 *) &size, addr, 4); ++ size = __le32_to_cpu(readl(addr)); + /* DBG1( "%d bytes port: %d", size, index); */ + + if (tty && test_bit(TTY_THROTTLED, &tty->flags)) { +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c +index 98b8423793fd..9243dd729dd4 100644 +--- a/drivers/tty/serial/8250/8250_pci.c ++++ b/drivers/tty/serial/8250/8250_pci.c +@@ -55,6 +55,7 @@ struct serial_private { + unsigned int nr; + void __iomem *remapped_bar[PCI_NUM_BAR_RESOURCES]; + struct pci_serial_quirk *quirk; ++ const struct pciserial_board *board; + int line[0]; + }; + +@@ -3374,6 +3375,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board) + } + } + priv->nr = i; ++ priv->board = board; + return priv; + + err_deinit: +@@ -3384,7 +3386,7 @@ err_out: + } + EXPORT_SYMBOL_GPL(pciserial_init_ports); + +-void pciserial_remove_ports(struct serial_private *priv) ++void pciserial_detach_ports(struct serial_private *priv) + { + struct pci_serial_quirk *quirk; + int i; +@@ -3404,7 +3406,11 @@ void pciserial_remove_ports(struct serial_private *priv) + quirk = find_quirk(priv->dev); + if (quirk->exit) + quirk->exit(priv->dev); ++} + ++void pciserial_remove_ports(struct serial_private *priv) ++{ ++ pciserial_detach_ports(priv); + kfree(priv); + } + EXPORT_SYMBOL_GPL(pciserial_remove_ports); +@@ -4943,7 +4949,7 @@ 
static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev, + return PCI_ERS_RESULT_DISCONNECT; + + if (priv) +- pciserial_suspend_ports(priv); ++ pciserial_detach_ports(priv); + + pci_disable_device(dev); + +@@ -4968,9 +4974,18 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev) + static void serial8250_io_resume(struct pci_dev *dev) + { + struct serial_private *priv = pci_get_drvdata(dev); ++ const struct pciserial_board *board; + +- if (priv) +- pciserial_resume_ports(priv); ++ if (!priv) ++ return; ++ ++ board = priv->board; ++ kfree(priv); ++ priv = pciserial_init_ports(dev, board); ++ ++ if (!IS_ERR(priv)) { ++ pci_set_drvdata(dev, priv); ++ } + } + + static const struct pci_error_handlers serial8250_err_handler = { +diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c +index b11e99797fd8..876ce8823edc 100644 +--- a/drivers/tty/serial/msm_serial.c ++++ b/drivers/tty/serial/msm_serial.c +@@ -930,6 +930,7 @@ static struct of_device_id msm_match_table[] = { + { .compatible = "qcom,msm-uart" }, + {} + }; ++MODULE_DEVICE_TABLE(of, msm_match_table); + + static struct platform_driver msm_platform_driver = { + .remove = msm_serial_remove, +diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c +index b51c15408ff3..602c9a74bec6 100644 +--- a/drivers/tty/sysrq.c ++++ b/drivers/tty/sysrq.c +@@ -881,8 +881,8 @@ static const struct input_device_id sysrq_ids[] = { + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | + INPUT_DEVICE_ID_MATCH_KEYBIT, +- .evbit = { BIT_MASK(EV_KEY) }, +- .keybit = { BIT_MASK(KEY_LEFTALT) }, ++ .evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) }, ++ .keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) }, + }, + { }, + }; +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index b364845de5ad..802df033e24c 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -543,19 +543,18 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty) + acm->control->needs_remote_wakeup = 1; + + acm->ctrlurb->dev = acm->dev; +- if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) { ++ retval = usb_submit_urb(acm->ctrlurb, GFP_KERNEL); ++ if (retval) { + dev_err(&acm->control->dev, + "%s - usb_submit_urb(ctrl irq) failed\n", __func__); + goto error_submit_urb; + } + + acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS; +- if (acm_set_control(acm, acm->ctrlout) < 0 && +- (acm->ctrl_caps & USB_CDC_CAP_LINE)) ++ retval = acm_set_control(acm, acm->ctrlout); ++ if (retval < 0 && (acm->ctrl_caps & USB_CDC_CAP_LINE)) + goto error_set_control; + +- usb_autopm_put_interface(acm->control); +- + /* + * Unthrottle device in case the TTY was closed while throttled. 
+ */ +@@ -564,9 +563,12 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty) + acm->throttle_req = 0; + spin_unlock_irq(&acm->read_lock); + +- if (acm_submit_read_urbs(acm, GFP_KERNEL)) ++ retval = acm_submit_read_urbs(acm, GFP_KERNEL); ++ if (retval) + goto error_submit_read_urbs; + ++ usb_autopm_put_interface(acm->control); ++ + mutex_unlock(&acm->mutex); + + return 0; +@@ -583,7 +585,8 @@ error_submit_urb: + error_get_interface: + disconnected: + mutex_unlock(&acm->mutex); +- return retval; ++ ++ return usb_translate_errors(retval); + } + + static void acm_port_destruct(struct tty_port *port) +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 4e5156d212dd..55a8e84469ec 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -2579,8 +2579,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1, + if (ret < 0) + return ret; + +- /* The port state is unknown until the reset completes. */ +- if (!(portstatus & USB_PORT_STAT_RESET)) ++ /* ++ * The port state is unknown until the reset completes. ++ * ++ * On top of that, some chips may require additional time ++ * to re-establish a connection after the reset is complete, ++ * so also wait for the connection to be re-established. ++ */ ++ if (!(portstatus & USB_PORT_STAT_RESET) && ++ (portstatus & USB_PORT_STAT_CONNECTION)) + break; + + /* switch to the long delay after two short delay failures */ +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index 5a2eaf401b00..8f96e7d1d4da 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -241,6 +241,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, + int status) + { + struct dwc3 *dwc = dep->dwc; ++ unsigned int unmap_after_complete = false; + int i; + + if (req->queued) { +@@ -265,11 +266,19 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, + if (req->request.status == -EINPROGRESS) + req->request.status = status; + +- if (dwc->ep0_bounced && dep->number <= 1) ++ /* ++ * NOTICE we don't want to unmap before calling ->complete() if we're ++ * dealing with a bounced ep0 request. If we unmap it here, we would end ++ * up overwriting the contents of req->buf and this could confuse the ++ * gadget driver.
++ */ ++ if (dwc->ep0_bounced && dep->number <= 1) { + dwc->ep0_bounced = false; +- +- usb_gadget_unmap_request(&dwc->gadget, &req->request, +- req->direction); ++ unmap_after_complete = true; ++ } else { ++ usb_gadget_unmap_request(&dwc->gadget, ++ &req->request, req->direction); ++ } + + dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n", + req, dep->name, req->request.actual, +@@ -278,6 +287,10 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, + spin_unlock(&dwc->lock); + req->request.complete(&dep->endpoint, &req->request); + spin_lock(&dwc->lock); ++ ++ if (unmap_after_complete) ++ usb_gadget_unmap_request(&dwc->gadget, ++ &req->request, req->direction); + } + + static const char *dwc3_gadget_ep_cmd_string(u8 cmd) +diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h +index b3f25c302e35..40ac1abe3ad4 100644 +--- a/drivers/usb/dwc3/gadget.h ++++ b/drivers/usb/dwc3/gadget.h +@@ -48,23 +48,23 @@ struct dwc3; + #define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget)) + + /* DEPCFG parameter 1 */ +-#define DWC3_DEPCFG_INT_NUM(n) ((n) << 0) ++#define DWC3_DEPCFG_INT_NUM(n) (((n) & 0x1f) << 0) + #define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8) + #define DWC3_DEPCFG_XFER_IN_PROGRESS_EN (1 << 9) + #define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10) + #define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11) + #define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13) +-#define DWC3_DEPCFG_BINTERVAL_M1(n) ((n) << 16) ++#define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16) + #define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24) +-#define DWC3_DEPCFG_EP_NUMBER(n) ((n) << 25) ++#define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25) + #define DWC3_DEPCFG_BULK_BASED (1 << 30) + #define DWC3_DEPCFG_FIFO_BASED (1 << 31) + + /* DEPCFG parameter 0 */ +-#define DWC3_DEPCFG_EP_TYPE(n) ((n) << 1) +-#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) ((n) << 3) +-#define DWC3_DEPCFG_FIFO_NUMBER(n) ((n) << 17) +-#define DWC3_DEPCFG_BURST_SIZE(n) ((n) << 22) ++#define DWC3_DEPCFG_EP_TYPE(n) (((n) & 0x3) << 1) ++#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) (((n) & 0x7ff) << 3) ++#define DWC3_DEPCFG_FIFO_NUMBER(n) (((n) & 0x1f) << 17) ++#define DWC3_DEPCFG_BURST_SIZE(n) (((n) & 0xf) << 22) + #define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26) + /* This applies for core versions earlier than 1.94a */ + #define DWC3_DEPCFG_IGN_SEQ_NUM (1 << 31) +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c +index 584e43c9748f..a9142a46ae82 100644 +--- a/drivers/usb/gadget/composite.c ++++ b/drivers/usb/gadget/composite.c +@@ -125,11 +125,16 @@ int config_ep_by_speed(struct usb_gadget *g, + + ep_found: + /* commit results */ +- _ep->maxpacket = usb_endpoint_maxp(chosen_desc); ++ _ep->maxpacket = usb_endpoint_maxp(chosen_desc) & 0x7ff; + _ep->desc = chosen_desc; + _ep->comp_desc = NULL; + _ep->maxburst = 0; +- _ep->mult = 0; ++ _ep->mult = 1; ++ ++ if (g->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(_ep->desc) || ++ usb_endpoint_xfer_int(_ep->desc))) ++ _ep->mult = ((usb_endpoint_maxp(_ep->desc) & 0x1800) >> 11) + 1; ++ + if (!want_comp_desc) + return 0; + +@@ -146,7 +151,7 @@ ep_found: + switch (usb_endpoint_type(_ep->desc)) { + case USB_ENDPOINT_XFER_ISOC: + /* mult: bits 1:0 of bmAttributes */ +- _ep->mult = comp_desc->bmAttributes & 0x3; ++ _ep->mult = (comp_desc->bmAttributes & 0x3) + 1; + case USB_ENDPOINT_XFER_BULK: + case USB_ENDPOINT_XFER_INT: + _ep->maxburst = comp_desc->bMaxBurst + 1; +diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c +index 3384486c2884..ff30171b6926 
100644 +--- a/drivers/usb/gadget/f_acm.c ++++ b/drivers/usb/gadget/f_acm.c +@@ -535,13 +535,15 @@ static int acm_notify_serial_state(struct f_acm *acm) + { + struct usb_composite_dev *cdev = acm->port.func.config->cdev; + int status; ++ __le16 serial_state; + + spin_lock(&acm->lock); + if (acm->notify_req) { + DBG(cdev, "acm ttyGS%d serial state %04x\n", + acm->port_num, acm->serial_state); ++ serial_state = cpu_to_le16(acm->serial_state); + status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE, +- 0, &acm->serial_state, sizeof(acm->serial_state)); ++ 0, &serial_state, sizeof(acm->serial_state)); + } else { + acm->pending = true; + status = 0; +diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c +index 42a30903d4fd..c9e552bdf051 100644 +--- a/drivers/usb/gadget/inode.c ++++ b/drivers/usb/gadget/inode.c +@@ -1200,7 +1200,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) + /* data and/or status stage for control request */ + } else if (dev->state == STATE_DEV_SETUP) { + +- /* IN DATA+STATUS caller makes len <= wLength */ ++ len = min_t(size_t, len, dev->setup_wLength); + if (dev->setup_in) { + retval = setup_req (dev->gadget->ep0, dev->req, len); + if (retval == 0) { +@@ -1834,10 +1834,12 @@ static struct usb_gadget_driver probe_driver = { + * such as configuration notifications. + */ + +-static int is_valid_config (struct usb_config_descriptor *config) ++static int is_valid_config(struct usb_config_descriptor *config, ++ unsigned int total) + { + return config->bDescriptorType == USB_DT_CONFIG + && config->bLength == USB_DT_CONFIG_SIZE ++ && total >= USB_DT_CONFIG_SIZE + && config->bConfigurationValue != 0 + && (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0 + && (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0; +@@ -1854,7 +1856,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) + u32 tag; + char *kbuf; + +- if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ++ if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) || ++ (len > PAGE_SIZE * 4)) + return -EINVAL; + + /* we might need to change message format someday */ +@@ -1878,7 +1881,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) + /* full or low speed config */ + dev->config = (void *) kbuf; + total = le16_to_cpu(dev->config->wTotalLength); +- if (!is_valid_config (dev->config) || total >= length) ++ if (!is_valid_config(dev->config, total) || ++ total > length - USB_DT_DEVICE_SIZE) + goto fail; + kbuf += total; + length -= total; +@@ -1887,10 +1891,13 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) + if (kbuf [1] == USB_DT_CONFIG) { + dev->hs_config = (void *) kbuf; + total = le16_to_cpu(dev->hs_config->wTotalLength); +- if (!is_valid_config (dev->hs_config) || total >= length) ++ if (!is_valid_config(dev->hs_config, total) || ++ total > length - USB_DT_DEVICE_SIZE) + goto fail; + kbuf += total; + length -= total; ++ } else { ++ dev->hs_config = NULL; + } + + /* could support multiple configs, using another encoding! 
*/ +diff --git a/drivers/usb/gadget/uvc_video.c b/drivers/usb/gadget/uvc_video.c +index 71e896d4c5ae..43e8c65fd9ed 100644 +--- a/drivers/usb/gadget/uvc_video.c ++++ b/drivers/usb/gadget/uvc_video.c +@@ -240,7 +240,7 @@ uvc_video_alloc_requests(struct uvc_video *video) + + req_size = video->ep->maxpacket + * max_t(unsigned int, video->ep->maxburst, 1) +- * (video->ep->mult + 1); ++ * (video->ep->mult); + + for (i = 0; i < UVC_NUM_REQUESTS; ++i) { + video->req_buffer[i] = kmalloc(req_size, GFP_KERNEL); +diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c +index 0f228c46eeda..ad458ef4b7e9 100644 +--- a/drivers/usb/host/uhci-pci.c ++++ b/drivers/usb/host/uhci-pci.c +@@ -129,6 +129,10 @@ static int uhci_pci_init(struct usb_hcd *hcd) + if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP) + uhci->wait_for_hp = 1; + ++ /* Intel controllers use non-PME wakeup signalling */ ++ if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL) ++ device_set_run_wake(uhci_dev(uhci), 1); ++ + /* Set up pointers to PCI-specific functions */ + uhci->reset_hc = uhci_pci_reset_hc; + uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc; +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index b07e0754d784..f4348a9ff385 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -925,6 +925,40 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) + xhci->devs[slot_id] = NULL; + } + ++/* ++ * Free a virt_device structure. ++ * If the virt_device added a tt_info (a hub) and has children pointing to ++ * that tt_info, then free the child first. Recursive. ++ * We can't rely on udev at this point to find child-parent relationships. ++ */ ++void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id) ++{ ++ struct xhci_virt_device *vdev; ++ struct list_head *tt_list_head; ++ struct xhci_tt_bw_info *tt_info, *next; ++ int i; ++ ++ vdev = xhci->devs[slot_id]; ++ if (!vdev) ++ return; ++ ++ tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts); ++ list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { ++ /* is this a hub device that added a tt_info to the tts list */ ++ if (tt_info->slot_id == slot_id) { ++ /* are any devices using this tt_info? 
*/ ++ for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) { ++ vdev = xhci->devs[i]; ++ if (vdev && (vdev->tt_info == tt_info)) ++ xhci_free_virt_devices_depth_first( ++ xhci, i); ++ } ++ } ++ } ++ /* we are now at a leaf device */ ++ xhci_free_virt_device(xhci, slot_id); ++} ++ + int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, + struct usb_device *udev, gfp_t flags) + { +@@ -1804,8 +1838,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) + } + } + +- for (i = 1; i < MAX_HC_SLOTS; ++i) +- xhci_free_virt_device(xhci, i); ++ for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--) ++ xhci_free_virt_devices_depth_first(xhci, i); + + if (xhci->segment_pool) + dma_pool_destroy(xhci->segment_pool); +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index 2320e20d5be7..cae9881145f6 100644 +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -224,6 +224,7 @@ static void xhci_pci_remove(struct pci_dev *dev) + struct xhci_hcd *xhci; + + xhci = hcd_to_xhci(pci_get_drvdata(dev)); ++ xhci->xhc_state |= XHCI_STATE_REMOVING; + if (xhci->shared_hcd) { + usb_remove_hcd(xhci->shared_hcd); + usb_put_hcd(xhci->shared_hcd); +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c +index 6e70ce976769..411db91b8305 100644 +--- a/drivers/usb/host/xhci-plat.c ++++ b/drivers/usb/host/xhci-plat.c +@@ -174,6 +174,8 @@ static int xhci_plat_remove(struct platform_device *dev) + struct usb_hcd *hcd = platform_get_drvdata(dev); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + ++ xhci->xhc_state |= XHCI_STATE_REMOVING; ++ + usb_remove_hcd(xhci->shared_hcd); + usb_put_hcd(xhci->shared_hcd); + +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index 507677b9bdc7..0e7dccc271db 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -139,7 +139,8 @@ static int xhci_start(struct xhci_hcd *xhci) + "waited %u microseconds.\n", + XHCI_MAX_HALT_USEC); + if (!ret) +- xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING); ++ /* clear state flags. 
Including dying, halted or removing */ ++ xhci->xhc_state = 0; + + return ret; + } +@@ -2693,7 +2694,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) + if (ret <= 0) + return ret; + xhci = hcd_to_xhci(hcd); +- if (xhci->xhc_state & XHCI_STATE_DYING) ++ if ((xhci->xhc_state & XHCI_STATE_DYING) || ++ (xhci->xhc_state & XHCI_STATE_REMOVING)) + return -ENODEV; + + xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index deb2537ae75c..15e796faa0a8 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -1493,6 +1493,7 @@ struct xhci_hcd { + */ + #define XHCI_STATE_DYING (1 << 0) + #define XHCI_STATE_HALTED (1 << 1) ++#define XHCI_STATE_REMOVING (1 << 2) + /* Statistics */ + int error_bitmask; + unsigned int quirks; +diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c +index 20814d528c15..2dd6830fdf7b 100644 +--- a/drivers/usb/serial/io_ti.c ++++ b/drivers/usb/serial/io_ti.c +@@ -1393,8 +1393,7 @@ static int download_fw(struct edgeport_serial *serial) + + dev_dbg(dev, "%s - Download successful -- Device rebooting...\n", __func__); + +- /* return an error on purpose */ +- return -ENODEV; ++ return 1; + } + + stayinbootmode: +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c +index dc55bc254c5c..a1d0fc476146 100644 +--- a/drivers/vfio/pci/vfio_pci.c ++++ b/drivers/vfio/pci/vfio_pci.c +@@ -344,8 +344,9 @@ static long vfio_pci_ioctl(void *device_data, + + } else if (cmd == VFIO_DEVICE_SET_IRQS) { + struct vfio_irq_set hdr; ++ size_t size; + u8 *data = NULL; +- int ret = 0; ++ int max, ret = 0; + + minsz = offsetofend(struct vfio_irq_set, count); + +@@ -353,23 +354,31 @@ static long vfio_pci_ioctl(void *device_data, + return -EFAULT; + + if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS || ++ hdr.count >= (U32_MAX - hdr.start) || + hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK | + VFIO_IRQ_SET_ACTION_TYPE_MASK)) + return -EINVAL; + +- if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { +- size_t size; +- int max = vfio_pci_get_irq_count(vdev, hdr.index); ++ max = vfio_pci_get_irq_count(vdev, hdr.index); ++ if (hdr.start >= max || hdr.start + hdr.count > max) ++ return -EINVAL; + +- if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) +- size = sizeof(uint8_t); +- else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD) +- size = sizeof(int32_t); +- else +- return -EINVAL; ++ switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) { ++ case VFIO_IRQ_SET_DATA_NONE: ++ size = 0; ++ break; ++ case VFIO_IRQ_SET_DATA_BOOL: ++ size = sizeof(uint8_t); ++ break; ++ case VFIO_IRQ_SET_DATA_EVENTFD: ++ size = sizeof(int32_t); ++ break; ++ default: ++ return -EINVAL; ++ } + +- if (hdr.argsz - minsz < hdr.count * size || +- hdr.start >= max || hdr.start + hdr.count > max) ++ if (size) { ++ if (hdr.argsz - minsz < hdr.count * size) + return -EINVAL; + + data = memdup_user((void __user *)(arg + minsz), +diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c +index 4bc704e1b7c7..bfe72a991fa6 100644 +--- a/drivers/vfio/pci/vfio_pci_intrs.c ++++ b/drivers/vfio/pci/vfio_pci_intrs.c +@@ -468,7 +468,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) + if (!is_irq_none(vdev)) + return -EINVAL; + +- vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); ++ vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); + if (!vdev->ctx) + return -ENOMEM; + +diff --git a/drivers/video/console/fbcon.c 
b/drivers/video/console/fbcon.c +index a92783e480e6..ca55f93b0f62 100644 +--- a/drivers/video/console/fbcon.c ++++ b/drivers/video/console/fbcon.c +@@ -1196,6 +1196,8 @@ static void fbcon_free_font(struct display *p, bool freefont) + p->userfont = 0; + } + ++static void set_vc_hi_font(struct vc_data *vc, bool set); ++ + static void fbcon_deinit(struct vc_data *vc) + { + struct display *p = &fb_display[vc->vc_num]; +@@ -1231,6 +1233,9 @@ finished: + if (free_font) + vc->vc_font.data = NULL; + ++ if (vc->vc_hi_font_mask) ++ set_vc_hi_font(vc, false); ++ + if (!con_is_bound(&fb_con)) + fbcon_exit(); + +@@ -2466,32 +2471,10 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) + return 0; + } + +-static int fbcon_do_set_font(struct vc_data *vc, int w, int h, +- const u8 * data, int userfont) ++/* set/clear vc_hi_font_mask and update vc attrs accordingly */ ++static void set_vc_hi_font(struct vc_data *vc, bool set) + { +- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; +- struct fbcon_ops *ops = info->fbcon_par; +- struct display *p = &fb_display[vc->vc_num]; +- int resize; +- int cnt; +- char *old_data = NULL; +- +- if (CON_IS_VISIBLE(vc) && softback_lines) +- fbcon_set_origin(vc); +- +- resize = (w != vc->vc_font.width) || (h != vc->vc_font.height); +- if (p->userfont) +- old_data = vc->vc_font.data; +- if (userfont) +- cnt = FNTCHARCNT(data); +- else +- cnt = 256; +- vc->vc_font.data = (void *)(p->fontdata = data); +- if ((p->userfont = userfont)) +- REFCOUNT(data)++; +- vc->vc_font.width = w; +- vc->vc_font.height = h; +- if (vc->vc_hi_font_mask && cnt == 256) { ++ if (!set) { + vc->vc_hi_font_mask = 0; + if (vc->vc_can_do_color) { + vc->vc_complement_mask >>= 1; +@@ -2514,7 +2497,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, + ((c & 0xfe00) >> 1) | (c & 0xff); + vc->vc_attr >>= 1; + } +- } else if (!vc->vc_hi_font_mask && cnt == 512) { ++ } else { + vc->vc_hi_font_mask = 0x100; + if (vc->vc_can_do_color) { + vc->vc_complement_mask <<= 1; +@@ -2546,8 +2529,38 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, + } else + vc->vc_video_erase_char = c & ~0x100; + } +- + } ++} ++ ++static int fbcon_do_set_font(struct vc_data *vc, int w, int h, ++ const u8 * data, int userfont) ++{ ++ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; ++ struct fbcon_ops *ops = info->fbcon_par; ++ struct display *p = &fb_display[vc->vc_num]; ++ int resize; ++ int cnt; ++ char *old_data = NULL; ++ ++ if (CON_IS_VISIBLE(vc) && softback_lines) ++ fbcon_set_origin(vc); ++ ++ resize = (w != vc->vc_font.width) || (h != vc->vc_font.height); ++ if (p->userfont) ++ old_data = vc->vc_font.data; ++ if (userfont) ++ cnt = FNTCHARCNT(data); ++ else ++ cnt = 256; ++ vc->vc_font.data = (void *)(p->fontdata = data); ++ if ((p->userfont = userfont)) ++ REFCOUNT(data)++; ++ vc->vc_font.width = w; ++ vc->vc_font.height = h; ++ if (vc->vc_hi_font_mask && cnt == 256) ++ set_vc_hi_font(vc, false); ++ else if (!vc->vc_hi_font_mask && cnt == 512) ++ set_vc_hi_font(vc, true); + + if (resize) { + int cols, rows; +diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c +index cd005c227a23..d026bbb4e501 100644 +--- a/drivers/video/xen-fbfront.c ++++ b/drivers/video/xen-fbfront.c +@@ -643,7 +643,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev, + break; + + case XenbusStateInitWait: +-InitWait: + xenbus_switch_state(dev, XenbusStateConnected); + break; + +@@ -654,7 +653,8 @@ InitWait: + * get Connected twice here. 
+ */ + if (dev->state != XenbusStateConnected) +- goto InitWait; /* no InitWait seen yet, fudge it */ ++ /* no InitWait seen yet, fudge it */ ++ xenbus_switch_state(dev, XenbusStateConnected); + + if (xenbus_scanf(XBT_NIL, info->xbdev->otherend, + "request-update", "%d", &val) < 0) +diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c +index 148e8ea1bc96..3d42cde02864 100644 +--- a/drivers/virtio/virtio_balloon.c ++++ b/drivers/virtio/virtio_balloon.c +@@ -349,6 +349,8 @@ static int init_vqs(struct virtio_balloon *vb) + * Prime this virtqueue with one buffer so the hypervisor can + * use it to signal us later. + */ ++ update_balloon_stats(vb); ++ + sg_init_one(&sg, vb->stats, sizeof vb->stats); + if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL) + < 0) +diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c +index 8ca1030675a6..131501b3b11b 100644 +--- a/drivers/vme/bridges/vme_ca91cx42.c ++++ b/drivers/vme/bridges/vme_ca91cx42.c +@@ -464,7 +464,7 @@ static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled, + vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]); + pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]); + +- *pci_base = (dma_addr_t)vme_base + pci_offset; ++ *pci_base = (dma_addr_t)*vme_base + pci_offset; + *size = (unsigned long long)((vme_bound - *vme_base) + granularity); + + *enabled = 0; +diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c +index 019fc5a68a14..f26f38ccd194 100644 +--- a/fs/btrfs/delayed-inode.c ++++ b/fs/btrfs/delayed-inode.c +@@ -1843,14 +1843,6 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, + struct btrfs_delayed_node *delayed_node; + int ret = 0; + +- /* +- * we don't do delayed inode updates during log recovery because it +- * leads to enospc problems. This means we also can't do +- * delayed inode refs +- */ +- if (BTRFS_I(inode)->root->fs_info->log_root_recovering) +- return -EAGAIN; +- + delayed_node = btrfs_get_or_create_delayed_node(inode); + if (IS_ERR(delayed_node)) + return PTR_ERR(delayed_node); +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index be7e31a933e5..7f0d9be6e9bc 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -4661,11 +4661,20 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, + lock_page(page); + } + locked_pages++; ++ } ++ /* ++ * We need to firstly lock all pages to make sure that ++ * the uptodate bit of our pages won't be affected by ++ * clear_extent_buffer_uptodate(). 
++ */ ++ for (i = start_i; i < num_pages; i++) { ++ page = eb->pages[i]; + if (!PageUptodate(page)) { + num_reads++; + all_uptodate = 0; + } + } ++ + if (all_uptodate) { + if (start_i == 0) + set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); +diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h +index 37e4a72a7d1c..ae4e35bdc2cd 100644 +--- a/fs/cifs/cifs_fs_sb.h ++++ b/fs/cifs/cifs_fs_sb.h +@@ -45,6 +45,9 @@ + #define CIFS_MOUNT_POSIXACL 0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */ + #define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */ + #define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */ ++#define CIFS_MOUNT_USE_PREFIX_PATH 0x1000000 /* make subpath with unaccessible ++ * root mountable ++ */ + + struct cifs_sb_info { + struct rb_root tlink_tree; +@@ -65,5 +68,6 @@ struct cifs_sb_info { + char *mountdata; /* options received at mount time or via DFS refs */ + struct backing_dev_info bdi; + struct delayed_work prune_tlinks; ++ char *prepath; + }; + #endif /* _CIFS_FS_SB_H */ +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c +index 3752b9f6d9e4..e4e2152b7888 100644 +--- a/fs/cifs/cifsfs.c ++++ b/fs/cifs/cifsfs.c +@@ -565,6 +565,9 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb) + char *s, *p; + char sep; + ++ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) ++ return dget(sb->s_root); ++ + full_path = cifs_build_path_to_root(vol, cifs_sb, + cifs_sb_master_tcon(cifs_sb)); + if (full_path == NULL) +@@ -644,10 +647,14 @@ cifs_do_mount(struct file_system_type *fs_type, + cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL); + if (cifs_sb->mountdata == NULL) { + root = ERR_PTR(-ENOMEM); +- goto out_cifs_sb; ++ goto out_free; + } + +- cifs_setup_cifs_sb(volume_info, cifs_sb); ++ rc = cifs_setup_cifs_sb(volume_info, cifs_sb); ++ if (rc) { ++ root = ERR_PTR(rc); ++ goto out_free; ++ } + + rc = cifs_mount(cifs_sb, volume_info); + if (rc) { +@@ -655,7 +662,7 @@ cifs_do_mount(struct file_system_type *fs_type, + cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n", + rc); + root = ERR_PTR(rc); +- goto out_mountdata; ++ goto out_free; + } + + mnt_data.vol = volume_info; +@@ -698,9 +705,9 @@ out: + cifs_cleanup_volume_info(volume_info); + return root; + +-out_mountdata: ++out_free: ++ kfree(cifs_sb->prepath); + kfree(cifs_sb->mountdata); +-out_cifs_sb: + kfree(cifs_sb); + out_nls: + unload_nls(volume_info->local_nls); +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h +index f74dfa89c4c4..b7f589918571 100644 +--- a/fs/cifs/cifsglob.h ++++ b/fs/cifs/cifsglob.h +@@ -576,6 +576,8 @@ struct TCP_Server_Info { + #ifdef CONFIG_CIFS_SMB2 + unsigned int max_read; + unsigned int max_write; ++ struct delayed_work reconnect; /* reconnect workqueue job */ ++ struct mutex reconnect_mutex; /* prevent simultaneous reconnects */ + #endif /* CONFIG_CIFS_SMB2 */ + }; + +@@ -750,6 +752,7 @@ cap_unix(struct cifs_ses *ses) + struct cifs_tcon { + struct list_head tcon_list; + int tc_count; ++ struct list_head rlist; /* reconnect list */ + struct list_head openFileList; + struct cifs_ses *ses; /* pointer to session associated with */ + char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */ +@@ -823,7 +826,6 @@ struct cifs_tcon { + bool need_reconnect:1; /* connection reset, tid now invalid */ + #ifdef CONFIG_CIFS_SMB2 + bool print:1; /* set if connection to printer share */ +- bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */ + __u32 capabilities; + __u32 share_flags; + __u32 maximal_access; +diff 
--git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h +index dda188a94332..871a30966736 100644 +--- a/fs/cifs/cifsproto.h ++++ b/fs/cifs/cifsproto.h +@@ -174,7 +174,7 @@ extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, + extern int cifs_readv_from_socket(struct TCP_Server_Info *server, + struct kvec *iov_orig, unsigned int nr_segs, + unsigned int to_read); +-extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, ++extern int cifs_setup_cifs_sb(struct smb_vol *pvolume_info, + struct cifs_sb_info *cifs_sb); + extern int cifs_match_super(struct super_block *, void *); + extern void cifs_cleanup_volume_info(struct smb_vol *pvolume_info); +@@ -194,6 +194,9 @@ extern void cifs_add_pending_open_locked(struct cifs_fid *fid, + struct tcon_link *tlink, + struct cifs_pending_open *open); + extern void cifs_del_pending_open(struct cifs_pending_open *open); ++extern void cifs_put_tcp_session(struct TCP_Server_Info *server, ++ int from_reconnect); ++extern void cifs_put_tcon(struct cifs_tcon *tcon); + + #if IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) + extern void cifs_dfs_release_automount_timer(void); +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c +index 7c33afd7d5d3..417ce0a497f4 100644 +--- a/fs/cifs/connect.c ++++ b/fs/cifs/connect.c +@@ -52,6 +52,9 @@ + #include "nterr.h" + #include "rfc1002pdu.h" + #include "fscache.h" ++#ifdef CONFIG_CIFS_SMB2 ++#include "smb2proto.h" ++#endif + + #define CIFS_PORT 445 + #define RFC1001_PORT 139 +@@ -2070,8 +2073,8 @@ cifs_find_tcp_session(struct smb_vol *vol) + return NULL; + } + +-static void +-cifs_put_tcp_session(struct TCP_Server_Info *server) ++void ++cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect) + { + struct task_struct *task; + +@@ -2088,6 +2091,19 @@ cifs_put_tcp_session(struct TCP_Server_Info *server) + + cancel_delayed_work_sync(&server->echo); + ++#ifdef CONFIG_CIFS_SMB2 ++ if (from_reconnect) ++ /* ++ * Avoid deadlock here: reconnect work calls ++ * cifs_put_tcp_session() at its end. Need to be sure ++ * that reconnect work does nothing with server pointer after ++ * that step. 
++ */ ++ cancel_delayed_work(&server->reconnect); ++ else ++ cancel_delayed_work_sync(&server->reconnect); ++#endif ++ + spin_lock(&GlobalMid_Lock); + server->tcpStatus = CifsExiting; + spin_unlock(&GlobalMid_Lock); +@@ -2158,6 +2174,10 @@ cifs_get_tcp_session(struct smb_vol *volume_info) + INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); + INIT_LIST_HEAD(&tcp_ses->smb_ses_list); + INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request); ++#ifdef CONFIG_CIFS_SMB2 ++ INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server); ++ mutex_init(&tcp_ses->reconnect_mutex); ++#endif + memcpy(&tcp_ses->srcaddr, &volume_info->srcaddr, + sizeof(tcp_ses->srcaddr)); + memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr, +@@ -2288,7 +2308,7 @@ cifs_put_smb_ses(struct cifs_ses *ses) + _free_xid(xid); + } + sesInfoFree(ses); +- cifs_put_tcp_session(server); ++ cifs_put_tcp_session(server, 0); + } + + #ifdef CONFIG_KEYS +@@ -2461,7 +2481,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) + mutex_unlock(&ses->session_mutex); + + /* existing SMB ses has a server reference already */ +- cifs_put_tcp_session(server); ++ cifs_put_tcp_session(server, 0); + free_xid(xid); + return ses; + } +@@ -2550,7 +2570,7 @@ cifs_find_tcon(struct cifs_ses *ses, const char *unc) + return NULL; + } + +-static void ++void + cifs_put_tcon(struct cifs_tcon *tcon) + { + unsigned int xid; +@@ -2715,6 +2735,24 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data) + return 1; + } + ++static int ++match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data) ++{ ++ struct cifs_sb_info *old = CIFS_SB(sb); ++ struct cifs_sb_info *new = mnt_data->cifs_sb; ++ ++ if (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) { ++ if (!(new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)) ++ return 0; ++ /* The prepath should be null terminated strings */ ++ if (strcmp(new->prepath, old->prepath)) ++ return 0; ++ ++ return 1; ++ } ++ return 0; ++} ++ + int + cifs_match_super(struct super_block *sb, void *data) + { +@@ -2742,7 +2780,8 @@ cifs_match_super(struct super_block *sb, void *data) + + if (!match_server(tcp_srv, volume_info) || + !match_session(ses, volume_info) || +- !match_tcon(tcon, volume_info->UNC)) { ++ !match_tcon(tcon, volume_info->UNC) || ++ !match_prepath(sb, mnt_data)) { + rc = 0; + goto out; + } +@@ -3158,7 +3197,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon, + } + } + +-void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, ++int cifs_setup_cifs_sb(struct smb_vol *pvolume_info, + struct cifs_sb_info *cifs_sb) + { + INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks); +@@ -3240,6 +3279,15 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, + + if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm)) + cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n"); ++ ++ ++ if (pvolume_info->prepath) { ++ cifs_sb->prepath = kstrdup(pvolume_info->prepath, GFP_KERNEL); ++ if (cifs_sb->prepath == NULL) ++ return -ENOMEM; ++ } ++ ++ return 0; + } + + static void +@@ -3410,6 +3458,44 @@ cifs_get_volume_info(char *mount_data, const char *devname) + return volume_info; + } + ++static int ++cifs_are_all_path_components_accessible(struct TCP_Server_Info *server, ++ unsigned int xid, ++ struct cifs_tcon *tcon, ++ struct cifs_sb_info *cifs_sb, ++ char *full_path) ++{ ++ int rc; ++ char *s; ++ char sep, tmp; ++ ++ sep = CIFS_DIR_SEP(cifs_sb); ++ s = full_path; ++ ++ rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, ""); ++ while 
(rc == 0) { ++ /* skip separators */ ++ while (*s == sep) ++ s++; ++ if (!*s) ++ break; ++ /* next separator */ ++ while (*s && *s != sep) ++ s++; ++ ++ /* ++ * temporarily null-terminate the path at the end of ++ * the current component ++ */ ++ tmp = *s; ++ *s = 0; ++ rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, ++ full_path); ++ *s = tmp; ++ } ++ return rc; ++} ++ + int + cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) + { +@@ -3536,6 +3622,17 @@ remote_path_check: + kfree(full_path); + goto mount_fail_check; + } ++ if (rc != -EREMOTE) { ++ rc = cifs_are_all_path_components_accessible(server, ++ xid, tcon, cifs_sb, ++ full_path); ++ if (rc != 0) { ++ cifs_dbg(VFS, "cannot query dirs between root and final path, " ++ "enabling CIFS_MOUNT_USE_PREFIX_PATH\n"); ++ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; ++ rc = 0; ++ } ++ } + kfree(full_path); + } + +@@ -3599,7 +3696,7 @@ mount_fail_check: + else if (ses) + cifs_put_smb_ses(ses); + else +- cifs_put_tcp_session(server); ++ cifs_put_tcp_session(server, 0); + bdi_destroy(&cifs_sb->bdi); + } + +@@ -3793,6 +3890,7 @@ cifs_umount(struct cifs_sb_info *cifs_sb) + + bdi_destroy(&cifs_sb->bdi); + kfree(cifs_sb->mountdata); ++ kfree(cifs_sb->prepath); + unload_nls(cifs_sb->local_nls); + kfree(cifs_sb); + } +@@ -3932,7 +4030,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) + ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info); + if (IS_ERR(ses)) { + tcon = (struct cifs_tcon *)ses; +- cifs_put_tcp_session(master_tcon->ses->server); ++ cifs_put_tcp_session(master_tcon->ses->server, 0); + goto out; + } + +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c +index a998c929286f..543124703e05 100644 +--- a/fs/cifs/dir.c ++++ b/fs/cifs/dir.c +@@ -83,6 +83,7 @@ build_path_from_dentry(struct dentry *direntry) + struct dentry *temp; + int namelen; + int dfsplen; ++ int pplen = 0; + char *full_path; + char dirsep; + struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); +@@ -94,8 +95,12 @@ build_path_from_dentry(struct dentry *direntry) + dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1); + else + dfsplen = 0; ++ ++ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) ++ pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0; ++ + cifs_bp_rename_retry: +- namelen = dfsplen; ++ namelen = dfsplen + pplen; + seq = read_seqbegin(&rename_lock); + rcu_read_lock(); + for (temp = direntry; !IS_ROOT(temp);) { +@@ -136,7 +141,7 @@ cifs_bp_rename_retry: + } + } + rcu_read_unlock(); +- if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) { ++ if (namelen != dfsplen + pplen || read_seqretry(&rename_lock, seq)) { + cifs_dbg(FYI, "did not end path lookup where expected. 
namelen=%ddfsplen=%d\n", + namelen, dfsplen); + /* presumably this is only possible if racing with a rename +@@ -152,6 +157,17 @@ cifs_bp_rename_retry: + those safely to '/' if any are found in the middle of the prepath */ + /* BB test paths to Windows with '/' in the midst of prepath */ + ++ if (pplen) { ++ int i; ++ ++ cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath); ++ memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1); ++ full_path[dfsplen] = '\\'; ++ for (i = 0; i < pplen-1; i++) ++ if (full_path[dfsplen+1+i] == '/') ++ full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb); ++ } ++ + if (dfsplen) { + strncpy(full_path, tcon->treeName, dfsplen); + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) { +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c +index 54304ccae7e7..971e7bea5d80 100644 +--- a/fs/cifs/inode.c ++++ b/fs/cifs/inode.c +@@ -895,12 +895,29 @@ struct inode *cifs_root_iget(struct super_block *sb) + struct inode *inode = NULL; + long rc; + struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); ++ char *path = NULL; ++ int len; ++ ++ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) ++ && cifs_sb->prepath) { ++ len = strlen(cifs_sb->prepath); ++ path = kzalloc(len + 2 /* leading sep + null */, GFP_KERNEL); ++ if (path == NULL) ++ return ERR_PTR(-ENOMEM); ++ path[0] = '/'; ++ memcpy(path+1, cifs_sb->prepath, len); ++ } else { ++ path = kstrdup("", GFP_KERNEL); ++ if (path == NULL) ++ return ERR_PTR(-ENOMEM); ++ } + + xid = get_xid(); ++ convert_delimiter(path, CIFS_DIR_SEP(cifs_sb)); + if (tcon->unix_ext) +- rc = cifs_get_inode_info_unix(&inode, "", sb, xid); ++ rc = cifs_get_inode_info_unix(&inode, path, sb, xid); + else +- rc = cifs_get_inode_info(&inode, "", NULL, sb, xid, NULL); ++ rc = cifs_get_inode_info(&inode, path, NULL, sb, xid, NULL); + + if (!inode) { + inode = ERR_PTR(rc); +@@ -928,6 +945,7 @@ struct inode *cifs_root_iget(struct super_block *sb) + } + + out: ++ kfree(path); + /* can not call macro free_xid here since in a void func + * TODO: This is no longer true + */ +diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c +index 610c6c24d41d..d97841e124ba 100644 +--- a/fs/cifs/smb1ops.c ++++ b/fs/cifs/smb1ops.c +@@ -891,6 +891,15 @@ cifs_dir_needs_close(struct cifsFileInfo *cfile) + return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle; + } + ++static bool ++cifs_can_echo(struct TCP_Server_Info *server) ++{ ++ if (server->tcpStatus == CifsGood) ++ return true; ++ ++ return false; ++} ++ + struct smb_version_operations smb1_operations = { + .send_cancel = send_nt_cancel, + .compare_fids = cifs_compare_fids, +@@ -923,6 +932,7 @@ struct smb_version_operations smb1_operations = { + .get_dfs_refer = CIFSGetDFSRefer, + .qfs_tcon = cifs_qfs_tcon, + .is_path_accessible = cifs_is_path_accessible, ++ .can_echo = cifs_can_echo, + .query_path_info = cifs_query_path_info, + .query_file_info = cifs_query_file_info, + .get_srv_inum = cifs_get_srv_inum, +diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c +index d801f63cddd0..866caf1d2bea 100644 +--- a/fs/cifs/smb2file.c ++++ b/fs/cifs/smb2file.c +@@ -266,7 +266,7 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile) + * and check it for zero before using. 
+ */ + max_buf = tlink_tcon(cfile->tlink)->ses->server->maxBuf; +- if (!max_buf) { ++ if (max_buf < sizeof(struct smb2_lock_element)) { + free_xid(xid); + return -EINVAL; + } +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index 9dd8c968d94e..04fd3946213f 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -254,7 +254,7 @@ out: + case SMB2_CHANGE_NOTIFY: + case SMB2_QUERY_INFO: + case SMB2_SET_INFO: +- return -EAGAIN; ++ rc = -EAGAIN; + } + unload_nls(nls_codepage); + return rc; +@@ -720,9 +720,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, + else + return -EIO; + +- if (tcon && tcon->bad_network_name) +- return -ENOENT; +- + unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL); + if (unc_path == NULL) + return -ENOMEM; +@@ -734,6 +731,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, + return -EINVAL; + } + ++ /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */ ++ if (tcon) ++ tcon->tid = 0; ++ + rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req); + if (rc) { + kfree(unc_path); +@@ -809,8 +810,6 @@ tcon_exit: + tcon_error_exit: + if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) { + cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); +- if (tcon) +- tcon->bad_network_name = true; + } + goto tcon_exit; + } +@@ -1239,6 +1238,54 @@ smb2_echo_callback(struct mid_q_entry *mid) + add_credits(server, credits_received, CIFS_ECHO_OP); + } + ++void smb2_reconnect_server(struct work_struct *work) ++{ ++ struct TCP_Server_Info *server = container_of(work, ++ struct TCP_Server_Info, reconnect.work); ++ struct cifs_ses *ses; ++ struct cifs_tcon *tcon, *tcon2; ++ struct list_head tmp_list; ++ int tcon_exist = false; ++ ++ /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */ ++ mutex_lock(&server->reconnect_mutex); ++ ++ INIT_LIST_HEAD(&tmp_list); ++ cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n"); ++ ++ spin_lock(&cifs_tcp_ses_lock); ++ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { ++ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { ++ if (tcon->need_reconnect) { ++ tcon->tc_count++; ++ list_add_tail(&tcon->rlist, &tmp_list); ++ tcon_exist = true; ++ } ++ } ++ } ++ /* ++ * Get the reference to server struct to be sure that the last call of ++ * cifs_put_tcon() in the loop below won't release the server pointer. 
++ */ ++ if (tcon_exist) ++ server->srv_count++; ++ ++ spin_unlock(&cifs_tcp_ses_lock); ++ ++ list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) { ++ smb2_reconnect(SMB2_ECHO, tcon); ++ list_del_init(&tcon->rlist); ++ cifs_put_tcon(tcon); ++ } ++ ++ cifs_dbg(FYI, "Reconnecting tcons finished\n"); ++ mutex_unlock(&server->reconnect_mutex); ++ ++ /* now we can safely release srv struct */ ++ if (tcon_exist) ++ cifs_put_tcp_session(server, 1); ++} ++ + int + SMB2_echo(struct TCP_Server_Info *server) + { +@@ -1251,32 +1298,11 @@ SMB2_echo(struct TCP_Server_Info *server) + cifs_dbg(FYI, "In echo request\n"); + + if (server->tcpStatus == CifsNeedNegotiate) { +- struct list_head *tmp, *tmp2; +- struct cifs_ses *ses; +- struct cifs_tcon *tcon; +- +- cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n"); +- spin_lock(&cifs_tcp_ses_lock); +- list_for_each(tmp, &server->smb_ses_list) { +- ses = list_entry(tmp, struct cifs_ses, smb_ses_list); +- list_for_each(tmp2, &ses->tcon_list) { +- tcon = list_entry(tmp2, struct cifs_tcon, +- tcon_list); +- /* add check for persistent handle reconnect */ +- if (tcon && tcon->need_reconnect) { +- spin_unlock(&cifs_tcp_ses_lock); +- rc = smb2_reconnect(SMB2_ECHO, tcon); +- spin_lock(&cifs_tcp_ses_lock); +- } +- } +- } +- spin_unlock(&cifs_tcp_ses_lock); ++ /* No need to send echo on newly established connections */ ++ queue_delayed_work(cifsiod_wq, &server->reconnect, 0); ++ return rc; + } + +- /* if no session, renegotiate failed above */ +- if (server->tcpStatus == CifsNeedNegotiate) +- return -EIO; +- + rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req); + if (rc) + return rc; +diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h +index 2aa3535e38ce..d0cd166ac887 100644 +--- a/fs/cifs/smb2proto.h ++++ b/fs/cifs/smb2proto.h +@@ -93,6 +93,7 @@ extern void smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock); + extern int smb2_unlock_range(struct cifsFileInfo *cfile, + struct file_lock *flock, const unsigned int xid); + extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile); ++extern void smb2_reconnect_server(struct work_struct *work); + + /* + * SMB2 Worker functions - most of protocol specific implementation details +diff --git a/fs/dcache.c b/fs/dcache.c +index 2d0b9d2f3c43..f4fd9651421c 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -2405,6 +2405,12 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) + dentry->d_parent = dentry; + list_del_init(&dentry->d_child); + anon->d_parent = dparent; ++ if (likely(!d_unhashed(anon))) { ++ hlist_bl_lock(&anon->d_sb->s_anon); ++ __hlist_bl_del(&anon->d_hash); ++ anon->d_hash.pprev = NULL; ++ hlist_bl_unlock(&anon->d_sb->s_anon); ++ } + list_move(&anon->d_child, &dparent->d_subdirs); + + write_seqcount_end(&dentry->d_seq); +@@ -2459,7 +2465,6 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) + * could splice into our tree? */ + __d_materialise_dentry(dentry, alias); + write_sequnlock(&rename_lock); +- __d_drop(alias); + goto found; + } else { + /* Nope, but we must(!) 
avoid directory +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 5fb975495c2d..1095d77c2a9d 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -73,10 +73,9 @@ static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw, + csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, + csum_size); + offset += csum_size; +- csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset, +- EXT4_INODE_SIZE(inode->i_sb) - +- offset); + } ++ csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset, ++ EXT4_INODE_SIZE(inode->i_sb) - offset); + } + + return csum; +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index 83ed61a6cfcb..cba1fc678eec 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -3063,6 +3063,13 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, + if (ar->pright && start + size - 1 >= ar->lright) + size -= start + size - ar->lright; + ++ /* ++ * Trim allocation request for filesystems with artificially small ++ * groups. ++ */ ++ if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) ++ size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb); ++ + end = start + size; + + /* check we don't cross already preallocated blocks */ +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index faa192087033..1fe383f22ab1 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -753,6 +753,7 @@ static void ext4_put_super(struct super_block *sb) + { + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct ext4_super_block *es = sbi->s_es; ++ int aborted = 0; + int i, err; + + ext4_unregister_li_request(sb); +@@ -762,9 +763,10 @@ static void ext4_put_super(struct super_block *sb) + destroy_workqueue(sbi->dio_unwritten_wq); + + if (sbi->s_journal) { ++ aborted = is_journal_aborted(sbi->s_journal); + err = jbd2_journal_destroy(sbi->s_journal); + sbi->s_journal = NULL; +- if (err < 0) ++ if ((err < 0) && !aborted) + ext4_abort(sb, "Couldn't clean up the journal"); + } + +@@ -775,7 +777,7 @@ static void ext4_put_super(struct super_block *sb) + ext4_ext_release(sb); + ext4_xattr_put_super(sb); + +- if (!(sb->s_flags & MS_RDONLY)) { ++ if (!(sb->s_flags & MS_RDONLY) && !aborted) { + EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); + es->s_state = cpu_to_le16(sbi->s_mount_state); + } +@@ -3185,10 +3187,15 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp, + ext4_set_bit(s++, buf); + count++; + } +- for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) { +- ext4_set_bit(EXT4_B2C(sbi, s++), buf); +- count++; ++ j = ext4_bg_num_gdb(sb, grp); ++ if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) { ++ ext4_error(sb, "Invalid number of block group " ++ "descriptor blocks: %d", j); ++ j = EXT4_BLOCKS_PER_GROUP(sb) - s; + } ++ count += j; ++ for (; j > 0; j--) ++ ext4_set_bit(EXT4_B2C(sbi, s++), buf); + } + if (!count) + return 0; +@@ -3291,7 +3298,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + char *orig_data = kstrdup(data, GFP_KERNEL); + struct buffer_head *bh; + struct ext4_super_block *es = NULL; +- struct ext4_sb_info *sbi; ++ struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); + ext4_fsblk_t block; + ext4_fsblk_t sb_block = get_sb_block(&data); + ext4_fsblk_t logical_sb_block; +@@ -3311,16 +3318,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; + ext4_group_t first_not_zeroed; + +- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); +- if (!sbi) +- goto out_free_orig; ++ if ((data && !orig_data) || !sbi) ++ goto out_free_base; + + sbi->s_blockgroup_lock = + kzalloc(sizeof(struct 
blockgroup_lock), GFP_KERNEL); +- if (!sbi->s_blockgroup_lock) { +- kfree(sbi); +- goto out_free_orig; +- } ++ if (!sbi->s_blockgroup_lock) ++ goto out_free_base; ++ + sb->s_fs_info = sbi; + sbi->s_sb = sb; + sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS; +@@ -3463,11 +3468,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + */ + sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; + +- if (!parse_options((char *) sbi->s_es->s_mount_opts, sb, +- &journal_devnum, &journal_ioprio, 0)) { +- ext4_msg(sb, KERN_WARNING, +- "failed to parse options in superblock: %s", +- sbi->s_es->s_mount_opts); ++ if (sbi->s_es->s_mount_opts[0]) { ++ char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts, ++ sizeof(sbi->s_es->s_mount_opts), ++ GFP_KERNEL); ++ if (!s_mount_opts) ++ goto failed_mount; ++ if (!parse_options(s_mount_opts, sb, &journal_devnum, ++ &journal_ioprio, 0)) { ++ ext4_msg(sb, KERN_WARNING, ++ "failed to parse options in superblock: %s", ++ s_mount_opts); ++ } ++ kfree(s_mount_opts); + } + sbi->s_def_mount_opt = sbi->s_mount_opt; + if (!parse_options((char *) data, sb, &journal_devnum, +@@ -3614,12 +3627,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + + sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); + sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); +- if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0) +- goto cantfind_ext4; + + sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb); + if (sbi->s_inodes_per_block == 0) + goto cantfind_ext4; ++ if (sbi->s_inodes_per_group < sbi->s_inodes_per_block || ++ sbi->s_inodes_per_group > blocksize * 8) { ++ ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n", ++ sbi->s_blocks_per_group); ++ goto failed_mount; ++ } + sbi->s_itb_per_group = sbi->s_inodes_per_group / + sbi->s_inodes_per_block; + sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb); +@@ -3703,13 +3720,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + } + sbi->s_cluster_ratio = clustersize / blocksize; + +- if (sbi->s_inodes_per_group > blocksize * 8) { +- ext4_msg(sb, KERN_ERR, +- "#inodes per group too big: %lu", +- sbi->s_inodes_per_group); +- goto failed_mount; +- } +- + /* Do we have standard group size of clustersize * 8 blocks ? 
*/ + if (sbi->s_blocks_per_group == clustersize << 3) + set_opt2(sb, STD_GROUP_SIZE); +@@ -3769,6 +3779,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); + db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / + EXT4_DESC_PER_BLOCK(sb); ++ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG)) { ++ if (le32_to_cpu(es->s_first_meta_bg) > db_count) { ++ ext4_msg(sb, KERN_WARNING, ++ "first meta block group too large: %u " ++ "(group descriptor block count %u)", ++ le32_to_cpu(es->s_first_meta_bg), db_count); ++ goto failed_mount; ++ } ++ } + sbi->s_group_desc = ext4_kvmalloc(db_count * + sizeof(struct buffer_head *), + GFP_KERNEL); +@@ -3884,7 +3903,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + */ + if (!test_opt(sb, NOLOAD) && + EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) { +- if (ext4_load_journal(sb, es, journal_devnum)) ++ err = ext4_load_journal(sb, es, journal_devnum); ++ if (err) + goto failed_mount3; + } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) && + EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) { +@@ -4099,7 +4119,9 @@ no_journal: + } + + ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. " +- "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts, ++ "Opts: %.*s%s%s", descr, ++ (int) sizeof(sbi->s_es->s_mount_opts), ++ sbi->s_es->s_mount_opts, + *sbi->s_es->s_mount_opts ? "; " : "", orig_data); + + if (es->s_error_count) +@@ -4167,8 +4189,8 @@ failed_mount: + out_fail: + sb->s_fs_info = NULL; + kfree(sbi->s_blockgroup_lock); ++out_free_base: + kfree(sbi); +-out_free_orig: + kfree(orig_data); + return err ? err : ret; + } +diff --git a/fs/fat/inode.c b/fs/fat/inode.c +index 5d4513cb1b3c..04708fab99aa 100644 +--- a/fs/fat/inode.c ++++ b/fs/fat/inode.c +@@ -1193,6 +1193,16 @@ out: + return 0; + } + ++static void fat_dummy_inode_init(struct inode *inode) ++{ ++ /* Initialize this dummy inode to work as no-op. 
*/ ++ MSDOS_I(inode)->mmu_private = 0; ++ MSDOS_I(inode)->i_start = 0; ++ MSDOS_I(inode)->i_logstart = 0; ++ MSDOS_I(inode)->i_attrs = 0; ++ MSDOS_I(inode)->i_pos = 0; ++} ++ + static int fat_read_root(struct inode *inode) + { + struct super_block *sb = inode->i_sb; +@@ -1503,12 +1513,13 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, + fat_inode = new_inode(sb); + if (!fat_inode) + goto out_fail; +- MSDOS_I(fat_inode)->i_pos = 0; ++ fat_dummy_inode_init(fat_inode); + sbi->fat_inode = fat_inode; + + fsinfo_inode = new_inode(sb); + if (!fsinfo_inode) + goto out_fail; ++ fat_dummy_inode_init(fsinfo_inode); + fsinfo_inode->i_ino = MSDOS_FSINFO_INO; + sbi->fsinfo_inode = fsinfo_inode; + insert_inode_hash(fsinfo_inode); +diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c +index b631c9043460..9aaa6db3e4ba 100644 +--- a/fs/gfs2/dir.c ++++ b/fs/gfs2/dir.c +@@ -763,7 +763,7 @@ static int get_first_leaf(struct gfs2_inode *dip, u32 index, + int error; + + error = get_leaf_nr(dip, index, &leaf_no); +- if (!error) ++ if (!IS_ERR_VALUE(error)) + error = get_leaf(dip, leaf_no, bh_out); + + return error; +@@ -974,7 +974,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name) + + index = name->hash >> (32 - dip->i_depth); + error = get_leaf_nr(dip, index, &leaf_no); +- if (error) ++ if (IS_ERR_VALUE(error)) + return error; + + /* Get the old leaf block */ +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c +index 4e5f332f15d9..db7d89cea2ce 100644 +--- a/fs/hugetlbfs/inode.c ++++ b/fs/hugetlbfs/inode.c +@@ -169,7 +169,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +diff --git a/fs/ioprio.c b/fs/ioprio.c +index 31666c92b46a..563435684c3c 100644 +--- a/fs/ioprio.c ++++ b/fs/ioprio.c +@@ -149,8 +149,10 @@ static int get_task_ioprio(struct task_struct *p) + if (ret) + goto out; + ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM); ++ task_lock(p); + if (p->io_context) + ret = p->io_context->ioprio; ++ task_unlock(p); + out: + return ret; + } +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c +index 21b828c713cc..54e958125c1f 100644 +--- a/fs/jbd2/transaction.c ++++ b/fs/jbd2/transaction.c +@@ -1655,7 +1655,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) + + __blist_del_buffer(list, jh); + jh->b_jlist = BJ_None; +- if (test_clear_buffer_jbddirty(bh)) ++ if (transaction && is_journal_aborted(transaction->t_journal)) ++ clear_buffer_jbddirty(bh); ++ else if (test_clear_buffer_jbddirty(bh)) + mark_buffer_dirty(bh); /* Expose it to the VM */ + } + +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c +index e093e73178b7..48038e7adb1e 100644 +--- a/fs/nfs/dir.c ++++ b/fs/nfs/dir.c +@@ -1435,6 +1435,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, + switch (err) { + case -ENOENT: + d_add(dentry, NULL); ++ nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); + break; + case -EISDIR: + case -ENOTDIR: +diff --git a/fs/nfs/file.c b/fs/nfs/file.c +index a87a44f84113..f8bd4ea2a891 100644 +--- a/fs/nfs/file.c ++++ b/fs/nfs/file.c +@@ -419,7 +419,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping, + */ + if (!PageUptodate(page)) { + unsigned pglen = nfs_page_length(page); +- unsigned end = offset + len; ++ unsigned end = offset + copied; + + if (pglen == 0) { + 
zero_user_segments(page, 0, offset, +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index c2b89a1a403b..c1148e87d533 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -4047,7 +4047,7 @@ out: + */ + static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) + { +- struct page *pages[NFS4ACL_MAXPAGES] = {NULL, }; ++ struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, }; + struct nfs_getaclargs args = { + .fh = NFS_FH(inode), + .acl_pages = pages, +@@ -4061,13 +4061,9 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu + .rpc_argp = &args, + .rpc_resp = &res, + }; +- unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); ++ unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1; + int ret = -ENOMEM, i; + +- /* As long as we're doing a round trip to the server anyway, +- * let's be prepared for a page of acl data. */ +- if (npages == 0) +- npages = 1; + if (npages > ARRAY_SIZE(pages)) + return -ERANGE; + +diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c +index 8016892f3f05..879b56d2f722 100644 +--- a/fs/nfsd/nfssvc.c ++++ b/fs/nfsd/nfssvc.c +@@ -627,6 +627,37 @@ static __be32 map_new_errors(u32 vers, __be32 nfserr) + return nfserr; + } + ++/* ++ * A write procedure can have a large argument, and a read procedure can ++ * have a large reply, but no NFSv2 or NFSv3 procedure has argument and ++ * reply that can both be larger than a page. The xdr code has taken ++ * advantage of this assumption to be a sloppy about bounds checking in ++ * some cases. Pending a rewrite of the NFSv2/v3 xdr code to fix that ++ * problem, we enforce these assumptions here: ++ */ ++static bool nfs_request_too_big(struct svc_rqst *rqstp, ++ struct svc_procedure *proc) ++{ ++ /* ++ * The ACL code has more careful bounds-checking and is not ++ * susceptible to this problem: ++ */ ++ if (rqstp->rq_prog != NFS_PROGRAM) ++ return false; ++ /* ++ * Ditto NFSv4 (which can in theory have argument and reply both ++ * more than a page): ++ */ ++ if (rqstp->rq_vers >= 4) ++ return false; ++ /* The reply will be small, we're OK: */ ++ if (proc->pc_xdrressize > 0 && ++ proc->pc_xdrressize < XDR_QUADLEN(PAGE_SIZE)) ++ return false; ++ ++ return rqstp->rq_arg.len > PAGE_SIZE; ++} ++ + int + nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) + { +@@ -639,6 +670,11 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) + rqstp->rq_vers, rqstp->rq_proc); + proc = rqstp->rq_procinfo; + ++ if (nfs_request_too_big(rqstp, proc)) { ++ dprintk("nfsd: NFSv%d argument too large\n", rqstp->rq_vers); ++ *statp = rpc_garbage_args; ++ return 1; ++ } + /* + * Give the xdr decoder a chance to change this if it wants + * (necessary in the NFSv4.0 compound case) +diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c +index b294deb27d17..cf88dd4f8f36 100644 +--- a/fs/ocfs2/dlmglue.c ++++ b/fs/ocfs2/dlmglue.c +@@ -3264,6 +3264,16 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb, + mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name, + lockres->l_level, new_level); + ++ /* ++ * On DLM_LKF_VALBLK, fsdlm behaves differently with o2cb. It always ++ * expects DLM_LKF_VALBLK being set if the LKB has LVB, so that ++ * we can recover correctly from node failure. Otherwise, we may get ++ * invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set. 
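
nfsd_dispatch(), a few hunks up, now rejects NFSv2/v3 requests whose argument is larger than a page before any XDR decoding runs, answering with rpc_garbage_args so the sloppier per-procedure decoders never see the oversized buffer. A minimal user-space sketch of that predicate follows; request_too_big(), SKETCH_PAGE_SIZE and the reduced parameters are illustrative stand-ins, not kernel symbols:

    #include <stdbool.h>
    #include <stddef.h>

    #define SKETCH_PAGE_SIZE 4096u

    /* Mirror the three early-outs of the kernel check: non-NFS programs,
     * NFSv4 and later, and procedures with provably small replies are
     * exempt; everything else must fit its argument in one page. */
    static bool request_too_big(unsigned prog, unsigned vers,
                                size_t max_reply, size_t arg_len)
    {
        const unsigned NFS_PROG = 100003;  /* the registered NFS program number */

        if (prog != NFS_PROG)
            return false;
        if (vers >= 4)
            return false;
        if (max_reply > 0 && max_reply < SKETCH_PAGE_SIZE)
            return false;
        return arg_len > SKETCH_PAGE_SIZE;
    }
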
++ */ ++ if (!ocfs2_is_o2cb_active() && ++ lockres->l_ops->flags & LOCK_TYPE_USES_LVB) ++ lvb = 1; ++ + if (lvb) + dlm_flags |= DLM_LKF_VALBLK; + +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c +index 496af7fd87d5..86ed0f4aefc4 100644 +--- a/fs/ocfs2/file.c ++++ b/fs/ocfs2/file.c +@@ -1104,6 +1104,7 @@ out: + int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) + { + int status = 0, size_change; ++ int inode_locked = 0; + struct inode *inode = dentry->d_inode; + struct super_block *sb = inode->i_sb; + struct ocfs2_super *osb = OCFS2_SB(sb); +@@ -1149,6 +1150,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) + mlog_errno(status); + goto bail_unlock_rw; + } ++ inode_locked = 1; + + if (size_change && attr->ia_size != i_size_read(inode)) { + status = inode_newsize_ok(inode, attr->ia_size); +@@ -1229,7 +1231,10 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) + bail_commit: + ocfs2_commit_trans(osb, handle); + bail_unlock: +- ocfs2_inode_unlock(inode, 1); ++ if (status) { ++ ocfs2_inode_unlock(inode, 1); ++ inode_locked = 0; ++ } + bail_unlock_rw: + if (size_change) + ocfs2_rw_unlock(inode, 1); +@@ -1245,6 +1250,8 @@ bail: + if (status < 0) + mlog_errno(status); + } ++ if (inode_locked) ++ ocfs2_inode_unlock(inode, 1); + + return status; + } +diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c +index 39abf89697ed..88610b3cbc04 100644 +--- a/fs/ocfs2/stackglue.c ++++ b/fs/ocfs2/stackglue.c +@@ -48,6 +48,12 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl"; + */ + static struct ocfs2_stack_plugin *active_stack; + ++inline int ocfs2_is_o2cb_active(void) ++{ ++ return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB); ++} ++EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active); ++ + static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name) + { + struct ocfs2_stack_plugin *p; +diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h +index 1ec56fdb8d0d..fa49d8a1dc7b 100644 +--- a/fs/ocfs2/stackglue.h ++++ b/fs/ocfs2/stackglue.h +@@ -289,4 +289,7 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p + int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin); + void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin); + ++/* In ocfs2_downconvert_lock(), we need to know which stack we are using */ ++int ocfs2_is_o2cb_active(void); ++ + #endif /* STACKGLUE_H */ +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index b86db1236c7c..972cdc282b1a 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -279,11 +279,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) + + /* We don't show the stack guard page in /proc/maps */ + start = vma->vm_start; +- if (stack_guard_page_start(vma, start)) +- start += PAGE_SIZE; + end = vma->vm_end; +- if (stack_guard_page_end(vma, end)) +- end -= PAGE_SIZE; + + seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", + start, +diff --git a/fs/splice.c b/fs/splice.c +index 2ffa7b0c62fd..ce6ffe94ba26 100644 +--- a/fs/splice.c ++++ b/fs/splice.c +@@ -215,6 +215,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, + buf->len = spd->partial[page_nr].len; + buf->private = spd->partial[page_nr].private; + buf->ops = spd->ops; ++ buf->flags = 0; + if (spd->flags & SPLICE_F_GIFT) + buf->flags |= PIPE_BUF_FLAG_GIFT; + +diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c +index cfbb4c1b2f17..d738a7b842da 100644 +--- a/fs/xfs/xfs_aops.c ++++ b/fs/xfs/xfs_aops.c +@@ -158,6 +158,12 @@ xfs_setfilesize( + 
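
The ocfs2_setattr() hunk above threads an inode_locked flag through the exit paths so the cluster lock is released exactly once whether the function fails mid-way or succeeds. The same shape in a self-contained sketch (a pthread mutex stands in for the cluster lock; all names are illustrative):

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Error paths unlock on the spot and clear the flag; the success
     * path holds the lock until the single exit point, so no path can
     * unlock twice or forget to unlock. */
    static int setattr_like(int fail_before_lock, int fail_after_lock)
    {
        int status = 0;
        int locked = 0;

        if (fail_before_lock) {
            status = -1;
            goto out;
        }

        pthread_mutex_lock(&lock);
        locked = 1;

        if (fail_after_lock)
            status = -1;

        if (status) {
            pthread_mutex_unlock(&lock);
            locked = 0;
        }
    out:
        if (locked)
            pthread_mutex_unlock(&lock);
        return status;
    }
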
rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1], + 0, 1, _THIS_IP_); + ++ /* we abort the update if there was an IO error */ ++ if (ioend->io_error) { ++ xfs_trans_cancel(tp, 0); ++ return ioend->io_error; ++ } ++ + xfs_ilock(ip, XFS_ILOCK_EXCL); + isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size); + if (!isize) { +@@ -213,14 +219,17 @@ xfs_end_io( + ioend->io_error = -EIO; + goto done; + } +- if (ioend->io_error) +- goto done; + + /* + * For unwritten extents we need to issue transactions to convert a + * range to normal written extens after the data I/O has finished. ++ * Detecting and handling completion IO errors is done individually ++ * for each case as different cleanup operations need to be performed ++ * on error. + */ + if (ioend->io_type == XFS_IO_UNWRITTEN) { ++ if (ioend->io_error) ++ goto done; + error = xfs_iomap_write_unwritten(ip, ioend->io_offset, + ioend->io_size); + } else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) { +diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c +index 1b2472a46e46..8ff89db9e663 100644 +--- a/fs/xfs/xfs_buf.c ++++ b/fs/xfs/xfs_buf.c +@@ -428,6 +428,7 @@ retry: + out_free_pages: + for (i = 0; i < bp->b_page_count; i++) + __free_page(bp->b_pages[i]); ++ bp->b_flags &= ~_XBF_PAGES; + return error; + } + +diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c +index 7cf5e4eafe28..8325cb234d96 100644 +--- a/fs/xfs/xfs_log_recover.c ++++ b/fs/xfs/xfs_log_recover.c +@@ -3382,6 +3382,7 @@ xlog_recover_clear_agi_bucket( + agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); + offset = offsetof(xfs_agi_t, agi_unlinked) + + (sizeof(xfs_agino_t) * bucket); ++ xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF); + xfs_trans_log_buf(tp, agibp, offset, + (offset + sizeof(xfs_agino_t) - 1)); + +diff --git a/include/linux/capability.h b/include/linux/capability.h +index 9b4378af414c..eeb43c4816e5 100644 +--- a/include/linux/capability.h ++++ b/include/linux/capability.h +@@ -40,8 +40,6 @@ struct inode; + struct dentry; + struct user_namespace; + +-struct user_namespace *current_user_ns(void); +- + extern const kernel_cap_t __cap_empty_set; + extern const kernel_cap_t __cap_init_eff_set; + +diff --git a/include/linux/cred.h b/include/linux/cred.h +index 6c58dd7cb9ac..cd3fb73dc421 100644 +--- a/include/linux/cred.h ++++ b/include/linux/cred.h +@@ -345,7 +345,10 @@ extern struct user_namespace init_user_ns; + #ifdef CONFIG_USER_NS + #define current_user_ns() (current_cred_xxx(user_ns)) + #else +-#define current_user_ns() (&init_user_ns) ++static inline struct user_namespace *current_user_ns(void) ++{ ++ return &init_user_ns; ++} + #endif + + +diff --git a/include/linux/log2.h b/include/linux/log2.h +index fd7ff3d91e6a..f38fae23bdac 100644 +--- a/include/linux/log2.h ++++ b/include/linux/log2.h +@@ -16,12 +16,6 @@ + #include + + /* +- * deal with unrepresentable constant logarithms +- */ +-extern __attribute__((const, noreturn)) +-int ____ilog2_NaN(void); +- +-/* + * non-constant log of base 2 calculators + * - the arch may override these in asm/bitops.h if they can be implemented + * more efficiently than using fls() and fls64() +@@ -85,7 +79,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) + #define ilog2(n) \ + ( \ + __builtin_constant_p(n) ? ( \ +- (n) < 1 ? ____ilog2_NaN() : \ ++ (n) < 2 ? 0 : \ + (n) & (1ULL << 63) ? 63 : \ + (n) & (1ULL << 62) ? 62 : \ + (n) & (1ULL << 61) ? 61 : \ +@@ -148,10 +142,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) + (n) & (1ULL << 4) ? 
4 : \ + (n) & (1ULL << 3) ? 3 : \ + (n) & (1ULL << 2) ? 2 : \ +- (n) & (1ULL << 1) ? 1 : \ +- (n) & (1ULL << 0) ? 0 : \ +- ____ilog2_NaN() \ +- ) : \ ++ 1 ) : \ + (sizeof(n) <= 4) ? \ + __ilog2_u32(n) : \ + __ilog2_u64(n) \ +diff --git a/include/linux/mm.h b/include/linux/mm.h +index 55590f4fe110..d16f5243f952 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -1069,34 +1069,6 @@ int set_page_dirty(struct page *page); + int set_page_dirty_lock(struct page *page); + int clear_page_dirty_for_io(struct page *page); + +-/* Is the vma a continuation of the stack vma above it? */ +-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) +-{ +- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); +-} +- +-static inline int stack_guard_page_start(struct vm_area_struct *vma, +- unsigned long addr) +-{ +- return (vma->vm_flags & VM_GROWSDOWN) && +- (vma->vm_start == addr) && +- !vma_growsdown(vma->vm_prev, addr); +-} +- +-/* Is the vma a continuation of the stack vma below it? */ +-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) +-{ +- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); +-} +- +-static inline int stack_guard_page_end(struct vm_area_struct *vma, +- unsigned long addr) +-{ +- return (vma->vm_flags & VM_GROWSUP) && +- (vma->vm_end == addr) && +- !vma_growsup(vma->vm_next, addr); +-} +- + extern pid_t + vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group); + +@@ -1622,6 +1594,7 @@ unsigned long ra_submit(struct file_ra_state *ra, + struct address_space *mapping, + struct file *filp); + ++extern unsigned long stack_guard_gap; + /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ + extern int expand_stack(struct vm_area_struct *vma, unsigned long address); + +@@ -1650,6 +1623,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m + return vma; + } + ++static inline unsigned long vm_start_gap(struct vm_area_struct *vma) ++{ ++ unsigned long vm_start = vma->vm_start; ++ ++ if (vma->vm_flags & VM_GROWSDOWN) { ++ vm_start -= stack_guard_gap; ++ if (vm_start > vma->vm_start) ++ vm_start = 0; ++ } ++ return vm_start; ++} ++ ++static inline unsigned long vm_end_gap(struct vm_area_struct *vma) ++{ ++ unsigned long vm_end = vma->vm_end; ++ ++ if (vma->vm_flags & VM_GROWSUP) { ++ vm_end += stack_guard_gap; ++ if (vm_end < vma->vm_end) ++ vm_end = -PAGE_SIZE; ++ } ++ return vm_end; ++} ++ + static inline unsigned long vma_pages(struct vm_area_struct *vma) + { + return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; +diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h +index 7b8fc73810ad..f2c785958165 100644 +--- a/include/linux/nfs4.h ++++ b/include/linux/nfs4.h +@@ -223,7 +223,7 @@ enum nfsstat4 { + + static inline bool seqid_mutating_err(u32 err) + { +- /* rfc 3530 section 8.1.5: */ ++ /* See RFC 7530, section 9.1.7 */ + switch (err) { + case NFS4ERR_STALE_CLIENTID: + case NFS4ERR_STALE_STATEID: +@@ -232,6 +232,7 @@ static inline bool seqid_mutating_err(u32 err) + case NFS4ERR_BADXDR: + case NFS4ERR_RESOURCE: + case NFS4ERR_NOFILEHANDLE: ++ case NFS4ERR_MOVED: + return false; + }; + return true; +diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h +index e52958d7c2d1..3018528bd1bf 100644 +--- a/include/uapi/linux/can.h ++++ b/include/uapi/linux/can.h +@@ -158,5 +158,6 @@ struct can_filter { + }; + + #define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */ ++#define CAN_RAW_FILTER_MAX 
512 /* maximum number of can_filter set via setsockopt() */ + + #endif /* CAN_H */ +diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h +index b2cc0cd9c4d9..1a9de73e845d 100644 +--- a/include/uapi/linux/packet_diag.h ++++ b/include/uapi/linux/packet_diag.h +@@ -63,7 +63,7 @@ struct packet_diag_mclist { + __u32 pdmc_count; + __u16 pdmc_type; + __u16 pdmc_alen; +- __u8 pdmc_addr[MAX_ADDR_LEN]; ++ __u8 pdmc_addr[32]; /* MAX_ADDR_LEN */ + }; + + struct packet_diag_ring { +diff --git a/ipc/shm.c b/ipc/shm.c +index 08b14f69d6cf..ddfad445242c 100644 +--- a/ipc/shm.c ++++ b/ipc/shm.c +@@ -1041,8 +1041,8 @@ out_unlock1: + * "raddr" thing points to kernel space, and there has to be a wrapper around + * this. + */ +-long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, +- unsigned long shmlba) ++long do_shmat(int shmid, char __user *shmaddr, int shmflg, ++ ulong *raddr, unsigned long shmlba) + { + struct shmid_kernel *shp; + unsigned long addr; +@@ -1063,8 +1063,13 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, + goto out; + else if ((addr = (ulong)shmaddr)) { + if (addr & (shmlba - 1)) { +- if (shmflg & SHM_RND) +- addr &= ~(shmlba - 1); /* round down */ ++ /* ++ * Round down to the nearest multiple of shmlba. ++ * For sane do_mmap_pgoff() parameters, avoid ++ * round downs that trigger nil-page and MAP_FIXED. ++ */ ++ if ((shmflg & SHM_RND) && addr >= shmlba) ++ addr &= ~(shmlba - 1); + else + #ifndef __ARCH_FORCE_SHMLBA + if (addr & ~PAGE_MASK) +diff --git a/kernel/futex.c b/kernel/futex.c +index edc4beae4df1..3f7dd29f4d4d 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -2413,7 +2413,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + { + struct hrtimer_sleeper timeout, *to = NULL; + struct rt_mutex_waiter rt_waiter; +- struct rt_mutex *pi_mutex = NULL; + struct futex_hash_bucket *hb; + union futex_key key2 = FUTEX_KEY_INIT; + struct futex_q q = futex_q_init; +@@ -2494,6 +2493,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + if (q.pi_state && (q.pi_state->owner != current)) { + spin_lock(q.lock_ptr); + ret = fixup_pi_state_owner(uaddr2, &q, current); ++ if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) ++ rt_mutex_unlock(&q.pi_state->pi_mutex); + /* + * Drop the reference to the pi state which + * the requeue_pi() code acquired for us. +@@ -2502,6 +2503,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + spin_unlock(q.lock_ptr); + } + } else { ++ struct rt_mutex *pi_mutex; ++ + /* + * We have been woken up by futex_unlock_pi(), a timeout, or a + * signal. futex_unlock_pi() will not destroy the lock_ptr nor +@@ -2525,18 +2528,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + if (res) + ret = (res < 0) ? res : 0; + ++ /* ++ * If fixup_pi_state_owner() faulted and was unable to handle ++ * the fault, unlock the rt_mutex and return the fault to ++ * userspace. ++ */ ++ if (ret && rt_mutex_owner(pi_mutex) == current) ++ rt_mutex_unlock(pi_mutex); ++ + /* Unqueue and drop the lock. */ + unqueue_me_pi(&q); + } + +- /* +- * If fixup_pi_state_owner() faulted and was unable to handle the +- * fault, unlock the rt_mutex and return the fault to userspace. 
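
The do_shmat() hunk above only rounds a misaligned address down when SHM_RND is set and the address is at least one SHMLBA, so rounding can no longer produce a MAP_FIXED mapping at page zero. The rule in isolation (SKETCH_SHMLBA is a made-up constant; the real value is per-architecture):

    #include <stdint.h>

    #define SKETCH_SHMLBA 4096u

    /* Round addr down to a SHMLBA multiple, but refuse addresses that
     * would round down into the nil page instead of silently mapping it. */
    static int round_attach_addr(uintptr_t addr, int shm_rnd, uintptr_t *out)
    {
        if (addr & (SKETCH_SHMLBA - 1)) {
            if (!shm_rnd || addr < SKETCH_SHMLBA)
                return -1;
            addr &= ~(uintptr_t)(SKETCH_SHMLBA - 1);
        }
        *out = addr;
        return 0;
    }
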
+- */ +- if (ret == -EFAULT) { +- if (pi_mutex && rt_mutex_owner(pi_mutex) == current) +- rt_mutex_unlock(pi_mutex); +- } else if (ret == -EINTR) { ++ if (ret == -EINTR) { + /* + * We've already been requeued, but cannot restart by calling + * futex_lock_pi() directly. We could restart this syscall, but +@@ -2902,4 +2906,4 @@ static int __init futex_init(void) + + return 0; + } +-__initcall(futex_init); ++core_initcall(futex_init); +diff --git a/kernel/padata.c b/kernel/padata.c +index 072f4ee4eb89..0925ccf92c7f 100644 +--- a/kernel/padata.c ++++ b/kernel/padata.c +@@ -190,19 +190,20 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd) + + reorder = &next_queue->reorder; + ++ spin_lock(&reorder->lock); + if (!list_empty(&reorder->list)) { + padata = list_entry(reorder->list.next, + struct padata_priv, list); + +- spin_lock(&reorder->lock); + list_del_init(&padata->list); + atomic_dec(&pd->reorder_objects); +- spin_unlock(&reorder->lock); + + pd->processed++; + ++ spin_unlock(&reorder->lock); + goto out; + } ++ spin_unlock(&reorder->lock); + + if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) { + padata = ERR_PTR(-ENODATA); +diff --git a/kernel/printk.c b/kernel/printk.c +index ee8f6be7d8a9..8acc98aafa6e 100644 +--- a/kernel/printk.c ++++ b/kernel/printk.c +@@ -1270,7 +1270,7 @@ static void call_console_drivers(int level, const char *text, size_t len) + { + struct console *con; + +- trace_console(text, len); ++ trace_console_rcuidle(text, len); + + if (level >= console_loglevel && !ignore_loglevel) + return; +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 506e56ec56a9..0892cfa0fe34 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4745,7 +4745,8 @@ void show_state_filter(unsigned long state_filter) + touch_all_softlockup_watchdogs(); + + #ifdef CONFIG_SCHED_DEBUG +- sysrq_sched_debug_show(); ++ if (!state_filter) ++ sysrq_sched_debug_show(); + #endif + rcu_read_unlock(); + /* +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 4fd49fe1046d..430725da89d0 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -2224,6 +2224,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int + break; + if (neg) + continue; ++ val = convmul * val / convdiv; + if ((min && val < *min) || (max && val > *max)) + continue; + *i = val; +diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c +index 19ee339a1d0d..6f27814e1323 100644 +--- a/kernel/time/tick-broadcast.c ++++ b/kernel/time/tick-broadcast.c +@@ -775,6 +775,9 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) + { + int cpu = smp_processor_id(); + ++ if (!bc) ++ return; ++ + /* Set it up only once ! 
*/ + if (bc->event_handler != tick_handle_oneshot_broadcast) { + int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; +diff --git a/mm/filemap.c b/mm/filemap.c +index 725a10043244..72130787db0a 100644 +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -1123,6 +1123,11 @@ static void do_generic_file_read(struct file *filp, loff_t *ppos, + + cond_resched(); + find_page: ++ if (fatal_signal_pending(current)) { ++ error = -EINTR; ++ goto out; ++ } ++ + page = find_get_page(mapping, index); + if (!page) { + page_cache_sync_readahead(mapping, +diff --git a/mm/memory.c b/mm/memory.c +index 2ca2ee113ea2..8b4975d1f167 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -1654,12 +1654,6 @@ no_page_table: + return page; + } + +-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) +-{ +- return stack_guard_page_start(vma, addr) || +- stack_guard_page_end(vma, addr+PAGE_SIZE); +-} +- + /** + * __get_user_pages() - pin user pages in memory + * @tsk: task_struct of target task +@@ -1827,11 +1821,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + int ret; + unsigned int fault_flags = 0; + +- /* For mlock, just skip the stack guard page. */ +- if (foll_flags & FOLL_MLOCK) { +- if (stack_guard_page(vma, start)) +- goto next_page; +- } + if (foll_flags & FOLL_WRITE) + fault_flags |= FAULT_FLAG_WRITE; + if (nonblocking) +@@ -3192,40 +3181,6 @@ out_release: + } + + /* +- * This is like a special single-page "expand_{down|up}wards()", +- * except we must first make sure that 'address{-|+}PAGE_SIZE' +- * doesn't hit another vma. +- */ +-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) +-{ +- address &= PAGE_MASK; +- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { +- struct vm_area_struct *prev = vma->vm_prev; +- +- /* +- * Is there a mapping abutting this one below? +- * +- * That's only ok if it's the same stack mapping +- * that has gotten split.. +- */ +- if (prev && prev->vm_end == address) +- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; +- +- return expand_downwards(vma, address - PAGE_SIZE); +- } +- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { +- struct vm_area_struct *next = vma->vm_next; +- +- /* As VM_GROWSDOWN but s/below/above/ */ +- if (next && next->vm_start == address + PAGE_SIZE) +- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; +- +- return expand_upwards(vma, address + PAGE_SIZE); +- } +- return 0; +-} +- +-/* + * We enter with non-exclusive mmap_sem (to exclude vma changes, + * but allow concurrent faults), and pte mapped but not yet locked. + * We return with mmap_sem still held, but pte unmapped and unlocked. +@@ -3244,10 +3199,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, + if (vma->vm_flags & VM_SHARED) + return VM_FAULT_SIGBUS; + +- /* Check if we need to add a guard page to the stack */ +- if (check_stack_guard_page(vma, address) < 0) +- return VM_FAULT_SIGSEGV; +- + /* Use the zero-page for reads */ + if (!(flags & FAULT_FLAG_WRITE)) { + entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c +index 7f1bf93fa87f..9deb93decc9a 100644 +--- a/mm/memory_hotplug.c ++++ b/mm/memory_hotplug.c +@@ -1205,7 +1205,7 @@ int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) + } + + /* +- * Confirm all pages in a range [start, end) is belongs to the same zone. ++ * Confirm all pages in a range [start, end) belong to the same zone. 
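
The test_pages_in_a_zone() rewrite just below fixes two walk bugs at once: SECTION_ALIGN_UP(start_pfn) is a no-op when start_pfn is already aligned (so aligning start_pfn + 1 guarantees a non-empty first step), and advancing with pfn = sec_end_pfn rather than sec_end_pfn + 1 stops skipping one pfn per section. The corrected loop shape, generalized to walking any range in fixed-size blocks whose first block may be partial (BLOCK and the bounds are arbitrary demo values):

    #include <stdio.h>

    #define BLOCK 8u  /* stand-in for PAGES_PER_SECTION */

    static unsigned align_up(unsigned x)
    {
        return (x + BLOCK - 1) / BLOCK * BLOCK;
    }

    int main(void)
    {
        unsigned start = 16, end = 30;  /* start is block-aligned on purpose */
        unsigned pos, block_end;

        for (pos = start, block_end = align_up(start + 1);
             pos < end;
             pos = block_end, block_end += BLOCK) {
            unsigned stop = block_end < end ? block_end : end;
            printf("chunk [%u, %u)\n", pos, stop);  /* [16,24) then [24,30) */
        }
        return 0;
    }
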
+ */ + static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) + { +@@ -1213,9 +1213,9 @@ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) + struct zone *zone = NULL; + struct page *page; + int i; +- for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn); ++ for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1); + pfn < end_pfn; +- pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) { ++ pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) { + /* Make sure the memory section is present first */ + if (!present_section_nr(pfn_to_section_nr(pfn))) + continue; +@@ -1234,7 +1234,11 @@ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) + zone = page_zone(page); + } + } +- return 1; ++ ++ if (zone) ++ return 1; ++ else ++ return 0; + } + + /* +diff --git a/mm/mempolicy.c b/mm/mempolicy.c +index b2061bb5af73..e57c967a1af0 100644 +--- a/mm/mempolicy.c ++++ b/mm/mempolicy.c +@@ -1537,7 +1537,6 @@ asmlinkage long compat_sys_get_mempolicy(int __user *policy, + asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, + compat_ulong_t maxnode) + { +- long err = 0; + unsigned long __user *nm = NULL; + unsigned long nr_bits, alloc_size; + DECLARE_BITMAP(bm, MAX_NUMNODES); +@@ -1546,14 +1545,13 @@ asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, + alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; + + if (nmask) { +- err = compat_get_bitmap(bm, nmask, nr_bits); ++ if (compat_get_bitmap(bm, nmask, nr_bits)) ++ return -EFAULT; + nm = compat_alloc_user_space(alloc_size); +- err |= copy_to_user(nm, bm, alloc_size); ++ if (copy_to_user(nm, bm, alloc_size)) ++ return -EFAULT; + } + +- if (err) +- return -EFAULT; +- + return sys_set_mempolicy(mode, nm, nr_bits+1); + } + +@@ -1561,7 +1559,6 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, + compat_ulong_t mode, compat_ulong_t __user *nmask, + compat_ulong_t maxnode, compat_ulong_t flags) + { +- long err = 0; + unsigned long __user *nm = NULL; + unsigned long nr_bits, alloc_size; + nodemask_t bm; +@@ -1570,14 +1567,13 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, + alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; + + if (nmask) { +- err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); ++ if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) ++ return -EFAULT; + nm = compat_alloc_user_space(alloc_size); +- err |= copy_to_user(nm, nodes_addr(bm), alloc_size); ++ if (copy_to_user(nm, nodes_addr(bm), alloc_size)) ++ return -EFAULT; + } + +- if (err) +- return -EFAULT; +- + return sys_mbind(start, len, mode, nm, nr_bits+1, flags); + } + +diff --git a/mm/mmap.c b/mm/mmap.c +index 70ff9b41c970..3c4e4d7ae54e 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -263,6 +263,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) + unsigned long rlim, retval; + unsigned long newbrk, oldbrk; + struct mm_struct *mm = current->mm; ++ struct vm_area_struct *next; + unsigned long min_brk; + bool populate; + +@@ -308,7 +309,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) + } + + /* Check against existing mmap mappings. */ +- if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) ++ next = find_vma(mm, oldbrk); ++ if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) + goto out; + + /* Ok, looks good - let it rip. 
*/ +@@ -331,10 +333,22 @@ out: + + static long vma_compute_subtree_gap(struct vm_area_struct *vma) + { +- unsigned long max, subtree_gap; +- max = vma->vm_start; +- if (vma->vm_prev) +- max -= vma->vm_prev->vm_end; ++ unsigned long max, prev_end, subtree_gap; ++ ++ /* ++ * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we ++ * allow two stack_guard_gaps between them here, and when choosing ++ * an unmapped area; whereas when expanding we only require one. ++ * That's a little inconsistent, but keeps the code here simpler. ++ */ ++ max = vm_start_gap(vma); ++ if (vma->vm_prev) { ++ prev_end = vm_end_gap(vma->vm_prev); ++ if (max > prev_end) ++ max -= prev_end; ++ else ++ max = 0; ++ } + if (vma->vm_rb.rb_left) { + subtree_gap = rb_entry(vma->vm_rb.rb_left, + struct vm_area_struct, vm_rb)->rb_subtree_gap; +@@ -418,7 +432,7 @@ void validate_mm(struct mm_struct *mm) + list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) + anon_vma_interval_tree_verify(avc); + vma_unlock_anon_vma(vma); +- highest_address = vma->vm_end; ++ highest_address = vm_end_gap(vma); + vma = vma->vm_next; + i++; + } +@@ -586,7 +600,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, + if (vma->vm_next) + vma_gap_update(vma->vm_next); + else +- mm->highest_vm_end = vma->vm_end; ++ mm->highest_vm_end = vm_end_gap(vma); + + /* + * vma->vm_prev wasn't known when we followed the rbtree to find the +@@ -835,7 +849,7 @@ again: remove_next = 1 + (end > next->vm_end); + vma_gap_update(vma); + if (end_changed) { + if (!next) +- mm->highest_vm_end = end; ++ mm->highest_vm_end = vm_end_gap(vma); + else if (!adjust_next) + vma_gap_update(next); + } +@@ -878,7 +892,7 @@ again: remove_next = 1 + (end > next->vm_end); + else if (next) + vma_gap_update(next); + else +- mm->highest_vm_end = end; ++ WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); + } + if (insert && file) + uprobe_mmap(insert); +@@ -1670,7 +1684,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) + + while (true) { + /* Visit left subtree if it looks promising */ +- gap_end = vma->vm_start; ++ gap_end = vm_start_gap(vma); + if (gap_end >= low_limit && vma->vm_rb.rb_left) { + struct vm_area_struct *left = + rb_entry(vma->vm_rb.rb_left, +@@ -1681,12 +1695,13 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) + } + } + +- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; ++ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; + check_current: + /* Check if current node has a suitable gap */ + if (gap_start > high_limit) + return -ENOMEM; +- if (gap_end >= low_limit && gap_end - gap_start >= length) ++ if (gap_end >= low_limit && ++ gap_end > gap_start && gap_end - gap_start >= length) + goto found; + + /* Visit right subtree if it looks promising */ +@@ -1708,8 +1723,8 @@ check_current: + vma = rb_entry(rb_parent(prev), + struct vm_area_struct, vm_rb); + if (prev == vma->vm_rb.rb_left) { +- gap_start = vma->vm_prev->vm_end; +- gap_end = vma->vm_start; ++ gap_start = vm_end_gap(vma->vm_prev); ++ gap_end = vm_start_gap(vma); + goto check_current; + } + } +@@ -1773,7 +1788,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) + + while (true) { + /* Visit right subtree if it looks promising */ +- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; ++ gap_start = vma->vm_prev ? 
vm_end_gap(vma->vm_prev) : 0; + if (gap_start <= high_limit && vma->vm_rb.rb_right) { + struct vm_area_struct *right = + rb_entry(vma->vm_rb.rb_right, +@@ -1786,10 +1801,11 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) + + check_current: + /* Check if current node has a suitable gap */ +- gap_end = vma->vm_start; ++ gap_end = vm_start_gap(vma); + if (gap_end < low_limit) + return -ENOMEM; +- if (gap_start <= high_limit && gap_end - gap_start >= length) ++ if (gap_start <= high_limit && ++ gap_end > gap_start && gap_end - gap_start >= length) + goto found; + + /* Visit left subtree if it looks promising */ +@@ -1812,7 +1828,7 @@ check_current: + struct vm_area_struct, vm_rb); + if (prev == vma->vm_rb.rb_right) { + gap_start = vma->vm_prev ? +- vma->vm_prev->vm_end : 0; ++ vm_end_gap(vma->vm_prev) : 0; + goto check_current; + } + } +@@ -1850,7 +1866,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) + { + struct mm_struct *mm = current->mm; +- struct vm_area_struct *vma; ++ struct vm_area_struct *vma, *prev; + struct vm_unmapped_area_info info; + + if (len > TASK_SIZE - mmap_min_addr) +@@ -1861,9 +1877,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + + if (addr) { + addr = PAGE_ALIGN(addr); +- vma = find_vma(mm, addr); ++ vma = find_vma_prev(mm, addr, &prev); + if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma)) && ++ (!prev || addr >= vm_end_gap(prev))) + return addr; + } + +@@ -1895,7 +1912,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + const unsigned long len, const unsigned long pgoff, + const unsigned long flags) + { +- struct vm_area_struct *vma; ++ struct vm_area_struct *vma, *prev; + struct mm_struct *mm = current->mm; + unsigned long addr = addr0; + struct vm_unmapped_area_info info; +@@ -1910,9 +1927,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + /* requesting a specific address */ + if (addr) { + addr = PAGE_ALIGN(addr); +- vma = find_vma(mm, addr); ++ vma = find_vma_prev(mm, addr, &prev); + if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma)) && ++ (!prev || addr >= vm_end_gap(prev))) + return addr; + } + +@@ -2052,21 +2070,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr, + * update accounting. This is shared with both the + * grow-up and grow-down cases. 
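
All the gap arithmetic above is written so unsigned subtraction can never wrap: vma_compute_subtree_gap() subtracts prev_end only when it is smaller, and both unmapped_area() variants insist on gap_end > gap_start before comparing against the requested length. Reduced to plain integers:

    #include <stdio.h>

    /* Free space between the previous region's guarded end and this
     * region's guarded start, clamped to zero when the guards overlap
     * instead of wrapping to a huge bogus gap. */
    static unsigned long gap_between(unsigned long prev_end_gap,
                                     unsigned long this_start_gap)
    {
        if (this_start_gap > prev_end_gap)
            return this_start_gap - prev_end_gap;
        return 0;
    }

    int main(void)
    {
        printf("%#lx\n", gap_between(0x1000, 0x5000));  /* 0x4000 free */
        printf("%#lx\n", gap_between(0x5000, 0x1000));  /* overlap: 0 */
        return 0;
    }
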
+ */ +-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) ++static int acct_stack_growth(struct vm_area_struct *vma, ++ unsigned long size, unsigned long grow) + { + struct mm_struct *mm = vma->vm_mm; + struct rlimit *rlim = current->signal->rlim; +- unsigned long new_start, actual_size; ++ unsigned long new_start; + + /* address space limit tests */ + if (!may_expand_vm(mm, grow)) + return -ENOMEM; + + /* Stack limit test */ +- actual_size = size; +- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) +- actual_size -= PAGE_SIZE; +- if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) ++ if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) + return -ENOMEM; + + /* mlock limit tests */ +@@ -2107,32 +2123,43 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns + */ + int expand_upwards(struct vm_area_struct *vma, unsigned long address) + { +- int error; ++ struct vm_area_struct *next; ++ unsigned long gap_addr; ++ int error = 0; + + if (!(vma->vm_flags & VM_GROWSUP)) + return -EFAULT; + +- /* +- * We must make sure the anon_vma is allocated +- * so that the anon_vma locking is not a noop. +- */ ++ /* Guard against exceeding limits of the address space. */ ++ address &= PAGE_MASK; ++ if (address >= TASK_SIZE) ++ return -ENOMEM; ++ address += PAGE_SIZE; ++ ++ /* Enforce stack_guard_gap */ ++ gap_addr = address + stack_guard_gap; ++ ++ /* Guard against overflow */ ++ if (gap_addr < address || gap_addr > TASK_SIZE) ++ gap_addr = TASK_SIZE; ++ ++ next = vma->vm_next; ++ if (next && next->vm_start < gap_addr) { ++ if (!(next->vm_flags & VM_GROWSUP)) ++ return -ENOMEM; ++ /* Check that both stack segments have the same anon_vma? */ ++ } ++ ++ /* We must make sure the anon_vma is allocated. */ + if (unlikely(anon_vma_prepare(vma))) + return -ENOMEM; +- vma_lock_anon_vma(vma); + + /* + * vma->vm_start/vm_end cannot change under us because the caller + * is required to hold the mmap_sem in read mode. We need the + * anon_vma lock to serialize against concurrent expand_stacks. +- * Also guard against wrapping around to address 0. + */ +- if (address < PAGE_ALIGN(address+4)) +- address = PAGE_ALIGN(address+4); +- else { +- vma_unlock_anon_vma(vma); +- return -ENOMEM; +- } +- error = 0; ++ vma_lock_anon_vma(vma); + + /* Somebody else might have raced and expanded it already */ + if (address > vma->vm_end) { +@@ -2163,7 +2190,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) + if (vma->vm_next) + vma_gap_update(vma->vm_next); + else +- vma->vm_mm->highest_vm_end = address; ++ vma->vm_mm->highest_vm_end = vm_end_gap(vma); + spin_unlock(&vma->vm_mm->page_table_lock); + + perf_event_mmap(vma); +@@ -2183,27 +2210,36 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) + int expand_downwards(struct vm_area_struct *vma, + unsigned long address) + { ++ struct vm_area_struct *prev; ++ unsigned long gap_addr; + int error; + +- /* +- * We must make sure the anon_vma is allocated +- * so that the anon_vma locking is not a noop. 
+- */ +- if (unlikely(anon_vma_prepare(vma))) +- return -ENOMEM; +- + address &= PAGE_MASK; + error = security_mmap_addr(address); + if (error) + return error; + +- vma_lock_anon_vma(vma); ++ /* Enforce stack_guard_gap */ ++ gap_addr = address - stack_guard_gap; ++ if (gap_addr > address) ++ return -ENOMEM; ++ prev = vma->vm_prev; ++ if (prev && prev->vm_end > gap_addr) { ++ if (!(prev->vm_flags & VM_GROWSDOWN)) ++ return -ENOMEM; ++ /* Check that both stack segments have the same anon_vma? */ ++ } ++ ++ /* We must make sure the anon_vma is allocated. */ ++ if (unlikely(anon_vma_prepare(vma))) ++ return -ENOMEM; + + /* + * vma->vm_start/vm_end cannot change under us because the caller + * is required to hold the mmap_sem in read mode. We need the + * anon_vma lock to serialize against concurrent expand_stacks. + */ ++ vma_lock_anon_vma(vma); + + /* Somebody else might have raced and expanded it already */ + if (address < vma->vm_start) { +@@ -2245,28 +2281,25 @@ int expand_downwards(struct vm_area_struct *vma, + return error; + } + +-/* +- * Note how expand_stack() refuses to expand the stack all the way to +- * abut the next virtual mapping, *unless* that mapping itself is also +- * a stack mapping. We want to leave room for a guard page, after all +- * (the guard page itself is not added here, that is done by the +- * actual page faulting logic) +- * +- * This matches the behavior of the guard page logic (see mm/memory.c: +- * check_stack_guard_page()), which only allows the guard page to be +- * removed under these circumstances. +- */ ++/* enforced gap between the expanding stack and other mappings. */ ++unsigned long stack_guard_gap = 256UL<rc); + goto free_and_error; + } ++ if (rsize < count) { ++ pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize); ++ count = rsize; ++ } + + p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count); + +diff --git a/net/can/bcm.c b/net/can/bcm.c +index 725ce812cfbc..6e0a88d9c554 100644 +--- a/net/can/bcm.c ++++ b/net/can/bcm.c +@@ -706,14 +706,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id, + + static void bcm_remove_op(struct bcm_op *op) + { +- hrtimer_cancel(&op->timer); +- hrtimer_cancel(&op->thrtimer); +- +- if (op->tsklet.func) +- tasklet_kill(&op->tsklet); ++ if (op->tsklet.func) { ++ while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) || ++ test_bit(TASKLET_STATE_RUN, &op->tsklet.state) || ++ hrtimer_active(&op->timer)) { ++ hrtimer_cancel(&op->timer); ++ tasklet_kill(&op->tsklet); ++ } ++ } + +- if (op->thrtsklet.func) +- tasklet_kill(&op->thrtsklet); ++ if (op->thrtsklet.func) { ++ while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) || ++ test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) || ++ hrtimer_active(&op->thrtimer)) { ++ hrtimer_cancel(&op->thrtimer); ++ tasklet_kill(&op->thrtsklet); ++ } ++ } + + if ((op->frames) && (op->frames != &op->sframe)) + kfree(op->frames); +diff --git a/net/can/raw.c b/net/can/raw.c +index f4d86485571f..602be0e07a02 100644 +--- a/net/can/raw.c ++++ b/net/can/raw.c +@@ -470,6 +470,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, + if (optlen % sizeof(struct can_filter) != 0) + return -EINVAL; + ++ if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter)) ++ return -EINVAL; ++ + count = optlen / sizeof(struct can_filter); + + if (count > 1) { +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c +index 025ced8fbb57..c99cfde87bd6 100644 +--- a/net/ceph/messenger.c ++++ b/net/ceph/messenger.c +@@ -6,6 +6,7 @@ + #include + #include + 
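
ceph_tcp_connect(), patched just below, wraps sock_create_kern() in memalloc_noio_save()/memalloc_noio_restore() so that a GFP_KERNEL allocation made while reconnecting cannot recurse into the block I/O path the connection is supposed to service. A user-space analog of the save/restore discipline, with a thread-local flag standing in for the per-task NOIO state (everything here is illustrative):

    #include <stdio.h>

    static _Thread_local int no_io;  /* stand-in for the per-task flag */

    static int save_no_io(void)
    {
        int old = no_io;
        no_io = 1;
        return old;
    }

    static void restore_no_io(int old)
    {
        no_io = old;
    }

    static void allocate_something(void)
    {
        /* A real allocator would consult the flag and avoid anything
         * that could submit I/O while it is set. */
        printf("allocating, no_io=%d\n", no_io);
    }

    int main(void)
    {
        int saved = save_no_io();  /* saving the old value nests safely */
        allocate_something();
        restore_no_io(saved);
        return 0;
    }
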
#include ++#include + #include + #include + #include +@@ -472,11 +473,16 @@ static int ceph_tcp_connect(struct ceph_connection *con) + { + struct sockaddr_storage *paddr = &con->peer_addr.in_addr; + struct socket *sock; ++ unsigned int noio_flag; + int ret; + + BUG_ON(con->sock); ++ ++ /* sock_create_kern() allocates with GFP_KERNEL */ ++ noio_flag = memalloc_noio_save(); + ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM, + IPPROTO_TCP, &sock); ++ memalloc_noio_restore(noio_flag); + if (ret) + return ret; + sock->sk->sk_allocation = GFP_NOFS; +@@ -1969,6 +1975,19 @@ static int process_connect(struct ceph_connection *con) + + dout("process_connect on %p tag %d\n", con, (int)con->in_tag); + ++ if (con->auth_reply_buf) { ++ /* ++ * Any connection that defines ->get_authorizer() ++ * should also define ->verify_authorizer_reply(). ++ * See get_connect_authorizer(). ++ */ ++ ret = con->ops->verify_authorizer_reply(con, 0); ++ if (ret < 0) { ++ con->error_msg = "bad authorize reply"; ++ return ret; ++ } ++ } ++ + switch (con->in_reply.tag) { + case CEPH_MSGR_TAG_FEATURES: + pr_err("%s%lld %s feature set mismatch," +diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c +index a974dfec4bf1..55bb6909edc8 100644 +--- a/net/core/drop_monitor.c ++++ b/net/core/drop_monitor.c +@@ -80,6 +80,7 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data) + struct nlattr *nla; + struct sk_buff *skb; + unsigned long flags; ++ void *msg_header; + + al = sizeof(struct net_dm_alert_msg); + al += dm_hit_limit * sizeof(struct net_dm_drop_point); +@@ -87,21 +88,41 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data) + + skb = genlmsg_new(al, GFP_KERNEL); + +- if (skb) { +- genlmsg_put(skb, 0, 0, &net_drop_monitor_family, +- 0, NET_DM_CMD_ALERT); +- nla = nla_reserve(skb, NLA_UNSPEC, +- sizeof(struct net_dm_alert_msg)); +- msg = nla_data(nla); +- memset(msg, 0, al); +- } else { +- mod_timer(&data->send_timer, jiffies + HZ / 10); ++ if (!skb) ++ goto err; ++ ++ msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family, ++ 0, NET_DM_CMD_ALERT); ++ if (!msg_header) { ++ nlmsg_free(skb); ++ skb = NULL; ++ goto err; ++ } ++ nla = nla_reserve(skb, NLA_UNSPEC, ++ sizeof(struct net_dm_alert_msg)); ++ if (!nla) { ++ nlmsg_free(skb); ++ skb = NULL; ++ goto err; + } ++ msg = nla_data(nla); ++ memset(msg, 0, al); ++ goto out; + ++err: ++ mod_timer(&data->send_timer, jiffies + HZ / 10); ++out: + spin_lock_irqsave(&data->lock, flags); + swap(data->skb, skb); + spin_unlock_irqrestore(&data->lock, flags); + ++ if (skb) { ++ struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data; ++ struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh); ++ ++ genlmsg_end(skb, genlmsg_data(gnlh)); ++ } ++ + return skb; + } + +diff --git a/net/core/neighbour.c b/net/core/neighbour.c +index b49e8bafab17..fb82e8a5b006 100644 +--- a/net/core/neighbour.c ++++ b/net/core/neighbour.c +@@ -872,7 +872,8 @@ static void neigh_probe(struct neighbour *neigh) + if (skb) + skb = skb_copy(skb, GFP_ATOMIC); + write_unlock(&neigh->lock); +- neigh->ops->solicit(neigh, skb); ++ if (neigh->ops->solicit) ++ neigh->ops->solicit(neigh, skb); + atomic_inc(&neigh->probes); + kfree_skb(skb); + } +diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c +index f053198e730c..5e3a7302f774 100644 +--- a/net/dccp/ccids/ccid2.c ++++ b/net/dccp/ccids/ccid2.c +@@ -749,6 +749,7 @@ static void ccid2_hc_tx_exit(struct sock *sk) + for (i = 0; i < hc->tx_seqbufc; i++) + kfree(hc->tx_seqbuf[i]); + 
hc->tx_seqbufc = 0; ++ dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks); + } + + static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) +diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c +index 662071b249cc..e47b15dd9b39 100644 +--- a/net/dccp/minisocks.c ++++ b/net/dccp/minisocks.c +@@ -140,6 +140,7 @@ struct sock *dccp_create_openreq_child(struct sock *sk, + /* It is still raw copy of parent, so invalidate + * destructor and make plain sk_free() */ + newsk->sk_destruct = NULL; ++ bh_unlock_sock(newsk); + sk_free(newsk); + return NULL; + } +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c +index 4556cd25acde..017b4792cd44 100644 +--- a/net/ipv4/fib_frontend.c ++++ b/net/ipv4/fib_frontend.c +@@ -957,7 +957,8 @@ static void nl_fib_input(struct sk_buff *skb) + + net = sock_net(skb->sk); + nlh = nlmsg_hdr(skb); +- if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len || ++ if (skb->len < nlmsg_total_size(sizeof(*frn)) || ++ skb->len < nlh->nlmsg_len || + nlmsg_len(nlh) < sizeof(*frn)) + return; + +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c +index b0178b04bd81..4572ee7c71f4 100644 +--- a/net/ipv4/igmp.c ++++ b/net/ipv4/igmp.c +@@ -196,9 +196,14 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay) + static void igmp_gq_start_timer(struct in_device *in_dev) + { + int tv = net_random() % in_dev->mr_maxdelay; ++ unsigned long exp = jiffies + tv + 2; ++ ++ if (in_dev->mr_gq_running && ++ time_after_eq(exp, (in_dev->mr_gq_timer).expires)) ++ return; + + in_dev->mr_gq_running = 1; +- if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2)) ++ if (!mod_timer(&in_dev->mr_gq_timer, exp)) + in_dev_hold(in_dev); + } + +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index f3b15bb7fbec..0680058fe693 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -5335,6 +5335,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) + struct inet_connection_sock *icsk = inet_csk(sk); + + tcp_set_state(sk, TCP_ESTABLISHED); ++ icsk->icsk_ack.lrcvtime = tcp_time_stamp; + + if (skb != NULL) { + icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); +@@ -5535,7 +5536,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, + * to stand against the temptation 8) --ANK + */ + inet_csk_schedule_ack(sk); +- icsk->icsk_ack.lrcvtime = tcp_time_stamp; + tcp_enter_quickack_mode(sk); + inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, + TCP_DELACK_MAX, TCP_RTO_MAX); +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index 195c618aba6c..270840f5ee01 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -270,10 +270,13 @@ EXPORT_SYMBOL(tcp_v4_connect); + */ + void tcp_v4_mtu_reduced(struct sock *sk) + { +- struct dst_entry *dst; + struct inet_sock *inet = inet_sk(sk); +- u32 mtu = tcp_sk(sk)->mtu_info; ++ struct dst_entry *dst; ++ u32 mtu; + ++ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) ++ return; ++ mtu = tcp_sk(sk)->mtu_info; + dst = inet_csk_update_pmtu(sk, mtu); + if (!dst) + return; +diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c +index 0f0178827259..914a55db8031 100644 +--- a/net/ipv4/tcp_minisocks.c ++++ b/net/ipv4/tcp_minisocks.c +@@ -405,6 +405,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, + newtp->srtt = 0; + newtp->mdev = TCP_TIMEOUT_INIT; + newicsk->icsk_rto = TCP_TIMEOUT_INIT; ++ newicsk->icsk_ack.lrcvtime = tcp_time_stamp; + + newtp->packets_out = 0; + newtp->retrans_out = 0; +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c 
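
nl_fib_input() above now validates three lengths before touching the message: the skb must hold the full expected message, the claimed nlmsg_len must fit, and the payload must cover struct fib_result_nl. The same triple check for any length-prefixed message (msg_hdr and msg_ok are illustrative, not the netlink API):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct msg_hdr { uint32_t len; };  /* claimed total, header included */

    /* Reject before reading: the buffer must hold a header, the claimed
     * length must fit the buffer, and the claimed length must cover the
     * payload we are about to dereference. */
    static bool msg_ok(const void *buf, size_t buf_len, size_t payload_len)
    {
        const struct msg_hdr *h = buf;

        if (buf_len < sizeof(*h))
            return false;
        if (buf_len < h->len)
            return false;
        return h->len >= sizeof(*h) + payload_len;
    }
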
+index 1f2f6b5406ee..8729a934124f 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -2154,9 +2154,11 @@ u32 __tcp_select_window(struct sock *sk) + int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk)); + int window; + +- if (mss > full_space) ++ if (unlikely(mss > full_space)) { + mss = full_space; +- ++ if (mss <= 0) ++ return 0; ++ } + if (free_space < (full_space >> 1)) { + icsk->icsk_ack.quick = 0; + +diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c +index 4b85e6f636c9..722367a6d817 100644 +--- a/net/ipv4/tcp_timer.c ++++ b/net/ipv4/tcp_timer.c +@@ -201,7 +201,8 @@ void tcp_delack_timer_handler(struct sock *sk) + + sk_mem_reclaim_partial(sk); + +- if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) ++ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || ++ !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) + goto out; + + if (time_after(icsk->icsk_ack.timeout, jiffies)) { +@@ -480,7 +481,8 @@ void tcp_write_timer_handler(struct sock *sk) + struct inet_connection_sock *icsk = inet_csk(sk); + int event; + +- if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending) ++ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || ++ !icsk->icsk_pending) + goto out; + + if (time_after(icsk->icsk_timeout, jiffies)) { +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index a3e2c34d5b7a..9c4aa2e22448 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -4768,8 +4768,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf) + struct net_device *dev; + struct inet6_dev *idev; + +- rcu_read_lock(); +- for_each_netdev_rcu(net, dev) { ++ for_each_netdev(net, dev) { + idev = __in6_dev_get(dev); + if (idev) { + int changed = (!idev->cnf.disable_ipv6) ^ (!newf); +@@ -4778,7 +4777,6 @@ static void addrconf_disable_change(struct net *net, __s32 newf) + dev_disable_change(idev); + } + } +- rcu_read_unlock(); + } + + static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf) +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index 1ce7ea1f40b7..17a88ebcc845 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -740,7 +740,6 @@ slow_path: + * Fragment the datagram. + */ + +- *prevhdr = NEXTHDR_FRAGMENT; + hroom = LL_RESERVED_SPACE(rt->dst.dev); + troom = rt->dst.dev->needed_tailroom; + +@@ -748,6 +747,8 @@ slow_path: + * Keep copying data until we run out. + */ + while(left > 0) { ++ u8 *fragnexthdr_offset; ++ + len = left; + /* IF: it doesn't fit, use 'mtu' - the data space left */ + if (len > mtu) +@@ -794,6 +795,10 @@ slow_path: + */ + skb_copy_from_linear_data(skb, skb_network_header(frag), hlen); + ++ fragnexthdr_offset = skb_network_header(frag); ++ fragnexthdr_offset += prevhdr - skb_network_header(skb); ++ *fragnexthdr_offset = NEXTHDR_FRAGMENT; ++ + /* + * Build fragment header. 
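
The fragmentation fix above no longer writes NEXTHDR_FRAGMENT through prevhdr into the original skb; it carries the byte offset of prevhdr over to each fragment's freshly copied headers and patches the copy instead. Translating a pointer between two copies of a buffer, in isolation:

    #include <assert.h>
    #include <string.h>

    /* Same offset, different base: map a pointer into src onto the
     * corresponding byte of an equal-sized copy. */
    static unsigned char *translate(unsigned char *p,
                                    unsigned char *src, unsigned char *copy)
    {
        return copy + (p - src);
    }

    int main(void)
    {
        unsigned char src[16] = "abcdefghijklmno", copy[16];

        memcpy(copy, src, sizeof(src));
        unsigned char *p = &src[6];      /* e.g. the next-header byte */
        *translate(p, src, copy) = 'X';  /* mutate the copy only */
        assert(src[6] == 'g' && copy[6] == 'X');
        return 0;
    }
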
+ */ +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c +index 8344f686335d..2026c5b4342d 100644 +--- a/net/ipv6/ip6mr.c ++++ b/net/ipv6/ip6mr.c +@@ -777,7 +777,8 @@ failure: + * Delete a VIF entry + */ + +-static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head) ++static int mif6_delete(struct mr6_table *mrt, int vifi, int notify, ++ struct list_head *head) + { + struct mif_device *v; + struct net_device *dev; +@@ -823,7 +824,7 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head) + dev->ifindex, &in6_dev->cnf); + } + +- if (v->flags & MIFF_REGISTER) ++ if ((v->flags & MIFF_REGISTER) && !notify) + unregister_netdevice_queue(dev, head); + + dev_put(dev); +@@ -1333,7 +1334,6 @@ static int ip6mr_device_event(struct notifier_block *this, + struct mr6_table *mrt; + struct mif_device *v; + int ct; +- LIST_HEAD(list); + + if (event != NETDEV_UNREGISTER) + return NOTIFY_DONE; +@@ -1342,10 +1342,9 @@ static int ip6mr_device_event(struct notifier_block *this, + v = &mrt->vif6_table[0]; + for (ct = 0; ct < mrt->maxvif; ct++, v++) { + if (v->dev == dev) +- mif6_delete(mrt, ct, &list); ++ mif6_delete(mrt, ct, 1, NULL); + } + } +- unregister_netdevice_many(&list); + + return NOTIFY_DONE; + } +@@ -1550,7 +1549,7 @@ static void mroute_clean_tables(struct mr6_table *mrt, bool all) + for (i = 0; i < mrt->maxvif; i++) { + if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC)) + continue; +- mif6_delete(mrt, i, &list); ++ mif6_delete(mrt, i, 0, &list); + } + unregister_netdevice_many(&list); + +@@ -1703,7 +1702,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns + if (copy_from_user(&mifi, optval, sizeof(mifi_t))) + return -EFAULT; + rtnl_lock(); +- ret = mif6_delete(mrt, mifi, NULL); ++ ret = mif6_delete(mrt, mifi, 0, NULL); + rtnl_unlock(); + return ret; + +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c +index 989bd7987985..c7ce2be09d90 100644 +--- a/net/ipv6/raw.c ++++ b/net/ipv6/raw.c +@@ -1133,7 +1133,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg) + spin_lock_bh(&sk->sk_receive_queue.lock); + skb = skb_peek(&sk->sk_receive_queue); + if (skb != NULL) +- amount = skb->tail - skb->transport_header; ++ amount = skb->len; + spin_unlock_bh(&sk->sk_receive_queue.lock); + return put_user(amount, (int __user *)arg); + } +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index fb5010c27a22..244892ce560e 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -1676,6 +1676,8 @@ static int ip6_route_del(struct fib6_config *cfg) + continue; + if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric) + continue; ++ if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol) ++ continue; + dst_hold(&rt->dst); + read_unlock_bh(&table->tb6_lock); + +diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c +index 7152624ed5f1..26ccd65cdcab 100644 +--- a/net/irda/irqueue.c ++++ b/net/irda/irqueue.c +@@ -385,9 +385,6 @@ EXPORT_SYMBOL(hashbin_new); + * for deallocating this structure if it's complex. If not the user can + * just supply kfree, which should take care of the job. 
+ */ +-#ifdef CONFIG_LOCKDEP +-static int hashbin_lock_depth = 0; +-#endif + int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func) + { + irda_queue_t* queue; +@@ -398,22 +395,27 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func) + IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;); + + /* Synchronize */ +- if ( hashbin->hb_type & HB_LOCK ) { +- spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags, +- hashbin_lock_depth++); +- } ++ if (hashbin->hb_type & HB_LOCK) ++ spin_lock_irqsave(&hashbin->hb_spinlock, flags); + + /* + * Free the entries in the hashbin, TODO: use hashbin_clear when + * it has been shown to work + */ + for (i = 0; i < HASHBIN_SIZE; i ++ ) { +- queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]); +- while (queue ) { +- if (free_func) +- (*free_func)(queue); +- queue = dequeue_first( +- (irda_queue_t**) &hashbin->hb_queue[i]); ++ while (1) { ++ queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]); ++ ++ if (!queue) ++ break; ++ ++ if (free_func) { ++ if (hashbin->hb_type & HB_LOCK) ++ spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); ++ free_func(queue); ++ if (hashbin->hb_type & HB_LOCK) ++ spin_lock_irqsave(&hashbin->hb_spinlock, flags); ++ } + } + } + +@@ -422,12 +424,8 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func) + hashbin->magic = ~HB_MAGIC; + + /* Release lock */ +- if ( hashbin->hb_type & HB_LOCK) { ++ if (hashbin->hb_type & HB_LOCK) + spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); +-#ifdef CONFIG_LOCKDEP +- hashbin_lock_depth--; +-#endif +- } + + /* + * Free the hashbin structure +diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c +index f4d30b509cdf..1f65095c3217 100644 +--- a/net/l2tp/l2tp_ip.c ++++ b/net/l2tp/l2tp_ip.c +@@ -382,7 +382,7 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb) + drop: + IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS); + kfree_skb(skb); +- return -1; ++ return 0; + } + + /* Userspace will call sendmsg() on the tunnel socket to send L2TP +diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c +index efb510e6f206..a1f47b8d8013 100644 +--- a/net/mac80211/pm.c ++++ b/net/mac80211/pm.c +@@ -114,6 +114,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) + break; + } + ++ flush_delayed_work(&sdata->dec_tailroom_needed_wk); + drv_remove_interface(local, sdata); + } + +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index 4b1734a14ff9..0bbb3470fa78 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -2242,7 +2242,7 @@ static int packet_snd(struct socket *sock, + int vnet_hdr_len; + struct packet_sock *po = pkt_sk(sk); + unsigned short gso_type = 0; +- int hlen, tlen; ++ int hlen, tlen, linear; + int extra_len = 0; + + /* +@@ -2336,7 +2336,9 @@ static int packet_snd(struct socket *sock, + err = -ENOBUFS; + hlen = LL_RESERVED_SPACE(dev); + tlen = dev->needed_tailroom; +- skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len, ++ linear = vnet_hdr.hdr_len; ++ linear = max(linear, min_t(int, len, dev->hard_header_len)); ++ skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear, + msg->msg_flags & MSG_DONTWAIT, &err); + if (skb == NULL) + goto out_unlock; +@@ -2556,7 +2558,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, + int addr_len) + { + struct sock *sk = sock->sk; +- char name[15]; ++ char name[sizeof(uaddr->sa_data) + 1]; + struct net_device *dev; + int err = -ENODEV; + +@@ -2566,7 +2568,11 @@ static int packet_bind_spkt(struct socket *sock, 
struct sockaddr *uaddr, + + if (addr_len != sizeof(struct sockaddr)) + return -EINVAL; +- strlcpy(name, uaddr->sa_data, sizeof(name)); ++ /* uaddr->sa_data comes from the userspace, it's not guaranteed to be ++ * zero-terminated. ++ */ ++ memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data)); ++ name[sizeof(uaddr->sa_data)] = 0; + + dev = dev_get_by_name(sock_net(sk), name); + if (dev) +@@ -3181,6 +3187,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv + return -EBUSY; + if (copy_from_user(&val, optval, sizeof(val))) + return -EFAULT; ++ if (val > INT_MAX) ++ return -EINVAL; + po->tp_reserve = val; + return 0; + } +@@ -3666,8 +3674,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, + if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) + goto out; + if (po->tp_version >= TPACKET_V3 && +- (int)(req->tp_block_size - +- BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0) ++ req->tp_block_size <= ++ BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv)) + goto out; + if (unlikely(req->tp_frame_size < po->tp_hdrlen + + po->tp_reserve)) +@@ -3678,6 +3686,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, + rb->frames_per_block = req->tp_block_size/req->tp_frame_size; + if (unlikely(rb->frames_per_block <= 0)) + goto out; ++ if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr)) ++ goto out; + if (unlikely((rb->frames_per_block * req->tp_block_nr) != + req->tp_frame_nr)) + goto out; +@@ -3694,7 +3704,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, + */ + if (!tx_ring) + init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring); +- break; ++ break; + default: + break; + } +diff --git a/net/rds/cong.c b/net/rds/cong.c +index e5b65acd650b..cec4c4e6d905 100644 +--- a/net/rds/cong.c ++++ b/net/rds/cong.c +@@ -285,7 +285,7 @@ void rds_cong_set_bit(struct rds_cong_map *map, __be16 port) + i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; + off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; + +- __set_bit_le(off, (void *)map->m_page_addrs[i]); ++ set_bit_le(off, (void *)map->m_page_addrs[i]); + } + + void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port) +@@ -299,7 +299,7 @@ void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port) + i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; + off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; + +- __clear_bit_le(off, (void *)map->m_page_addrs[i]); ++ clear_bit_le(off, (void *)map->m_page_addrs[i]); + } + + static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port) +diff --git a/net/sctp/socket.c b/net/sctp/socket.c +index 4178cf387d21..4358ae85cdad 100644 +--- a/net/sctp/socket.c ++++ b/net/sctp/socket.c +@@ -6181,6 +6181,9 @@ int sctp_inet_listen(struct socket *sock, int backlog) + if (sock->state != SS_UNCONNECTED) + goto out; + ++ if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED)) ++ goto out; ++ + /* If backlog is zero, disable listening. 
*/ + if (!backlog) { + if (sctp_sstate(sk, CLOSED)) +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index 8f118c7c19e1..4b18115c0543 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -977,6 +977,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + unsigned int hash; + struct unix_address *addr; + struct hlist_head *list; ++ struct path path = { NULL, NULL }; + + err = -EINVAL; + if (sunaddr->sun_family != AF_UNIX) +@@ -992,9 +993,20 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + goto out; + addr_len = err; + ++ if (sun_path[0]) { ++ umode_t mode = S_IFSOCK | ++ (SOCK_INODE(sock)->i_mode & ~current_umask()); ++ err = unix_mknod(sun_path, mode, &path); ++ if (err) { ++ if (err == -EEXIST) ++ err = -EADDRINUSE; ++ goto out; ++ } ++ } ++ + err = mutex_lock_interruptible(&u->readlock); + if (err) +- goto out; ++ goto out_put; + + err = -EINVAL; + if (u->addr) +@@ -1011,16 +1023,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + atomic_set(&addr->refcnt, 1); + + if (sun_path[0]) { +- struct path path; +- umode_t mode = S_IFSOCK | +- (SOCK_INODE(sock)->i_mode & ~current_umask()); +- err = unix_mknod(sun_path, mode, &path); +- if (err) { +- if (err == -EEXIST) +- err = -EADDRINUSE; +- unix_release_addr(addr); +- goto out_up; +- } + addr->hash = UNIX_HASH_SIZE; + hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1); + spin_lock(&unix_table_lock); +@@ -1047,6 +1049,9 @@ out_unlock: + spin_unlock(&unix_table_lock); + out_up: + mutex_unlock(&u->readlock); ++out_put: ++ if (err) ++ path_put(&path); + out: + return err; + } +diff --git a/samples/seccomp/bpf-helper.h b/samples/seccomp/bpf-helper.h +index 38ee70f3cd5b..1d8de9edd858 100644 +--- a/samples/seccomp/bpf-helper.h ++++ b/samples/seccomp/bpf-helper.h +@@ -138,7 +138,7 @@ union arg64 { + #define ARG_32(idx) \ + BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)) + +-/* Loads hi into A and lo in X */ ++/* Loads lo into M[0] and hi into M[1] and A */ + #define ARG_64(idx) \ + BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \ + BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \ +@@ -153,88 +153,107 @@ union arg64 { + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 1, 0), \ + jt + +-/* Checks the lo, then swaps to check the hi. A=lo,X=hi */ ++#define JA32(value, jt) \ ++ BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \ ++ jt ++ ++#define JGE32(value, jt) \ ++ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \ ++ jt ++ ++#define JGT32(value, jt) \ ++ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \ ++ jt ++ ++#define JLE32(value, jt) \ ++ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \ ++ jt ++ ++#define JLT32(value, jt) \ ++ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \ ++ jt ++ ++/* ++ * All the JXX64 checks assume lo is saved in M[0] and hi is saved in both ++ * A and M[1]. This invariant is kept by restoring A if necessary. 
++ */ + #define JEQ64(lo, hi, jt) \ ++ /* if (hi != arg.hi) goto NOMATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ + BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ ++ /* if (lo != arg.lo) goto NOMATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 0, 2), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ + jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ ++ BPF_STMT(BPF_LD+BPF_MEM, 1) + + #define JNE64(lo, hi, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 5, 0), \ +- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ ++ /* if (hi != arg.hi) goto MATCH; */ \ ++ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \ ++ BPF_STMT(BPF_LD+BPF_MEM, 0), \ ++ /* if (lo != arg.lo) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 2, 0), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ + jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ +- +-#define JA32(value, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \ +- jt ++ BPF_STMT(BPF_LD+BPF_MEM, 1) + + #define JA64(lo, hi, jt) \ ++ /* if (hi & arg.hi) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (hi), 3, 0), \ +- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 0), \ ++ /* if (lo & arg.lo) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (lo), 0, 2), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ + jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ ++ BPF_STMT(BPF_LD+BPF_MEM, 1) + +-#define JGE32(value, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \ +- jt +- +-#define JLT32(value, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \ +- jt +- +-/* Shortcut checking if hi > arg.hi. 
*/ + #define JGE64(lo, hi, jt) \ ++ /* if (hi > arg.hi) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \ ++ /* if (hi != arg.hi) goto NOMATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ +- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 0), \ ++ /* if (lo >= arg.lo) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 0, 2), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ +- jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ +- +-#define JLT64(lo, hi, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \ +- BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ +- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ +- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ + jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ ++ BPF_STMT(BPF_LD+BPF_MEM, 1) + +-#define JGT32(value, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \ +- jt +- +-#define JLE32(value, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \ +- jt +- +-/* Check hi > args.hi first, then do the GE checking */ + #define JGT64(lo, hi, jt) \ ++ /* if (hi > arg.hi) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \ ++ /* if (hi != arg.hi) goto NOMATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ +- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 0), \ ++ /* if (lo > arg.lo) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 0, 2), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ + jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ ++ BPF_STMT(BPF_LD+BPF_MEM, 1) + + #define JLE64(lo, hi, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 6, 0), \ +- BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \ +- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ ++ /* if (hi < arg.hi) goto MATCH; */ \ ++ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \ ++ /* if (hi != arg.hi) goto NOMATCH; */ \ ++ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ ++ BPF_STMT(BPF_LD+BPF_MEM, 0), \ ++ /* if (lo <= arg.lo) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ ++ jt, \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1) ++ ++#define JLT64(lo, hi, jt) \ ++ /* if (hi < arg.hi) goto MATCH; */ \ ++ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \ ++ /* if (hi != arg.hi) goto NOMATCH; */ \ ++ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ ++ BPF_STMT(BPF_LD+BPF_MEM, 0), \ ++ /* if (lo < arg.lo) goto MATCH; */ \ ++ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 2, 0), \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ + jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ ++ BPF_STMT(BPF_LD+BPF_MEM, 1) + + #define LOAD_SYSCALL_NR \ + BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \ +diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c +index 3ae28db5a64f..c8031c1c52ca 100644 +--- a/security/apparmor/audit.c ++++ b/security/apparmor/audit.c +@@ -212,7 +212,8 @@ int aa_audit(int type, struct aa_profile *profile, gfp_t gfp, + + if (sa->aad->type == AUDIT_APPARMOR_KILL) + (void)send_sig_info(SIGKILL, NULL, +- sa->aad->tsk ? sa->aad->tsk : current); ++ sa->type == LSM_AUDIT_DATA_TASK && sa->aad->tsk ? 
++ sa->aad->tsk : current); + + if (sa->aad->type == AUDIT_APPARMOR_ALLOWED) + return complain_error(sa->aad->error); +diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c +index 859abdaac1ea..8405a0428b67 100644 +--- a/security/apparmor/domain.c ++++ b/security/apparmor/domain.c +@@ -441,7 +441,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) + new_profile = aa_get_profile(ns->unconfined); + info = "ux fallback"; + } else { +- error = -ENOENT; ++ error = -EACCES; + info = "profile not found"; + } + } +diff --git a/security/apparmor/file.c b/security/apparmor/file.c +index fdaa50cb1876..a4f7f1a5a798 100644 +--- a/security/apparmor/file.c ++++ b/security/apparmor/file.c +@@ -110,7 +110,8 @@ int aa_audit_file(struct aa_profile *profile, struct file_perms *perms, + int type = AUDIT_APPARMOR_AUTO; + struct common_audit_data sa; + struct apparmor_audit_data aad = {0,}; +- sa.type = LSM_AUDIT_DATA_NONE; ++ sa.type = LSM_AUDIT_DATA_TASK; ++ sa.u.tsk = NULL; + sa.aad = &aad; + aad.op = op, + aad.fs.request = request; +diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h +index 775843e7f984..b5029c77c3e3 100644 +--- a/security/apparmor/include/match.h ++++ b/security/apparmor/include/match.h +@@ -57,6 +57,7 @@ struct table_set_header { + #define YYTD_ID_ACCEPT2 6 + #define YYTD_ID_NXT 7 + #define YYTD_ID_TSIZE 8 ++#define YYTD_ID_MAX 8 + + #define YYTD_DATA8 1 + #define YYTD_DATA16 2 +diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h +index bda4569fdd83..0c9d121f15d0 100644 +--- a/security/apparmor/include/policy.h ++++ b/security/apparmor/include/policy.h +@@ -313,6 +313,8 @@ static inline int AUDIT_MODE(struct aa_profile *profile) + return profile->audit; + } + ++bool policy_view_capable(void); ++bool policy_admin_capable(void); + bool aa_may_manage_policy(int op); + + #endif /* __AA_POLICY_H */ +diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c +index b21830eced41..6eeaab80865d 100644 +--- a/security/apparmor/lsm.c ++++ b/security/apparmor/lsm.c +@@ -759,51 +759,49 @@ __setup("apparmor=", apparmor_enabled_setup); + /* set global flag turning off the ability to load policy */ + static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp) + { +- if (!capable(CAP_MAC_ADMIN)) ++ if (!policy_admin_capable()) + return -EPERM; +- if (aa_g_lock_policy) +- return -EACCES; + return param_set_bool(val, kp); + } + + static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp) + { +- if (!capable(CAP_MAC_ADMIN)) ++ if (!policy_view_capable()) + return -EPERM; + return param_get_bool(buffer, kp); + } + + static int param_set_aabool(const char *val, const struct kernel_param *kp) + { +- if (!capable(CAP_MAC_ADMIN)) ++ if (!policy_admin_capable()) + return -EPERM; + return param_set_bool(val, kp); + } + + static int param_get_aabool(char *buffer, const struct kernel_param *kp) + { +- if (!capable(CAP_MAC_ADMIN)) ++ if (!policy_view_capable()) + return -EPERM; + return param_get_bool(buffer, kp); + } + + static int param_set_aauint(const char *val, const struct kernel_param *kp) + { +- if (!capable(CAP_MAC_ADMIN)) ++ if (!policy_admin_capable()) + return -EPERM; + return param_set_uint(val, kp); + } + + static int param_get_aauint(char *buffer, const struct kernel_param *kp) + { +- if (!capable(CAP_MAC_ADMIN)) ++ if (!policy_view_capable()) + return -EPERM; + return param_get_uint(buffer, kp); + } + + static int param_get_audit(char *buffer, struct kernel_param *kp) + { 
+- if (!capable(CAP_MAC_ADMIN)) ++ if (!policy_view_capable()) + return -EPERM; + + if (!apparmor_enabled) +@@ -815,7 +813,7 @@ static int param_get_audit(char *buffer, struct kernel_param *kp) + static int param_set_audit(const char *val, struct kernel_param *kp) + { + int i; +- if (!capable(CAP_MAC_ADMIN)) ++ if (!policy_admin_capable()) + return -EPERM; + + if (!apparmor_enabled) +@@ -836,7 +834,7 @@ static int param_set_audit(const char *val, struct kernel_param *kp) + + static int param_get_mode(char *buffer, struct kernel_param *kp) + { +- if (!capable(CAP_MAC_ADMIN)) ++ if (!policy_admin_capable()) + return -EPERM; + + if (!apparmor_enabled) +@@ -848,7 +846,7 @@ static int param_get_mode(char *buffer, struct kernel_param *kp) + static int param_set_mode(const char *val, struct kernel_param *kp) + { + int i; +- if (!capable(CAP_MAC_ADMIN)) ++ if (!policy_admin_capable()) + return -EPERM; + + if (!apparmor_enabled) +diff --git a/security/apparmor/match.c b/security/apparmor/match.c +index 90971a8c3789..704b0eb25801 100644 +--- a/security/apparmor/match.c ++++ b/security/apparmor/match.c +@@ -45,6 +45,8 @@ static struct table_header *unpack_table(char *blob, size_t bsize) + * it every time we use td_id as an index + */ + th.td_id = be16_to_cpu(*(u16 *) (blob)) - 1; ++ if (th.td_id > YYTD_ID_MAX) ++ goto out; + th.td_flags = be16_to_cpu(*(u16 *) (blob + 2)); + th.td_lolen = be32_to_cpu(*(u32 *) (blob + 8)); + blob += sizeof(struct table_header); +@@ -59,7 +61,9 @@ static struct table_header *unpack_table(char *blob, size_t bsize) + + table = kvmalloc(tsize); + if (table) { +- *table = th; ++ table->td_id = th.td_id; ++ table->td_flags = th.td_flags; ++ table->td_lolen = th.td_lolen; + if (th.td_flags == YYTD_DATA8) + UNPACK_ARRAY(table->td_data, blob, th.td_lolen, + u8, byte_to_byte); +@@ -71,14 +75,14 @@ static struct table_header *unpack_table(char *blob, size_t bsize) + u32, be32_to_cpu); + else + goto fail; ++ /* if table was vmalloced make sure the page tables are synced ++ * before it is used, as it goes live to all cpus. ++ */ ++ if (is_vmalloc_addr(table)) ++ vm_unmap_aliases(); + } + + out: +- /* if table was vmalloced make sure the page tables are synced +- * before it is used, as it goes live to all cpus. +- */ +- if (is_vmalloc_addr(table)) +- vm_unmap_aliases(); + return table; + fail: + kvfree(table); +diff --git a/security/apparmor/path.c b/security/apparmor/path.c +index e91ffee80162..07bf2ac1ef6f 100644 +--- a/security/apparmor/path.c ++++ b/security/apparmor/path.c +@@ -25,7 +25,6 @@ + #include "include/path.h" + #include "include/policy.h" + +- + /* modified from dcache.c */ + static int prepend(char **buffer, int buflen, const char *str, int namelen) + { +@@ -39,6 +38,38 @@ static int prepend(char **buffer, int buflen, const char *str, int namelen) + + #define CHROOT_NSCONNECT (PATH_CHROOT_REL | PATH_CHROOT_NSCONNECT) + ++/* If the path is not connected to the expected root, ++ * check if it is a sysctl and handle specially else remove any ++ * leading / that __d_path may have returned. ++ * Unless ++ * specifically directed to connect the path, ++ * OR ++ * if in a chroot and doing chroot relative paths and the path ++ * resolves to the namespace root (would be connected outside ++ * of chroot) and specifically directed to connect paths to ++ * namespace root. 
++ */ ++static int disconnect(const struct path *path, char *buf, char **name, ++ int flags) ++{ ++ int error = 0; ++ ++ if (!(flags & PATH_CONNECT_PATH) && ++ !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) && ++ our_mnt(path->mnt))) { ++ /* disconnected path, don't return pathname starting ++ * with '/' ++ */ ++ error = -EACCES; ++ if (**name == '/') ++ *name = *name + 1; ++ } else if (**name != '/') ++ /* CONNECT_PATH with missing root */ ++ error = prepend(name, *name - buf, "/", 1); ++ ++ return error; ++} ++ + /** + * d_namespace_path - lookup a name associated with a given path + * @path: path to lookup (NOT NULL) +@@ -74,7 +105,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, + * control instead of hard coded /proc + */ + return prepend(name, *name - buf, "/proc", 5); +- } ++ } else ++ return disconnect(path, buf, name, flags); + return 0; + } + +@@ -120,29 +152,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, + goto out; + } + +- /* If the path is not connected to the expected root, +- * check if it is a sysctl and handle specially else remove any +- * leading / that __d_path may have returned. +- * Unless +- * specifically directed to connect the path, +- * OR +- * if in a chroot and doing chroot relative paths and the path +- * resolves to the namespace root (would be connected outside +- * of chroot) and specifically directed to connect paths to +- * namespace root. +- */ +- if (!connected) { +- if (!(flags & PATH_CONNECT_PATH) && +- !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) && +- our_mnt(path->mnt))) { +- /* disconnected path, don't return pathname starting +- * with '/' +- */ +- error = -EACCES; +- if (*res == '/') +- *name = res + 1; +- } +- } ++ if (!connected) ++ error = disconnect(path, buf, name, flags); + + out: + return error; +diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c +index 813200384d97..c4780e108d7d 100644 +--- a/security/apparmor/policy.c ++++ b/security/apparmor/policy.c +@@ -1002,6 +1002,22 @@ static int audit_policy(int op, gfp_t gfp, const char *name, const char *info, + &sa, NULL); + } + ++bool policy_view_capable(void) ++{ ++ struct user_namespace *user_ns = current_user_ns(); ++ bool response = false; ++ ++ if (ns_capable(user_ns, CAP_MAC_ADMIN)) ++ response = true; ++ ++ return response; ++} ++ ++bool policy_admin_capable(void) ++{ ++ return policy_view_capable() && !aa_g_lock_policy; ++} ++ + /** + * aa_may_manage_policy - can the current task manage policy + * @op: the policy manipulation operation being done +@@ -1016,7 +1032,7 @@ bool aa_may_manage_policy(int op) + return 0; + } + +- if (!capable(CAP_MAC_ADMIN)) { ++ if (!policy_admin_capable()) { + audit_policy(op, GFP_KERNEL, NULL, "not policy admin", -EACCES); + return 0; + } +diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c +index 329b1fd30749..55ff3eecd368 100644 +--- a/security/apparmor/policy_unpack.c ++++ b/security/apparmor/policy_unpack.c +@@ -571,6 +571,9 @@ static struct aa_profile *unpack_profile(struct aa_ext *e) + error = PTR_ERR(profile->policy.dfa); + profile->policy.dfa = NULL; + goto fail; ++ } else if (!profile->policy.dfa) { ++ error = -EPROTO; ++ goto fail; + } + if (!unpack_u32(e, &profile->policy.start[0], "start")) + /* default start state */ +@@ -652,7 +655,7 @@ static bool verify_xindex(int xindex, int table_size) + int index, xtype; + xtype = xindex & AA_X_TYPE_MASK; + index = xindex & AA_X_INDEX_MASK; +- if (xtype == AA_X_TABLE && index > table_size) 
++ if (xtype == AA_X_TABLE && index >= table_size) + return 0; + return 1; + } +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c +index 08865dcbf5f1..d449dde1bf50 100644 +--- a/sound/core/seq/seq_clientmgr.c ++++ b/sound/core/seq/seq_clientmgr.c +@@ -1909,6 +1909,7 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client, + info.output_pool != client->pool->size)) { + if (snd_seq_write_pool_allocated(client)) { + /* remove all existing cells */ ++ snd_seq_pool_mark_closing(client->pool); + snd_seq_queue_client_leave_cells(client->number); + snd_seq_pool_done(client->pool); + } +diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c +index 0d75afa786bc..490b697e83ff 100644 +--- a/sound/core/seq/seq_fifo.c ++++ b/sound/core/seq/seq_fifo.c +@@ -72,6 +72,9 @@ void snd_seq_fifo_delete(struct snd_seq_fifo **fifo) + return; + *fifo = NULL; + ++ if (f->pool) ++ snd_seq_pool_mark_closing(f->pool); ++ + snd_seq_fifo_clear(f); + + /* wake up clients if any */ +@@ -137,6 +140,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f, + f->tail = cell; + if (f->head == NULL) + f->head = cell; ++ cell->next = NULL; + f->cells++; + spin_unlock_irqrestore(&f->lock, flags); + +@@ -216,6 +220,8 @@ void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f, + spin_lock_irqsave(&f->lock, flags); + cell->next = f->head; + f->head = cell; ++ if (!f->tail) ++ f->tail = cell; + f->cells++; + spin_unlock_irqrestore(&f->lock, flags); + } +@@ -261,6 +267,10 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize) + /* NOTE: overflow flag is not cleared */ + spin_unlock_irqrestore(&f->lock, flags); + ++ /* close the old pool and wait until all users are gone */ ++ snd_seq_pool_mark_closing(oldpool); ++ snd_use_lock_sync(&f->use_lock); ++ + /* release cells in old pool */ + for (cell = oldhead; cell; cell = next) { + next = cell->next; +diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c +index 2cfe50c71a9d..8a6b7baafa35 100644 +--- a/sound/core/seq/seq_lock.c ++++ b/sound/core/seq/seq_lock.c +@@ -28,19 +28,16 @@ + /* wait until all locks are released */ + void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line) + { +- int max_count = 5 * HZ; ++ int warn_count = 5 * HZ; + + if (atomic_read(lockp) < 0) { + printk(KERN_WARNING "seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line); + return; + } + while (atomic_read(lockp) > 0) { +- if (max_count == 0) { +- snd_printk(KERN_WARNING "seq_lock: timeout [%d left] in %s:%d\n", atomic_read(lockp), file, line); +- break; +- } ++ if (warn_count-- == 0) ++ pr_warn("ALSA: seq_lock: waiting [%d left] in %s:%d\n", atomic_read(lockp), file, line); + schedule_timeout_uninterruptible(1); +- max_count--; + } + } + +diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c +index f478f770bf52..8c510781558f 100644 +--- a/sound/core/seq/seq_memory.c ++++ b/sound/core/seq/seq_memory.c +@@ -411,32 +411,33 @@ int snd_seq_pool_init(struct snd_seq_pool *pool) + return 0; + } + ++/* refuse the further insertion to the pool */ ++void snd_seq_pool_mark_closing(struct snd_seq_pool *pool) ++{ ++ unsigned long flags; ++ ++ if (snd_BUG_ON(!pool)) ++ return; ++ spin_lock_irqsave(&pool->lock, flags); ++ pool->closing = 1; ++ spin_unlock_irqrestore(&pool->lock, flags); ++} ++ + /* remove events */ + int snd_seq_pool_done(struct snd_seq_pool *pool) + { + unsigned long flags; + struct snd_seq_event_cell *ptr; +- int max_count = 5 * HZ; + + if (snd_BUG_ON(!pool)) + return 
-EINVAL; + + /* wait for closing all threads */ +- spin_lock_irqsave(&pool->lock, flags); +- pool->closing = 1; +- spin_unlock_irqrestore(&pool->lock, flags); +- + if (waitqueue_active(&pool->output_sleep)) + wake_up(&pool->output_sleep); + +- while (atomic_read(&pool->counter) > 0) { +- if (max_count == 0) { +- snd_printk(KERN_WARNING "snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter)); +- break; +- } ++ while (atomic_read(&pool->counter) > 0) + schedule_timeout_uninterruptible(1); +- max_count--; +- } + + /* release all resources */ + spin_lock_irqsave(&pool->lock, flags); +@@ -490,6 +491,7 @@ int snd_seq_pool_delete(struct snd_seq_pool **ppool) + *ppool = NULL; + if (pool == NULL) + return 0; ++ snd_seq_pool_mark_closing(pool); + snd_seq_pool_done(pool); + kfree(pool); + return 0; +diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h +index 4a2ec779b8a7..32f959c17786 100644 +--- a/sound/core/seq/seq_memory.h ++++ b/sound/core/seq/seq_memory.h +@@ -84,6 +84,7 @@ static inline int snd_seq_total_cells(struct snd_seq_pool *pool) + int snd_seq_pool_init(struct snd_seq_pool *pool); + + /* done pool - free events */ ++void snd_seq_pool_mark_closing(struct snd_seq_pool *pool); + int snd_seq_pool_done(struct snd_seq_pool *pool); + + /* create pool */ +diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c +index 4c9aa462de9b..17fe04d892f9 100644 +--- a/sound/core/seq/seq_queue.c ++++ b/sound/core/seq/seq_queue.c +@@ -183,6 +183,8 @@ void __exit snd_seq_queues_delete(void) + } + } + ++static void queue_use(struct snd_seq_queue *queue, int client, int use); ++ + /* allocate a new queue - + * return queue index value or negative value for error + */ +@@ -194,11 +196,11 @@ int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags) + if (q == NULL) + return -ENOMEM; + q->info_flags = info_flags; ++ queue_use(q, client, 1); + if (queue_list_add(q) < 0) { + queue_delete(q); + return -ENOMEM; + } +- snd_seq_queue_use(q->queue, client, 1); /* use this queue */ + return q->queue; + } + +@@ -504,19 +506,9 @@ int snd_seq_queue_timer_set_tempo(int queueid, int client, + return result; + } + +- +-/* use or unuse this queue - +- * if it is the first client, starts the timer. +- * if it is not longer used by any clients, stop the timer. +- */ +-int snd_seq_queue_use(int queueid, int client, int use) ++/* use or unuse this queue */ ++static void queue_use(struct snd_seq_queue *queue, int client, int use) + { +- struct snd_seq_queue *queue; +- +- queue = queueptr(queueid); +- if (queue == NULL) +- return -EINVAL; +- mutex_lock(&queue->timer_mutex); + if (use) { + if (!test_and_set_bit(client, queue->clients_bitmap)) + queue->clients++; +@@ -531,6 +523,21 @@ int snd_seq_queue_use(int queueid, int client, int use) + } else { + snd_seq_timer_close(queue); + } ++} ++ ++/* use or unuse this queue - ++ * if it is the first client, starts the timer. ++ * if it is not longer used by any clients, stop the timer. 
++ */ ++int snd_seq_queue_use(int queueid, int client, int use) ++{ ++ struct snd_seq_queue *queue; ++ ++ queue = queueptr(queueid); ++ if (queue == NULL) ++ return -EINVAL; ++ mutex_lock(&queue->timer_mutex); ++ queue_use(queue, client, use); + mutex_unlock(&queue->timer_mutex); + queuefree(queue); + return 0; +diff --git a/sound/core/timer.c b/sound/core/timer.c +index 749857a889e6..98904d8e51db 100644 +--- a/sound/core/timer.c ++++ b/sound/core/timer.c +@@ -1659,9 +1659,21 @@ static int snd_timer_user_params(struct file *file, + return -EBADFD; + if (copy_from_user(¶ms, _params, sizeof(params))) + return -EFAULT; +- if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) { +- err = -EINVAL; +- goto _end; ++ if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE)) { ++ u64 resolution; ++ ++ if (params.ticks < 1) { ++ err = -EINVAL; ++ goto _end; ++ } ++ ++ /* Don't allow resolution less than 1ms */ ++ resolution = snd_timer_resolution(tu->timeri); ++ resolution *= params.ticks; ++ if (resolution < 1000000) { ++ err = -EINVAL; ++ goto _end; ++ } + } + if (params.queue_size > 0 && + (params.queue_size < 32 || params.queue_size > 1024)) { +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index babbf238a648..af27d67efa82 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -2185,6 +2185,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { + SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC), + SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601), + SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS), ++ SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3), + SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), + SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), + SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c +index 5ea5a18f3f58..77047e3517a5 100644 +--- a/sound/usb/mixer.c ++++ b/sound/usb/mixer.c +@@ -893,9 +893,10 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, + case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */ + case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */ + case USB_ID(0x046d, 0x0991): ++ case USB_ID(0x046d, 0x09a2): /* QuickCam Communicate Deluxe/S7500 */ + /* Most audio usb devices lie about volume resolution. + * Most Logitech webcams have res = 384. +- * Proboly there is some logitech magic behind this number --fishor ++ * Probably there is some logitech magic behind this number --fishor + */ + if (!strcmp(kctl->id.name, "Mic Capture Volume")) { + snd_printk(KERN_INFO