mirror of https://github.com/Fishwaldo/build.git
synced 2025-03-26 16:51:48 +00:00
diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt
index 6ff16b620d84..c08b62d63afa 100644
--- a/Documentation/scsi/scsi_eh.txt
+++ b/Documentation/scsi/scsi_eh.txt
@@ -255,19 +255,23 @@ scmd->allowed.

3. scmd recovered
ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
- - shost->host_failed--
- clear scmd->eh_eflags
- scsi_setup_cmd_retry()
- move from local eh_work_q to local eh_done_q
LOCKING: none
+ CONCURRENCY: at most one thread per separate eh_work_q to
+ keep queue manipulation lockless

4. EH completes
ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
- layer of failure.
+ layer of failure. May be called concurrently but must have
+ no more than one thread per separate eh_work_q to
+ manipulate the queue locklessly
- scmd is removed from eh_done_q and scmd->eh_entry is cleared
- if retry is necessary, scmd is requeued using
scsi_queue_insert()
- otherwise, scsi_finish_command() is invoked for scmd
+ - zero shost->host_failed
LOCKING: queue or finish function performs appropriate locking

diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 88152f214f48..302b5ed616a6 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -32,6 +32,8 @@ Currently, these files are in /proc/sys/fs:
- nr_open
- overflowuid
- overflowgid
+- pipe-user-pages-hard
+- pipe-user-pages-soft
- protected_hardlinks
- protected_symlinks
- suid_dumpable
@@ -159,6 +161,27 @@ The default is 65534.

==============================================================

+pipe-user-pages-hard:
+
+Maximum total number of pages a non-privileged user may allocate for pipes.
+Once this limit is reached, no new pipes may be allocated until usage goes
+below the limit again. When set to 0, no limit is applied, which is the default
+setting.
+
+==============================================================
+
+pipe-user-pages-soft:
+
+Maximum total number of pages a non-privileged user may allocate for pipes
+before the pipe size gets limited to a single page. Once this limit is reached,
+new pipes will be limited to a single page in size for this user in order to
+limit total memory usage, and trying to increase them using fcntl() will be
+denied until usage goes below the limit again. The default value allows
+allocating up to 1024 pipes at their default size. When set to 0, no limit is
+applied.
+
+==============================================================
+
protected_hardlinks:

A long-standing class of security issues is the hardlink-based
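
For context, a minimal user-space sketch of how these two limits surface to applications (not taken from the patch; it assumes only the standard Linux F_SETPIPE_SZ/F_GETPIPE_SZ fcntls): past the soft limit, growing a pipe is denied with EPERM for unprivileged users, and past the hard limit pipe() itself fails.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];

	if (pipe(fds) < 0) {            /* fails outright past the hard limit */
		perror("pipe");
		return 1;
	}
	/* Ask for a 1 MiB buffer; the kernel rounds up to a power-of-two
	 * number of pages. Past the soft limit this is denied with EPERM. */
	if (fcntl(fds[1], F_SETPIPE_SZ, 1024 * 1024) < 0)
		perror("F_SETPIPE_SZ");
	else
		printf("pipe buffer: %d bytes\n", fcntl(fds[1], F_GETPIPE_SZ));
	close(fds[0]);
	close(fds[1]);
	return 0;
}
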
diff --git a/Makefile b/Makefile
index 868093c16ae0..d3cb458b295a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 10
-SUBLEVEL = 102
+SUBLEVEL = 103
EXTRAVERSION =
NAME = TOSSUG Baby Fish

diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index ca0207b9d5b6..06997ad70725 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -131,7 +131,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
* prelogue is setup (callee regs saved and then fp set and not other
* way around
*/
- pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+ pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
return 0;

#endif
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index 3357d26ffe54..74691e652a3a 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -219,7 +219,7 @@ ex_saved_reg1:
#ifdef CONFIG_SMP
sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with
GET_CPU_ID r0 ; get to per cpu scratch mem,
- lsl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu
+ asl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu
add r0, @ex_saved_reg1, r0
#else
st r0, [@ex_saved_reg1]
@@ -239,7 +239,7 @@ ex_saved_reg1:
.macro TLBMISS_RESTORE_REGS
#ifdef CONFIG_SMP
GET_CPU_ID r0 ; get to per cpu scratch mem
- lsl r0, r0, L1_CACHE_SHIFT ; each is cache line wide
+ asl r0, r0, L1_CACHE_SHIFT ; each is cache line wide
add r0, @ex_saved_reg1, r0
ld_s r3, [r0,12]
ld_s r2, [r0, 8]
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 03deeffd9f6d..4e2110d48c41 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target,
if (ret)
return ret;

- vfp_flush_hwstate(thread);
thread->vfpstate.hard = new_vfp;
+ vfp_flush_hwstate(thread);

return 0;
}
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index 3e94811690ce..a0aee80b608d 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -275,8 +275,12 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
mm_segment_t fs;
long ret, err, i;

- if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event)))
+ if (maxevents <= 0 ||
+ maxevents > (INT_MAX/sizeof(*kbuf)) ||
+ maxevents > (INT_MAX/sizeof(*events)))
return -EINVAL;
+ if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
+ return -EFAULT;
kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
@@ -313,6 +317,8 @@ asmlinkage long sys_oabi_semtimedop(int semid,

if (nsops < 1 || nsops > SEMOPM)
return -EINVAL;
+ if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
+ return -EFAULT;
sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
if (!sops)
return -ENOMEM;
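
The epoll hunk above checks the user-supplied count against INT_MAX divided by each element size before multiplying. A self-contained sketch of the same guard (the struct and function names are invented for illustration):

#include <limits.h>
#include <stdlib.h>

struct event { int fd; unsigned int mask; };    /* stand-in element type */

/* Reject n before any multiplication so n * sizeof(struct event) cannot
 * wrap to a small allocation; this mirrors the maxevents checks above. */
static struct event *alloc_events(long n)
{
	if (n <= 0 || n > (long)(INT_MAX / sizeof(struct event)))
		return NULL;
	return malloc((size_t)n * sizeof(struct event));
}
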
diff --git a/arch/metag/include/asm/cmpxchg_lnkget.h b/arch/metag/include/asm/cmpxchg_lnkget.h
index 0154e2807ebb..2369ad394876 100644
--- a/arch/metag/include/asm/cmpxchg_lnkget.h
+++ b/arch/metag/include/asm/cmpxchg_lnkget.h
@@ -73,7 +73,7 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
" DCACHE [%2], %0\n"
#endif
"2:\n"
- : "=&d" (temp), "=&da" (retval)
+ : "=&d" (temp), "=&d" (retval)
: "da" (m), "bd" (old), "da" (new)
: "cc"
);
diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c
index b955fafc58ba..d1adc59af5bf 100644
--- a/arch/mips/ath79/early_printk.c
+++ b/arch/mips/ath79/early_printk.c
@@ -31,13 +31,15 @@ static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val)
} while (1);
}

+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
static void prom_putchar_ar71xx(unsigned char ch)
{
void __iomem *base = (void __iomem *)(KSEG1ADDR(AR71XX_UART_BASE));

- prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE);
+ prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
__raw_writel(ch, base + UART_TX * 4);
- prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE);
+ prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
}

static void prom_putchar_ar933x(unsigned char ch)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 4d6fa0bf1305..883a162083af 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -349,6 +349,7 @@ struct kvm_mips_tlb {
#define KVM_MIPS_GUEST_TLB_SIZE 64
struct kvm_vcpu_arch {
void *host_ebase, *guest_ebase;
+ int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
unsigned long host_stack;
unsigned long host_gp;

diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 1470b7b68b0e..a7e71744fe89 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -51,7 +51,7 @@ extern unsigned int vced_count, vcei_count;
* User space process size: 2GB. This is hardcoded into a few places,
* so don't change it unless you know what you are doing.
*/
-#define TASK_SIZE 0x7fff8000UL
+#define TASK_SIZE 0x80000000UL
#endif

#ifdef __KERNEL__
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
index 6a8714193fb9..b5f77f76c899 100644
--- a/arch/mips/include/uapi/asm/siginfo.h
+++ b/arch/mips/include/uapi/asm/siginfo.h
@@ -45,13 +45,13 @@ typedef struct siginfo {

/* kill() */
struct {
- pid_t _pid; /* sender's pid */
+ __kernel_pid_t _pid; /* sender's pid */
__ARCH_SI_UID_T _uid; /* sender's uid */
} _kill;

/* POSIX.1b timers */
struct {
- timer_t _tid; /* timer id */
+ __kernel_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
sigval_t _sigval; /* same as below */
@@ -60,26 +60,26 @@ typedef struct siginfo {

/* POSIX.1b signals */
struct {
- pid_t _pid; /* sender's pid */
+ __kernel_pid_t _pid; /* sender's pid */
__ARCH_SI_UID_T _uid; /* sender's uid */
sigval_t _sigval;
} _rt;

/* SIGCHLD */
struct {
- pid_t _pid; /* which child */
+ __kernel_pid_t _pid; /* which child */
__ARCH_SI_UID_T _uid; /* sender's uid */
int _status; /* exit code */
- clock_t _utime;
- clock_t _stime;
+ __kernel_clock_t _utime;
+ __kernel_clock_t _stime;
} _sigchld;

/* IRIX SIGCHLD */
struct {
- pid_t _pid; /* which child */
- clock_t _utime;
+ __kernel_pid_t _pid; /* which child */
+ __kernel_clock_t _utime;
int _status; /* exit code */
- clock_t _stime;
+ __kernel_clock_t _stime;
} _irix_sigchld;

/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index cab150789c8d..b657fbefc466 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -349,7 +349,7 @@ EXPORT(sysn32_call_table)
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key
PTR sys_request_key
- PTR sys_keyctl /* 6245 */
+ PTR compat_sys_keyctl /* 6245 */
PTR sys_set_thread_area
PTR sys_inotify_init
PTR sys_inotify_add_watch
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 37605dc8eef7..bf56d7e271dd 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -474,7 +474,7 @@ sys_call_table:
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key /* 4280 */
PTR sys_request_key
- PTR sys_keyctl
+ PTR compat_sys_keyctl
PTR sys_set_thread_area
PTR sys_inotify_init
PTR sys_inotify_add_watch /* 4285 */
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
index 34c35f0e3290..73553cd98070 100644
--- a/arch/mips/kvm/kvm_locore.S
+++ b/arch/mips/kvm/kvm_locore.S
@@ -227,6 +227,7 @@ FEXPORT(__kvm_mips_load_k0k1)
/* Jump to guest */
eret
.set pop
+EXPORT(__kvm_mips_vcpu_run_end)

VECTOR(MIPSX(exception), unknown)
/*
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index 8aa5f30d8579..97a181a44e53 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -343,6 +343,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
memcpy(gebase + offset, mips32_GuestException,
mips32_GuestExceptionEnd - mips32_GuestException);

+#ifdef MODULE
+ offset += mips32_GuestExceptionEnd - mips32_GuestException;
+ memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
+ __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
+ vcpu->arch.vcpu_run = gebase + offset;
+#else
+ vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
+#endif
+
/* Invalidate the icache for these ranges */
mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));

@@ -426,7 +435,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)

kvm_guest_enter();

- r = __kvm_mips_vcpu_run(run, vcpu);
+ r = vcpu->arch.vcpu_run(run, vcpu);

kvm_guest_exit();
local_irq_enable();
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
index 33085819cd89..9f7643874fba 100644
--- a/arch/mips/kvm/kvm_mips_emul.c
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -972,8 +972,13 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
preempt_disable();
if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {

- if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
- kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
+ if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
+ kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
+ kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
+ __func__, va, vcpu, read_c0_entryhi());
+ er = EMULATE_FAIL;
+ preempt_enable();
+ goto done;
}
} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
@@ -1006,11 +1011,16 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
run, vcpu);
preempt_enable();
goto dont_update_pc;
- } else {
- /* We fault an entry from the guest tlb to the shadow host TLB */
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
- NULL,
- NULL);
+ }
+ /* We fault an entry from the guest tlb to the shadow host TLB */
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+ NULL, NULL)) {
+ kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
+ __func__, va, index, vcpu,
+ read_c0_entryhi());
+ er = EMULATE_FAIL;
+ preempt_enable();
+ goto done;
}
}
} else {
@@ -1821,8 +1831,13 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
#endif
/* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
- NULL);
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+ NULL, NULL)) {
+ kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
+ __func__, va, index, vcpu,
+ read_c0_entryhi());
+ er = EMULATE_FAIL;
+ }
}
}

diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/kvm_mips_int.h
index 20da7d29eede..bf41ea36210e 100644
--- a/arch/mips/kvm/kvm_mips_int.h
+++ b/arch/mips/kvm/kvm_mips_int.h
@@ -27,6 +27,8 @@
#define MIPS_EXC_MAX 12
/* XXXSL More to follow */

+extern char __kvm_mips_vcpu_run_end[];
+
#define C_TI (_ULCAST_(1) << 30)

#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
index c777dd36d4a8..4bee4397dca8 100644
--- a/arch/mips/kvm/kvm_tlb.c
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -312,7 +312,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
}

gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
- if (gfn >= kvm->arch.guest_pmap_npages) {
+ if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
gfn, badvaddr);
kvm_mips_dump_host_tlbs();
@@ -397,21 +397,38 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
struct kvm *kvm = vcpu->kvm;
pfn_t pfn0, pfn1;
+ gfn_t gfn0, gfn1;
+ long tlb_lo[2];
+
+ tlb_lo[0] = tlb->tlb_lo0;
+ tlb_lo[1] = tlb->tlb_lo1;
+
+ /*
+ * The commpage address must not be mapped to anything else if the guest
+ * TLB contains entries nearby, or commpage accesses will break.
+ */
+ if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
+ VPN2_MASK & (PAGE_MASK << 1)))
+ tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
+
+ gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
+ gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
+ if (gfn0 >= kvm->arch.guest_pmap_npages ||
+ gfn1 >= kvm->arch.guest_pmap_npages) {
+ kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
+ __func__, gfn0, gfn1, tlb->tlb_hi);
+ kvm_mips_dump_guest_tlbs(vcpu);
+ return -1;
+ }

+ if (kvm_mips_map_page(kvm, gfn0) < 0)
+ return -1;

- if ((tlb->tlb_hi & VPN2_MASK) == 0) {
- pfn0 = 0;
- pfn1 = 0;
- } else {
- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0)
- return -1;
-
- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0)
- return -1;
+ if (kvm_mips_map_page(kvm, gfn1) < 0)
+ return -1;

- pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
- pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
- }
+ pfn0 = kvm->arch.guest_pmap[gfn0];
+ pfn1 = kvm->arch.guest_pmap[gfn1];

if (hpa0)
*hpa0 = pfn0 << PAGE_SHIFT;
@@ -423,9 +440,9 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
- (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
+ (tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V);
entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
- (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
+ (tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V);

#ifdef DEBUG
kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
@@ -909,10 +926,16 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
local_irq_restore(flags);
return KVM_INVALID_INST;
}
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
- &vcpu->arch.
- guest_tlb[index],
- NULL, NULL);
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+ &vcpu->arch.guest_tlb[index],
+ NULL, NULL)) {
+ kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
+ __func__, opc, index, vcpu,
+ read_c0_entryhi());
+ kvm_mips_dump_guest_tlbs(vcpu);
+ local_irq_restore(flags);
+ return KVM_INVALID_INST;
+ }
inst = *(opc);
}
local_irq_restore(flags);
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index f03771900813..3d492a823a55 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -684,9 +684,11 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
case spec_op:
switch (insn.r_format.func) {
case jalr_op:
- regs->regs[insn.r_format.rd] =
- regs->cp0_epc + dec_insn.pc_inc +
- dec_insn.next_pc_inc;
+ if (insn.r_format.rd != 0) {
+ regs->regs[insn.r_format.rd] =
+ regs->cp0_epc + dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ }
/* Fall through */
case jr_op:
*contpc = regs->regs[insn.r_format.rs];
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index d7c0acb35ec2..8d49614d600d 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -666,7 +666,7 @@ void handle_unaligned(struct pt_regs *regs)
break;
}

- if (modify && R1(regs->iir))
+ if (ret == 0 && modify && R1(regs->iir))
regs->gr[R1(regs->iir)] = newbase;


@@ -677,6 +677,14 @@ void handle_unaligned(struct pt_regs *regs)

if (ret)
{
+ /*
+ * The unaligned handler failed.
+ * If we were called by __get_user() or __put_user() jump
+ * to its exception fixup handler instead of crashing.
+ */
+ if (!user_mode(regs) && fixup_exception(regs))
+ return;
+
printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
die_if_kernel("Unaligned data reference", regs, 28);

diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 60c31698f7d5..469d7715d6aa 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -643,7 +643,7 @@
#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
#define SPRN_MMCR1 798
-#define SPRN_MMCR2 769
+#define SPRN_MMCR2 785
#define SPRN_MMCRA 0x312
#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
#define MMCRA_SDAR_DCACHE_MISS 0x40000000UL
@@ -677,13 +677,13 @@
#define SPRN_PMC6 792
#define SPRN_PMC7 793
#define SPRN_PMC8 794
-#define SPRN_SIAR 780
-#define SPRN_SDAR 781
#define SPRN_SIER 784
#define SIER_SIPR 0x2000000 /* Sampled MSR_PR */
#define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
#define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
#define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
+#define SPRN_SIAR 796
+#define SPRN_SDAR 797

#define SPRN_PA6T_MMCR0 795
#define PA6T_MMCR0_EN0 0x0000000000000001UL
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 902ca3c6b4b6..3ac1d3a90551 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -857,11 +857,6 @@ hv_facility_unavailable_relon_trampoline:
#endif
STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)

- /* Other future vectors */
- .align 7
- .globl __end_interrupts
-__end_interrupts:
-
.align 7
system_call_entry_direct:
#if defined(CONFIG_RELOCATABLE)
@@ -1191,6 +1186,17 @@ __end_handlers:
STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)

+ /*
+ * The __end_interrupts marker must be past the out-of-line (OOL)
+ * handlers, so that they are copied to real address 0x100 when running
+ * a relocatable kernel. This ensures they can be reached from the short
+ * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
+ * directly, without using LOAD_HANDLER().
+ */
+ .align 7
+ .globl __end_interrupts
+__end_interrupts:
+
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
* Data area reserved for FWNMI option.
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index d55357ee9028..a5e339806589 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1088,6 +1088,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
current->thread.regs = regs - 1;
}

+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /*
+ * Clear any transactional state, we're exec()ing. The cause is
+ * not important as there will never be a recheckpoint so it's not
+ * user visible.
+ */
+ if (MSR_TM_SUSPENDED(mfmsr()))
+ tm_reclaim_current(0);
+#endif
+
memset(regs->gpr, 0, sizeof(regs->gpr));
regs->ctr = 0;
regs->link = 0;
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 68f97d5a4679..dc0278e7fd91 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -551,29 +551,50 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
int config_addr;
int ret;
+ /* Waiting 0.2s maximum before skipping configuration */
+ int max_wait = 200;

/* Figure out the PE address */
config_addr = pe->config_addr;
if (pe->addr)
config_addr = pe->addr;

- /* Use new configure-pe function, if supported */
- if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
- ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
- config_addr, BUID_HI(pe->phb->buid),
- BUID_LO(pe->phb->buid));
- } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
- ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
- config_addr, BUID_HI(pe->phb->buid),
- BUID_LO(pe->phb->buid));
- } else {
- return -EFAULT;
- }
+ while (max_wait > 0) {
+ /* Use new configure-pe function, if supported */
+ if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
+ ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
+ config_addr, BUID_HI(pe->phb->buid),
+ BUID_LO(pe->phb->buid));
+ } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
+ ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
+ config_addr, BUID_HI(pe->phb->buid),
+ BUID_LO(pe->phb->buid));
+ } else {
+ return -EFAULT;
+ }

- if (ret)
- pr_warning("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
- __func__, pe->phb->global_number, pe->addr, ret);
+ if (!ret)
+ return ret;
+
+ /*
+ * If RTAS returns a delay value that's above 100ms, cut it
+ * down to 100ms in case firmware made a mistake. For more
+ * on how these delay values work see rtas_busy_delay_time
+ */
+ if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
+ ret <= RTAS_EXTENDED_DELAY_MAX)
+ ret = RTAS_EXTENDED_DELAY_MIN+2;
+
+ max_wait -= rtas_busy_delay_time(ret);
+
+ if (max_wait < 0)
+ break;
+
+ rtas_busy_delay(ret);
+ }

+ pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
+ __func__, pe->phb->global_number, pe->addr, ret);
return ret;
}

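
The retry loop added above follows a common bounded-backoff shape. A hedged, self-contained sketch of that shape (op_busy() and sleep_ms() are invented stand-ins for rtas_call() and rtas_busy_delay(); this is not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define MAX_SINGLE_DELAY_MS 100         /* distrust larger firmware hints */

static int attempts;
static bool op_busy(int *delay_ms) { *delay_ms = 250; return ++attempts < 2; }
static void sleep_ms(int ms) { (void)ms; /* stub for the sketch */ }

/* Retry a busy operation, capping each suggested delay and giving up
 * once a total 200 ms budget is spent, as the hunk above does. */
static int configure_with_budget(void)
{
	int budget_ms = 200, delay_ms;

	while (budget_ms > 0) {
		if (!op_busy(&delay_ms))
			return 0;               /* success */
		if (delay_ms > MAX_SINGLE_DELAY_MS)
			delay_ms = MAX_SINGLE_DELAY_MS;
		budget_ms -= delay_ms;
		if (budget_ms < 0)
			break;
		sleep_ms(delay_ms);
	}
	return -1;                              /* budget exhausted */
}

int main(void)
{
	printf("result: %d\n", configure_with_budget());
	return 0;
}
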
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 86ae364900d6..401369134ba3 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -858,7 +858,8 @@ machine_arch_initcall(pseries, find_existing_ddw_windows);
static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
struct ddw_query_response *query)
{
- struct eeh_dev *edev;
+ struct device_node *dn;
+ struct pci_dn *pdn;
u32 cfg_addr;
u64 buid;
int ret;
@@ -869,11 +870,10 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
* Retrieve them from the pci device, not the node with the
* dma-window property
*/
- edev = pci_dev_to_eeh_dev(dev);
- cfg_addr = edev->config_addr;
- if (edev->pe_config_addr)
- cfg_addr = edev->pe_config_addr;
- buid = edev->phb->buid;
+ dn = pci_device_to_OF_node(dev);
+ pdn = PCI_DN(dn);
+ buid = pdn->phb->buid;
+ cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
cfg_addr, BUID_HI(buid), BUID_LO(buid));
@@ -887,7 +887,8 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
struct ddw_create_response *create, int page_shift,
int window_shift)
{
- struct eeh_dev *edev;
+ struct device_node *dn;
+ struct pci_dn *pdn;
u32 cfg_addr;
u64 buid;
int ret;
@@ -898,11 +899,10 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
* Retrieve them from the pci device, not the node with the
* dma-window property
*/
- edev = pci_dev_to_eeh_dev(dev);
- cfg_addr = edev->config_addr;
- if (edev->pe_config_addr)
- cfg_addr = edev->pe_config_addr;
- buid = edev->phb->buid;
+ dn = pci_device_to_OF_node(dev);
+ pdn = PCI_DN(dn);
+ buid = pdn->phb->buid;
+ cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

do {
/* extra outputs are LIOBN and dma-addr (hi, lo) */
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index cd29d2f4e4f3..749313b452ae 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -54,7 +54,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
- regs->gprs[2] = error ? -error : val;
+ regs->gprs[2] = error ? error : val;
}

static inline void syscall_get_arguments(struct task_struct *task,
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 6cf0111783d3..368f3582c93e 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -168,6 +168,9 @@ isoimage: $(obj)/bzImage
for i in lib lib64 share end ; do \
if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
+ if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
+ cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
+ fi ; \
break ; \
fi ; \
if [ $$i = end ] ; then exit 1 ; fi ; \
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index be12c534fd59..29a3d1b00ca9 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -42,7 +42,34 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
#endif
cpumask_set_cpu(cpu, mm_cpumask(next));

- /* Re-load page tables */
+ /*
+ * Re-load page tables.
+ *
+ * This logic has an ordering constraint:
+ *
+ * CPU 0: Write to a PTE for 'next'
+ * CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
+ * CPU 1: set bit 1 in next's mm_cpumask
+ * CPU 1: load from the PTE that CPU 0 writes (implicit)
+ *
+ * We need to prevent an outcome in which CPU 1 observes
+ * the new PTE value and CPU 0 observes bit 1 clear in
+ * mm_cpumask. (If that occurs, then the IPI will never
+ * be sent, and CPU 0's TLB will contain a stale entry.)
+ *
+ * The bad outcome can occur if either CPU's load is
+ * reordered before that CPU's store, so both CPUs must
+ * execute full barriers to prevent this from happening.
+ *
+ * Thus, switch_mm needs a full barrier between the
+ * store to mm_cpumask and any operation that could load
+ * from next->pgd. TLB fills are special and can happen
+ * due to instruction fetches or for no reason at all,
+ * and neither LOCK nor MFENCE orders them.
+ * Fortunately, load_cr3() is serializing and gives the
+ * ordering guarantee we need.
+ *
+ */
load_cr3(next->pgd);

/* Stop flush ipis for the previous mm */
@@ -65,10 +92,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* schedule, protecting us from simultaneous changes.
*/
cpumask_set_cpu(cpu, mm_cpumask(next));
+
/*
* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
+ *
+ * As above, load_cr3() is serializing and orders TLB
+ * fills with respect to the mm_cpumask write.
*/
load_cr3(next->pgd);
load_LDT_nolock(&next->context);
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 59554dca96ec..e6a3b1e35fae 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -67,8 +67,8 @@ int amd_cache_northbridges(void)
while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
i++;

- if (i == 0)
- return 0;
+ if (!i)
+ return -ENODEV;

nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
if (!nb)
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 53a4e2744846..3ab03430211d 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -392,7 +392,7 @@ static struct cpuidle_device apm_cpuidle_device;
/*
* Local variables
*/
-static struct {
+__visible struct {
unsigned long offset;
unsigned short segment;
} apm_bios_entry;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index ac057583282a..a18154454e36 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2241,13 +2241,16 @@ __init int intel_pmu_init(void)
* counter, so do not extend mask to generic counters
*/
for_each_event_constraint(c, x86_pmu.event_constraints) {
- if (c->cmask != X86_RAW_EVENT_MASK
- || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
+ if (c->cmask == X86_RAW_EVENT_MASK
+ && c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
+ c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
continue;
}

- c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
- c->weight += x86_pmu.num_counters;
+ c->idxmsk64 &=
+ ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
+ c->weight = hweight64(c->idxmsk64);
+
}
}

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 0c6c07cea3f7..766aa3bf1798 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -908,7 +908,19 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
* normal page fault.
*/
regs->ip = (unsigned long)cur->addr;
+ /*
+ * Trap flag (TF) has been set here because this fault
+ * happened where the single stepping will be done.
+ * So clear it by resetting the current kprobe:
+ */
+ regs->flags &= ~X86_EFLAGS_TF;
+
+ /*
+ * If the TF flag was set before the kprobe hit,
+ * don't touch it:
+ */
regs->flags |= kcb->kprobe_old_flags;
+
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3c0b085b4336..8e57771d4bfd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2966,6 +2966,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
if (dbgregs->flags)
return -EINVAL;

+ if (dbgregs->dr6 & ~0xffffffffull)
+ return -EINVAL;
+ if (dbgregs->dr7 & ~0xffffffffull)
+ return -EINVAL;
+
memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
vcpu->arch.dr6 = dbgregs->dr6;
vcpu->arch.dr7 = dbgregs->dr7;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 282375f13c7e..c26b610a604d 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -149,7 +149,9 @@ void flush_tlb_current_task(void)

preempt_disable();

+ /* This is an implicit full barrier that synchronizes with switch_mm. */
local_flush_tlb();
+
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
preempt_enable();
@@ -188,11 +190,19 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned act_entries, tlb_entries = 0;

preempt_disable();
- if (current->active_mm != mm)
+ if (current->active_mm != mm) {
+ /* Synchronize with switch_mm. */
+ smp_mb();
+
goto flush_all;
+ }

if (!current->mm) {
leave_mm(smp_processor_id());
+
+ /* Synchronize with switch_mm. */
+ smp_mb();
+
goto flush_all;
}

@@ -242,10 +252,18 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
preempt_disable();

if (current->active_mm == mm) {
- if (current->mm)
+ if (current->mm) {
+ /*
+ * Implicit full barrier (INVLPG) that synchronizes
+ * with switch_mm.
+ */
__flush_tlb_one(start);
- else
+ } else {
leave_mm(smp_processor_id());
+
+ /* Synchronize with switch_mm. */
+ smp_mb();
+ }
}

if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
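
The ordering the mmu_context.h and tlb.c comments above describe is the classic store-buffering pattern. A user-space sketch in C11 atomics (illustrative only; on the kernel side load_cr3() and INVLPG act as the full barrier). Without the seq_cst fences both loads may observe 0 -- the analogue of "PTE written but IPI never sent":

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

atomic_int pte, cpumask_bit;
int r0, r1;

void *cpu0(void *arg) {
	atomic_store_explicit(&pte, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);      /* like load_cr3() */
	r0 = atomic_load_explicit(&cpumask_bit, memory_order_relaxed);
	return NULL;
}

void *cpu1(void *arg) {
	atomic_store_explicit(&cpumask_bit, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);      /* like smp_mb() */
	r1 = atomic_load_explicit(&pte, memory_order_relaxed);
	return NULL;
}

int main(void) {
	pthread_t a, b;
	pthread_create(&a, NULL, cpu0, NULL);
	pthread_create(&b, NULL, cpu1, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("r0=%d r1=%d (r0==0 && r1==0 is the forbidden outcome)\n", r0, r1);
	return 0;
}
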
diff --git a/block/genhd.c b/block/genhd.c
index b09f5fc94dee..7af2f6a18d9b 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -829,6 +829,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)
if (iter) {
class_dev_iter_exit(iter);
kfree(iter);
+ seqf->private = NULL;
}
}

diff --git a/crypto/gcm.c b/crypto/gcm.c
index cd97cdd8cabe..451e420ce56c 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -716,7 +716,9 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,

ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ CRYPTO_ALG_TYPE_AHASH_MASK |
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (IS_ERR(ghash_alg))
return ERR_CAST(ghash_alg);

diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 7281b8a93ad3..79cbbbfffffc 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -68,7 +68,8 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,

void scatterwalk_done(struct scatter_walk *walk, int out, int more)
{
- if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
+ if (!more || walk->offset >= walk->sg->offset + walk->sg->length ||
+ !(walk->offset & (PAGE_SIZE - 1)))
scatterwalk_pagedone(walk, out, more);
}
EXPORT_SYMBOL_GPL(scatterwalk_done);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index a02a91cd1de4..c5e3dd93865a 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -385,6 +385,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
int result;
struct acpi_pci_root *root;
u32 flags, base_flags;
+ bool no_aspm = false, clear_aspm = false;

root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
@@ -445,31 +446,10 @@ static int acpi_pci_root_add(struct acpi_device *device,
flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
acpi_pci_osc_support(root, flags);

- /*
- * TBD: Need PCI interface for enumeration/configuration of roots.
- */
-
mutex_lock(&acpi_pci_root_lock);
list_add_tail(&root->node, &acpi_pci_roots);
mutex_unlock(&acpi_pci_root_lock);

- /*
- * Scan the Root Bridge
- * --------------------
- * Must do this prior to any attempt to bind the root device, as the
- * PCI namespace does not get created until this call is made (and
- * thus the root bridge's pci_dev does not exist).
- */
- root->bus = pci_acpi_scan_root(root);
- if (!root->bus) {
- printk(KERN_ERR PREFIX
- "Bus %04x:%02x not present in PCI namespace\n",
- root->segment, (unsigned int)root->secondary.start);
- result = -ENODEV;
- goto out_del_root;
- }
-
- /* Indicate support for various _OSC capabilities. */
if (pci_ext_cfg_avail())
flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
if (pcie_aspm_support_enabled()) {
@@ -483,7 +463,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
if (ACPI_FAILURE(status)) {
dev_info(&device->dev, "ACPI _OSC support "
"notification failed, disabling PCIe ASPM\n");
- pcie_no_aspm();
+ no_aspm = true;
flags = base_flags;
}
}
@@ -515,7 +495,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
* We have ASPM control, but the FADT indicates
* that it's unsupported. Clear it.
*/
- pcie_clear_aspm(root->bus);
+ clear_aspm = true;
}
} else {
dev_info(&device->dev,
@@ -524,7 +504,14 @@ static int acpi_pci_root_add(struct acpi_device *device,
acpi_format_exception(status), flags);
pr_info("ACPI _OSC control for PCIe not granted, "
"disabling ASPM\n");
- pcie_no_aspm();
+ /*
+ * We want to disable ASPM here, but aspm_disabled
+ * needs to remain in its state from boot so that we
+ * properly handle PCIe 1.1 devices. So we set this
+ * flag here, to defer the action until after the ACPI
+ * root scan.
+ */
+ no_aspm = true;
}
} else {
dev_info(&device->dev,
@@ -532,6 +519,33 @@ static int acpi_pci_root_add(struct acpi_device *device,
"(_OSC support mask: 0x%02x)\n", flags);
}

+ /*
+ * TBD: Need PCI interface for enumeration/configuration of roots.
+ */
+
+ /*
+ * Scan the Root Bridge
+ * --------------------
+ * Must do this prior to any attempt to bind the root device, as the
+ * PCI namespace does not get created until this call is made (and
+ * thus the root bridge's pci_dev does not exist).
+ */
+ root->bus = pci_acpi_scan_root(root);
+ if (!root->bus) {
+ dev_err(&device->dev,
+ "Bus %04x:%02x not present in PCI namespace\n",
+ root->segment, (unsigned int)root->secondary.start);
+ result = -ENODEV;
+ goto end;
+ }
+
+ if (clear_aspm) {
+ dev_info(&device->dev, "Disabling ASPM (FADT indicates it is unsupported)\n");
+ pcie_clear_aspm(root->bus);
+ }
+ if (no_aspm)
+ pcie_no_aspm();
+
pci_acpi_add_bus_pm_notifier(device, root->bus);
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);
@@ -548,11 +562,6 @@ static int acpi_pci_root_add(struct acpi_device *device,
pci_bus_add_devices(root->bus);
return 1;

-out_del_root:
- mutex_lock(&acpi_pci_root_lock);
- list_del(&root->node);
- mutex_unlock(&acpi_pci_root_lock);
-
end:
kfree(root);
return result;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 063036d876b0..126eb86f239f 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -604,7 +604,7 @@ void ata_scsi_error(struct Scsi_Host *host)
ata_scsi_port_error_handler(host, ap);

/* finish or retry handled scmd's and clean up */
- WARN_ON(host->host_failed || !list_empty(&eh_work_q));
+ WARN_ON(!list_empty(&eh_work_q));

DPRINTK("EXIT\n");
}
diff --git a/drivers/base/module.c b/drivers/base/module.c
index db930d3ee312..2a215780eda2 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv)

static void module_create_drivers_dir(struct module_kobject *mk)
{
- if (!mk || mk->drivers_dir)
- return;
+ static DEFINE_MUTEX(drivers_dir_mutex);

- mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+ mutex_lock(&drivers_dir_mutex);
+ if (mk && !mk->drivers_dir)
+ mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+ mutex_unlock(&drivers_dir_mutex);
}

void module_add_driver(struct module *mod, struct device_driver *drv)
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 6789c1653913..cde4a6e0fab0 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -806,7 +806,7 @@ int hash_process_data(
&device_data->state);
memmove(req_ctx->state.buffer,
device_data->state.buffer,
- HASH_BLOCK_SIZE / sizeof(u32));
+ HASH_BLOCK_SIZE);
if (ret) {
dev_err(device_data->dev, "[%s] "
"hash_resume_state()"
@@ -858,7 +858,7 @@ int hash_process_data(

memmove(device_data->state.buffer,
req_ctx->state.buffer,
- HASH_BLOCK_SIZE / sizeof(u32));
+ HASH_BLOCK_SIZE);
if (ret) {
dev_err(device_data->dev, "[%s] "
"hash_save_state()"
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 426c51dd420c..ac11e455aea5 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -75,7 +75,7 @@ MODULE_DEVICE_TABLE(i2c, pca953x_id);
#define MAX_BANK 5
#define BANK_SZ 8

-#define NBANK(chip) (chip->gpio_chip.ngpio / BANK_SZ)
+#define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ)

struct pca953x_chip {
unsigned gpio_start;
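
The NBANK fix above swaps truncating division for round-up division. A tiny sketch of the difference, using the same arithmetic as the kernel's DIV_ROUND_UP() macro (the 34-line expander is a made-up example):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* With 8 GPIOs per bank, an expander whose line count is not a
	 * multiple of 8 loses its last partial bank under plain division. */
	printf("34 gpios: %d banks (plain division says %d)\n",
	       DIV_ROUND_UP(34, 8), 34 / 8);
	return 0;
}
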
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index b78cbe74dadf..93b74107d20d 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1313,7 +1313,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
int n, int width, int height)
{
int c, o;
- struct drm_device *dev = fb_helper->dev;
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
@@ -1334,7 +1333,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
if (modes[n] == NULL)
return best_score;

- crtcs = kzalloc(dev->mode_config.num_connector *
+ crtcs = kzalloc(fb_helper->connector_count *
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
if (!crtcs)
return best_score;
@@ -1381,7 +1380,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
best_crtc = crtc;
best_score = score;
memcpy(best_crtcs, crtcs,
- dev->mode_config.num_connector *
+ fb_helper->connector_count *
sizeof(struct drm_fb_helper_crtc *));
}
}
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
index 489ffd2c66e5..a3d37e4a84ae 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -85,7 +85,7 @@ static const char *const dsi_errors[] = {
"RX Prot Violation",
"HS Generic Write FIFO Full",
"LP Generic Write FIFO Full",
- "Generic Read Data Avail"
+ "Generic Read Data Avail",
"Special Packet Sent",
"Tearing Effect",
};
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f3cce23f4a62..f4b9b1c0cae8 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1144,7 +1144,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
le16_to_cpu(firmware_info->info.usReferenceClock);
p1pll->reference_div = 0;

- if (crev < 2)
+ if ((frev < 2) && (crev < 2))
p1pll->pll_out_min =
le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
else
@@ -1153,7 +1153,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
p1pll->pll_out_max =
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);

- if (crev >= 4) {
+ if (((frev < 2) && (crev >= 4)) || (frev >= 2)) {
p1pll->lcd_pll_out_min =
le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
if (p1pll->lcd_pll_out_min == 0)
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 8c44ef57864b..a7e1893de838 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -11,6 +11,7 @@
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
#include <linux/pci.h>
+#include <linux/delay.h>

#include "radeon_acpi.h"

@@ -252,6 +253,10 @@ static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
if (!info)
return -EIO;
kfree(info);
+
+ /* 200ms delay is required after off */
+ if (state == 0)
+ msleep(200);
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 1fbd38b371d4..ea62810aeda6 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1691,7 +1691,6 @@ radeon_add_atom_connector(struct drm_device *dev,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
@@ -1889,8 +1888,10 @@ radeon_add_atom_connector(struct drm_device *dev,
}

if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
- if (i2c_bus->valid)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (i2c_bus->valid) {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;

@@ -1962,7 +1963,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
@@ -2047,10 +2047,13 @@ radeon_add_legacy_connector(struct drm_device *dev,
}

if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
- if (i2c_bus->valid)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (i2c_bus->valid) {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
+
connector->display_info.subpixel_order = subpixel_order;
drm_sysfs_connector_add(connector);
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 8df1525f71d2..e9db3f8125ed 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -449,6 +449,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
/*
* GPU helpers function.
*/
+
+/**
+ * radeon_device_is_virtual - check if we are running in a virtual environment
+ *
+ * Check if the asic has been passed through to a VM (all asics).
+ * Used at driver startup.
+ * Returns true if virtual or false if not.
+ */
+static bool radeon_device_is_virtual(void)
+{
+#ifdef CONFIG_X86
+ return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+ return false;
+#endif
+}
+
/**
* radeon_card_posted - check if the hw has already been initialized
*
@@ -462,6 +479,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;

+ /* for pass through, always force asic_init */
+ if (radeon_device_is_virtual())
+ return false;
+
/* required for EFI mode on macbook2,1 which uses an r5xx asic */
if (efi_enabled(EFI_BOOT) &&
(rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
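
The radeon_device_is_virtual() helper above keys off X86_FEATURE_HYPERVISOR. For reference, a user-space sketch of the same test (the hypervisor-present bit is CPUID leaf 1, ECX bit 31; GCC's __get_cpuid() helper is assumed):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx = 0, edx;

	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) && (ecx & (1u << 31)))
		printf("running under a hypervisor\n");
	else
		printf("bare metal (or CPUID unavailable)\n");
	return 0;
}
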
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index a3915d12e746..eb5700e40e1a 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1084,7 +1084,7 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
return;

/* report the usage code as scancode if the key status has changed */
- if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
+ if (usage->type == EV_KEY && (!!test_bit(usage->code, input->key)) != value)
input_event(input, EV_MSC, MSC_SCAN, usage->hid);

input_event(input, usage->type, usage->code, value);
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 2f1ddca6f2e0..700145b15088 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
goto inval;
} else if (uref->usage_index >= field->report_count)
goto inval;
-
- else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
- (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
- uref->usage_index + uref_multi->num_values > field->report_count))
- goto inval;
}

+ if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
+ (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
+ uref->usage_index + uref_multi->num_values > field->report_count))
+ goto inval;
+
switch (cmd) {
case HIDIOCGUSAGE:
uref->value = field->value[uref->usage_index];
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 7c9a1d97dc68..a22c427454db 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -81,7 +81,7 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)

mutex_lock(&st->buf_lock);
ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
- if (ret)
+ if (ret < 0)
goto error_ret;
st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
@@ -163,7 +163,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
break;
case IIO_CHAN_INFO_SCALE:
ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
- if (ret)
+ if (ret < 0)
goto error_ret;
*val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
ret = IIO_VAL_INT_PLUS_MICRO;
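
The kxsd9 hunks above fix a recurring bug class: spi_w8r8() returns either a negative errno or the byte that was read, so any non-zero data byte tripped the old `if (ret)` check. A self-contained sketch (fake_spi_w8r8() is an invented stand-in):

#include <stdio.h>

static int fake_spi_w8r8(unsigned char reg)
{
	(void)reg;
	return 0x5a;            /* a perfectly valid register value */
}

int main(void)
{
	int ret = fake_spi_w8r8(0x0c);

	if (ret)                /* old check: wrongly treats data as error */
		printf("old check would bail on 0x%02x\n", ret);
	if (ret < 0)            /* new check: only negative errno is an error */
		printf("error\n");
	else
		printf("read 0x%02x\n", ret);
	return 0;
}
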
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
|
|
index c2744a75c3b0..6569a4e2a436 100644
|
|
--- a/drivers/iio/adc/ad7266.c
|
|
+++ b/drivers/iio/adc/ad7266.c
|
|
@@ -406,7 +406,7 @@ static int ad7266_probe(struct spi_device *spi)
|
|
st = iio_priv(indio_dev);
|
|
|
|
st->reg = regulator_get(&spi->dev, "vref");
|
|
- if (!IS_ERR_OR_NULL(st->reg)) {
|
|
+ if (!IS_ERR(st->reg)) {
|
|
ret = regulator_enable(st->reg);
|
|
if (ret)
|
|
goto error_put_reg;
|
|
@@ -417,6 +417,10 @@ static int ad7266_probe(struct spi_device *spi)
|
|
|
|
st->vref_uv = ret;
|
|
} else {
|
|
+ /* Any other error indicates that the regulator does exist */
|
|
+ if (PTR_ERR(st->reg) != -ENODEV)
|
|
+ return PTR_ERR(st->reg);
|
|
+
|
|
/* Use internal reference */
|
|
st->vref_uv = 2500000;
|
|
}
|
|
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
|
|
index 4d6c7d84e155..301becccf5ed 100644
|
|
--- a/drivers/iio/industrialio-trigger.c
|
|
+++ b/drivers/iio/industrialio-trigger.c
|
|
@@ -203,22 +203,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
|
|
|
|
/* Prevent the module from being removed whilst attached to a trigger */
|
|
__module_get(pf->indio_dev->info->driver_module);
|
|
+
|
|
+ /* Get irq number */
|
|
pf->irq = iio_trigger_get_irq(trig);
|
|
+ if (pf->irq < 0)
|
|
+ goto out_put_module;
|
|
+
|
|
+ /* Request irq */
|
|
ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
|
|
pf->type, pf->name,
|
|
pf);
|
|
- if (ret < 0) {
|
|
- module_put(pf->indio_dev->info->driver_module);
|
|
- return ret;
|
|
- }
|
|
+ if (ret < 0)
|
|
+ goto out_put_irq;
|
|
|
|
+ /* Enable trigger in driver */
|
|
if (trig->ops && trig->ops->set_trigger_state && notinuse) {
|
|
ret = trig->ops->set_trigger_state(trig, true);
|
|
if (ret < 0)
|
|
- module_put(pf->indio_dev->info->driver_module);
|
|
+ goto out_free_irq;
|
|
}
|
|
|
|
return ret;
|
|
+
|
|
+out_free_irq:
|
|
+ free_irq(pf->irq, pf);
|
|
+out_put_irq:
|
|
+ iio_trigger_put_irq(trig, pf->irq);
|
|
+out_put_module:
|
|
+ module_put(pf->indio_dev->info->driver_module);
|
|
+ return ret;
|
|
}
|
|
|
|
static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
|
|
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index f2f63933e8a9..5befec118a18 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -48,6 +48,7 @@

#include <asm/uaccess.h>

+#include <rdma/ib.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_user_cm.h>
#include <rdma/ib_marshall.h>
@@ -1104,6 +1105,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
struct ib_ucm_cmd_hdr hdr;
ssize_t result;

+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (len < sizeof(hdr))
return -EINVAL;

diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 5ca44cd9b00c..99f1c170770f 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -43,6 +43,7 @@
#include <linux/sysctl.h>
#include <linux/module.h>

+#include <rdma/ib.h>
#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
@@ -1249,6 +1250,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
struct rdma_ucm_cmd_hdr hdr;
ssize_t ret;

+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (len < sizeof(hdr))
return -EINVAL;

diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index b6062b9236a2..f50623d07a75 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -48,6 +48,8 @@

#include <asm/uaccess.h>

+#include <rdma/ib.h>
+
#include "uverbs.h"

MODULE_AUTHOR("Roland Dreier");
@@ -588,6 +590,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
struct ib_uverbs_file *file = filp->private_data;
struct ib_uverbs_cmd_hdr hdr;

+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (count < sizeof hdr)
return -EINVAL;

diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 890c23b3d714..f55d69500a5f 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -65,6 +65,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,

ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
ah->av.ib.g_slid = ah_attr->src_path_bits;
+ ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
if (ah_attr->ah_flags & IB_AH_GRH) {
ah->av.ib.g_slid |= 0x80;
ah->av.ib.gid_index = ah_attr->grh.sgid_index;
@@ -82,7 +83,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
!(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
--ah->av.ib.stat_rate;
}
- ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);

return &ah->ibah;
}
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 262a18437ceb..1fe3bdb0da14 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -346,7 +346,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
sizeof (struct mlx4_wqe_raddr_seg);
case MLX4_IB_QPT_RC:
return sizeof (struct mlx4_wqe_ctrl_seg) +
- sizeof (struct mlx4_wqe_atomic_seg) +
+ sizeof (struct mlx4_wqe_masked_atomic_seg) +
sizeof (struct mlx4_wqe_raddr_seg);
case MLX4_IB_QPT_SMI:
case MLX4_IB_QPT_GSI:
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index b56c9428f3c5..8cb29b36c82a 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -45,6 +45,8 @@
#include <linux/delay.h>
#include <linux/export.h>

+#include <rdma/ib.h>
+
#include "qib.h"
#include "qib_common.h"
#include "qib_user_sdma.h"
@@ -1977,6 +1979,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
ssize_t ret = 0;
void *dest;

+ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+ return -EACCES;
+
if (count < sizeof(cmd.type)) {
ret = -EINVAL;
goto bail;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index b6e049a3c7a8..a481094af85f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -887,7 +887,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
neigh = NULL;
goto out_unlock;
}
- neigh->alive = jiffies;
+
+ if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
+ neigh->alive = jiffies;
goto out_unlock;
}
}
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 856c1b03e22d..685e125d6366 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -843,6 +843,9 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
struct usb_endpoint_descriptor *ep_irq_in;
int i, error;

+ if (intf->cur_altsetting->desc.bNumEndpoints != 2)
+ return -ENODEV;
+
for (i = 0; xpad_device[i].idVendor; i++) {
if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
(le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct))
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index a0a4bbaef02c..3f2f3ac96a55 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -835,9 +835,15 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}

#ifdef CONFIG_COMPAT
+
+#define UI_SET_PHYS_COMPAT _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)
+
static long uinput_compat_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
+ if (cmd == UI_SET_PHYS_COMPAT)
+ cmd = UI_SET_PHYS;
+
return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg));
}
#endif
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 9a83be6b6584..abba11220f29 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -28,7 +28,7 @@ MODULE_AUTHOR("Jaya Kumar <jayakumar.lkml@gmail.com>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

-#define W8001_MAX_LENGTH 11
+#define W8001_MAX_LENGTH 13
#define W8001_LEAD_MASK 0x80
#define W8001_LEAD_BYTE 0x80
#define W8001_TAB_MASK 0x40
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index a7e4939787c9..eab9167937e2 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2295,8 +2295,8 @@ _hfcpci_softirq(struct device *dev, void *arg)
static void
hfcpci_softirq(void *arg)
{
- (void) driver_for_each_device(&hfc_driver.driver, NULL, arg,
- _hfcpci_softirq);
+ WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, arg,
+ _hfcpci_softirq) != 0);

/* if next event would be in the past ... */
if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 7fcf21cb4ff8..a9a47cd029d5 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -286,10 +286,16 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
pb->bio_submitted = true;

/*
- * Map reads as normal.
+ * Map reads as normal only if corrupt_bio_byte set.
*/
- if (bio_data_dir(bio) == READ)
- goto map_bio;
+ if (bio_data_dir(bio) == READ) {
+ /* If flags were specified, only corrupt those that match. */
+ if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
+ all_corrupt_bio_flags_match(bio, fc))
+ goto map_bio;
+ else
+ return -EIO;
+ }

/*
* Drop writes?
@@ -327,12 +333,13 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)

/*
* Corrupt successful READs while in down state.
- * If flags were specified, only corrupt those that match.
*/
- if (fc->corrupt_bio_byte && !error && pb->bio_submitted &&
- (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
- all_corrupt_bio_flags_match(bio, fc))
- corrupt_bio_data(bio, fc);
+ if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
+ if (fc->corrupt_bio_byte)
+ corrupt_bio_data(bio, fc);
+ else
+ return -EIO;
+ }

return error;
}
diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
index cea175d19890..4ef8a5c7003e 100644
--- a/drivers/media/dvb-frontends/stb6100.c
+++ b/drivers/media/dvb-frontends/stb6100.c
@@ -193,7 +193,7 @@ static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int st
.len = len + 1
};

- if (1 + len > sizeof(buf)) {
+ if (1 + len > sizeof(cmdbuf)) {
printk(KERN_WARNING
"%s: i2c wr: len=%d is too big!\n",
KBUILD_MODNAME, len);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 961d7ff75427..eb92027cef92 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -1000,6 +1000,11 @@ static int match_child(struct device *dev, void *data)
return !strcmp(dev_name(dev), (char *)data);
}

+static void s5p_mfc_memdev_release(struct device *dev)
+{
+ dma_release_declared_memory(dev);
+}
+
static void *mfc_get_drv_data(struct platform_device *pdev);

static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
@@ -1012,6 +1017,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
mfc_err("Not enough memory\n");
return -ENOMEM;
}
+
+ dev_set_name(dev->mem_dev_l, "%s", "s5p-mfc-l");
+ dev->mem_dev_l->release = s5p_mfc_memdev_release;
device_initialize(dev->mem_dev_l);
of_property_read_u32_array(dev->plat_dev->dev.of_node,
"samsung,mfc-l", mem_info, 2);
@@ -1029,6 +1037,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
mfc_err("Not enough memory\n");
return -ENOMEM;
}
+
+ dev_set_name(dev->mem_dev_r, "%s", "s5p-mfc-r");
+ dev->mem_dev_r->release = s5p_mfc_memdev_release;
device_initialize(dev->mem_dev_r);
of_property_read_u32_array(dev->plat_dev->dev.of_node,
"samsung,mfc-r", mem_info, 2);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c6bf23599eb9..a2863b7b9e21 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1582,8 +1582,8 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,

packed_cmd_hdr = packed->cmd_hdr;
memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
- packed_cmd_hdr[0] = (packed->nr_entries << 16) |
- (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+ packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
+ (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
hdr_blocks = mmc_large_sector(card) ? 8 : 1;

/*
@@ -1597,14 +1597,14 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
((brq->data.blocks * brq->data.blksz) >=
card->ext_csd.data_tag_unit_size);
/* Argument of CMD23 */
- packed_cmd_hdr[(i * 2)] =
+ packed_cmd_hdr[(i * 2)] = cpu_to_le32(
(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
- blk_rq_sectors(prq);
+ blk_rq_sectors(prq));
/* Argument of CMD18 or CMD25 */
- packed_cmd_hdr[((i * 2)) + 1] =
+ packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
mmc_card_blockaddr(card) ?
- blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+ blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
packed->blocks += blk_rq_sectors(prq);
i++;
}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index a56133585e92..03331c173bd0 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -997,6 +997,9 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
goto out_detach;
}

+ /* Make device "available" before it becomes accessible via sysfs */
+ ubi_devices[ubi_num] = ubi;
+
err = uif_init(ubi, &ref);
if (err)
goto out_detach;
@@ -1041,7 +1044,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);

- ubi_devices[ubi_num] = ubi;
ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
return ubi_num;

@@ -1052,6 +1054,7 @@ out_uif:
ubi_assert(ref);
uif_close(ubi);
out_detach:
+ ubi_devices[ubi_num] = NULL;
ubi_wl_close(ubi);
ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 8330703c098f..96131eb34c9f 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -534,13 +534,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
spin_unlock(&ubi->volumes_lock);
}

- /* Change volume table record */
- vtbl_rec = ubi->vtbl[vol_id];
- vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
- err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
- if (err)
- goto out_acc;
-
if (pebs < 0) {
for (i = 0; i < -pebs; i++) {
err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
@@ -558,6 +551,24 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
spin_unlock(&ubi->volumes_lock);
}

+ /*
+ * When we shrink a volume we have to flush all pending (erase) work.
+ * Otherwise it can happen that upon next attach UBI finds a LEB with
+ * lnum > highest_lnum and refuses to attach.
+ */
+ if (pebs < 0) {
+ err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
+ if (err)
+ goto out_acc;
+ }
+
+ /* Change volume table record */
+ vtbl_rec = ubi->vtbl[vol_id];
+ vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
+ err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ if (err)
+ goto out_acc;
+
vol->reserved_pebs = reserved_pebs;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = reserved_pebs;
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 535d5dd8d816..024078c5fb16 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -731,9 +731,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)

/* upper group completed, look again in lower */
if (priv->rx_next > get_mb_rx_low_last(priv) &&
- quota > 0 && mb > get_mb_rx_last(priv)) {
+ mb > get_mb_rx_last(priv)) {
priv->rx_next = get_mb_rx_first(priv);
- goto again;
+ if (quota > 0)
+ goto again;
}

return received;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index f66aeb79abdf..464e5f66b66d 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -772,6 +772,11 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
return -EOPNOTSUPP;
}

+static void can_dellink(struct net_device *dev, struct list_head *head)
+{
+ return;
+}
+
static struct rtnl_link_ops can_link_ops __read_mostly = {
.kind = "can",
.maxtype = IFLA_CAN_MAX,
@@ -779,6 +784,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
.setup = can_setup,
.newlink = can_newlink,
.changelink = can_changelink,
+ .dellink = can_dellink,
.get_size = can_get_size,
.fill_info = can_fill_info,
.get_xstats_size = can_get_xstats_size,
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index a85a9c2f1385..7357e54f1de9 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -86,9 +86,14 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
while (!cur_buf->skb && next != rxq->read_idx) {
struct alx_rfd *rfd = &rxq->rfd[cur];

- skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+ skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
if (!skb)
break;
+
+ /* Workround for the HW RX DMA overflow issue */
+ if (((unsigned long)skb->data & 0xfff) == 0xfc0)
+ skb_reserve(skb, 64);
+
dma = dma_map_single(&alx->hw.pdev->dev,
skb->data, alx->rxbuf_size,
DMA_FROM_DEVICE);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d5643c143bb8..df3af299a7d2 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -210,7 +210,7 @@
/* Various constants */

/* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS 1
+#define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100

diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 74581cbcafa7..a5802419381f 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -477,6 +477,13 @@ advance:
if (cdc_ncm_setup(ctx))
goto error2;

+ /* Some firmwares need a pause here or they will silently fail
+ * to set up the interface properly. This value was decided
+ * empirically on a Sierra Wireless MC7455 running 02.08.02.00
+ * firmware.
+ */
+ usleep_range(10000, 20000);
+
/* configure data interface */
temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
if (temp)
@@ -598,24 +605,13 @@ EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);

static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
{
- int ret;
-
/* MBIM backwards compatible function? */
cdc_ncm_select_altsetting(dev, intf);
if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
return -ENODEV;

/* NCM data altsetting is always 1 */
- ret = cdc_ncm_bind_common(dev, intf, 1);
-
- /*
- * We should get an event when network connection is "connected" or
- * "disconnected". Set network connection in "disconnected" state
- * (carrier is OFF) during attach, so the IP network stack does not
- * start IPv6 negotiation and more.
- */
- usbnet_link_change(dev, 0, 0);
- return ret;
+ return cdc_ncm_bind_common(dev, intf, 1);
}

static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max)
@@ -1161,7 +1157,8 @@ static void cdc_ncm_disconnect(struct usb_interface *intf)

static const struct driver_info cdc_ncm_info = {
.description = "CDC NCM",
- .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
+ .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+ | FLAG_LINK_INTR,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.check_connect = cdc_ncm_check_connect,
@@ -1175,7 +1172,7 @@ static const struct driver_info cdc_ncm_info = {
static const struct driver_info wwan_info = {
.description = "Mobile Broadband Network Device",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
- | FLAG_WWAN,
+ | FLAG_LINK_INTR | FLAG_WWAN,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.check_connect = cdc_ncm_check_connect,
@@ -1189,7 +1186,7 @@ static const struct driver_info wwan_info = {
static const struct driver_info wwan_noarp_info = {
.description = "Mobile Broadband Network Device (NO ARP)",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
- | FLAG_WWAN | FLAG_NOARP,
+ | FLAG_LINK_INTR | FLAG_WWAN | FLAG_NOARP,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.check_connect = cdc_ncm_check_connect,
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index f77ef36acf87..61879b1f7083 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -77,7 +77,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath5k_led_devices) = {
/* HP Compaq CQ60-206US (ddreggors@jumptv.com) */
{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
/* HP Compaq C700 (nitrousnrg@gmail.com) */
- { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) },
+ { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 0) },
/* LiteOn AR5BXB63 (magooz@salug.it) */
{ ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) },
/* IBM-specific AR5212 (all others) */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index cb34c7895f2a..735c26620387 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1931,6 +1931,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
!info->attrs[HWSIM_ATTR_FLAGS] ||
!info->attrs[HWSIM_ATTR_COOKIE] ||
+ !info->attrs[HWSIM_ATTR_SIGNAL] ||
!info->attrs[HWSIM_ATTR_TX_INFO])
goto out;

diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 6fc0853fd7f9..d066f74f743a 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1392,9 +1392,9 @@ void rtl_watchdog_wq_callback(void *data)
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
(rtlpriv->link_info.num_rx_inperiod > 2))
- rtlpriv->enter_ps = true;
- else
rtlpriv->enter_ps = false;
+ else
+ rtlpriv->enter_ps = true;

/* LeisurePS only work in infra mode. */
schedule_work(&rtlpriv->works.lps_change_work);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index d332d55885f8..2d7cd0c080d3 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -173,9 +173,6 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct pci_bus_region region;
bool bar_too_big = false, bar_disabled = false;

- if (dev->non_compliant_bars)
- return 0;
-
mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

/* No printks while decoding is disabled! */
@@ -295,6 +292,9 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
unsigned int pos, reg;

+ if (dev->non_compliant_bars)
+ return;
+
for (pos = 0; pos < howmany; pos++) {
struct resource *res = &dev->resource[pos];
reg = PCI_BASE_ADDRESS_0 + (pos << 2);
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index d111c8687f9b..46497c6cbcc1 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -640,6 +640,11 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
if (err)
return err;

+ err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, &wireless,
+ sizeof(wireless), 0);
+ if (err)
+ return err;
+
if (wireless & 0x1) {
wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
RFKILL_TYPE_WLAN,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index ec8ccdae7aba..0090de46aa5e 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -898,6 +898,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
qeth_l2_set_offline(cgdev);

if (card->dev) {
+ netif_napi_del(&card->napi);
unregister_netdev(card->dev);
card->dev = NULL;
}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index c1b0b2761f8d..7366bef742de 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3333,6 +3333,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_l3_set_offline(cgdev);

if (card->dev) {
+ netif_napi_del(&card->napi);
unregister_netdev(card->dev);
card->dev = NULL;
}
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 6a0d362e2596..284efac5f202 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -590,10 +590,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
}
return -EFAULT;
}
- /* We used to udelay() here but that absorbed
- * a CPU when a timeout occured. Not very
- * useful. */
- cpu_relax();
+ /*
+ * Allow other processes / CPUS to use core
+ */
+ schedule();
}
} else if (down_interruptible(&fibptr->event_wait)) {
/* Do nothing ... satisfy
@@ -1920,6 +1920,10 @@ int aac_command_thread(void *data)
if (difference <= 0)
difference = 1;
set_current_state(TASK_INTERRUPTIBLE);
+
+ if (kthread_should_stop())
+ break;
+
schedule_timeout(difference);

if (kthread_should_stop())
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index a683a831527b..02278130826b 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -2978,7 +2978,7 @@ be_sgl_create_contiguous(void *virtual_address,
{
WARN_ON(!virtual_address);
WARN_ON(!physical_address);
- WARN_ON(!length > 0);
+ WARN_ON(!length);
WARN_ON(!sgl);

sgl->va = virtual_address;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 25ac2c00f8b3..2891faa8e384 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9607,6 +9607,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ioa_cfg->intr_flag = IPR_USE_MSI;
else {
ioa_cfg->intr_flag = IPR_USE_LSI;
+ ioa_cfg->clear_isr = 1;
ioa_cfg->nvectors = 1;
dev_info(&pdev->dev, "Cannot enable MSI.\n");
}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 9acbc885239b..5ba69ea8eb92 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -898,7 +898,6 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
*/
void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
{
- scmd->device->host->host_failed--;
scmd->eh_eflags = 0;
list_move_tail(&scmd->eh_entry, done_q);
}
@@ -1892,6 +1891,9 @@ int scsi_error_handler(void *data)
else
scsi_unjam_host(shost);

+ /* All scmds have been handled */
+ shost->host_failed = 0;
+
/*
* Note - if the above fails completely, the action is to take
* individual devices offline and flush the queue of any
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9f3168e8e5a8..60031e15d562 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -546,66 +546,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost)

static void __scsi_release_buffers(struct scsi_cmnd *, int);

-/*
- * Function: scsi_end_request()
- *
- * Purpose: Post-processing of completed commands (usually invoked at end
- * of upper level post-processing and scsi_io_completion).
- *
- * Arguments: cmd - command that is complete.
- * error - 0 if I/O indicates success, < 0 for I/O error.
- * bytes - number of bytes of completed I/O
- * requeue - indicates whether we should requeue leftovers.
- *
- * Lock status: Assumed that lock is not held upon entry.
- *
- * Returns: cmd if requeue required, NULL otherwise.
- *
- * Notes: This is called for block device requests in order to
- * mark some number of sectors as complete.
- *
- * We are guaranteeing that the request queue will be goosed
- * at some point during this call.
- * Notes: If cmd was requeued, upon return it will be a stale pointer.
- */
-static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
- int bytes, int requeue)
-{
- struct request_queue *q = cmd->device->request_queue;
- struct request *req = cmd->request;
-
- /*
- * If there are blocks left over at the end, set up the command
- * to queue the remainder of them.
- */
- if (blk_end_request(req, error, bytes)) {
- /* kill remainder if no retrys */
- if (error && scsi_noretry_cmd(cmd))
- blk_end_request_all(req, error);
- else {
- if (requeue) {
- /*
- * Bleah. Leftovers again. Stick the
- * leftovers in the front of the
- * queue, and goose the queue again.
- */
- scsi_release_buffers(cmd);
- scsi_requeue_command(q, cmd);
- cmd = NULL;
- }
- return cmd;
- }
- }
-
- /*
- * This will goose the queue request function at the end, so we don't
- * need to worry about launching another command.
- */
- __scsi_release_buffers(cmd, 0);
- scsi_next_command(cmd);
- return NULL;
-}
-
static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
unsigned int index;
@@ -735,16 +675,9 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
*
* Returns: Nothing
*
- * Notes: This function is matched in terms of capabilities to
- * the function that created the scatter-gather list.
- * In other words, if there are no bounce buffers
- * (the normal case for most drivers), we don't need
- * the logic to deal with cleaning up afterwards.
- *
- * We must call scsi_end_request(). This will finish off
- * the specified number of sectors. If we are done, the
- * command block will be released and the queue function
- * will be goosed. If we are not done then we have to
+ * Notes: We will finish off the specified number of sectors. If we
+ * are done, the command block will be released and the queue
+ * function will be goosed. If we are not done then we have to
* figure out what to do next:
*
* a) We can call scsi_requeue_command(). The request
@@ -753,7 +686,7 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
* be used if we made forward progress, or if we want
* to switch from READ(10) to READ(6) for example.
*
- * b) We can call scsi_queue_insert(). The request will
+ * b) We can call __scsi_queue_insert(). The request will
* be put back on the queue and retried using the same
* command as before, possibly after a delay.
*
@@ -857,12 +790,28 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
}

/*
- * A number of bytes were successfully read. If there
- * are leftovers and there is some kind of error
- * (result != 0), retry the rest.
+ * special case: failed zero length commands always need to
+ * drop down into the retry code. Otherwise, if we finished
+ * all bytes in the request we are done now.
*/
- if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
- return;
+ if (!(blk_rq_bytes(req) == 0 && error) &&
+ !blk_end_request(req, error, good_bytes))
+ goto next_command;
+
+ /*
+ * Kill remainder if no retrys.
+ */
+ if (error && scsi_noretry_cmd(cmd)) {
+ blk_end_request_all(req, error);
+ goto next_command;
+ }
+
+ /*
+ * If there had been no error, but we have leftover bytes in the
+ * requeues just queue the command up again.
+ */
+ if (result == 0)
+ goto requeue;

error = __scsi_error_from_host_byte(cmd, result);

@@ -984,7 +933,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
switch (action) {
case ACTION_FAIL:
/* Give up and fail the remainder of the request */
- scsi_release_buffers(cmd);
if (!(req->cmd_flags & REQ_QUIET)) {
if (description)
scmd_printk(KERN_INFO, cmd, "%s\n",
@@ -994,12 +942,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
scsi_print_sense("", cmd);
scsi_print_command(cmd);
}
- if (blk_end_request_err(req, error))
- scsi_requeue_command(q, cmd);
- else
- scsi_next_command(cmd);
- break;
+ if (!blk_end_request_err(req, error))
+ goto next_command;
+ /*FALLTHRU*/
case ACTION_REPREP:
+ requeue:
/* Unprep the request and put it back at the head of the queue.
* A new command will be prepared and issued.
*/
@@ -1015,6 +962,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
break;
}
+ return;
+
+next_command:
+ __scsi_release_buffers(cmd, 0);
+ scsi_next_command(cmd);
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 34d18dcfa0db..109a535b639c 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -315,7 +315,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
}

/* See if there is more data to send */
- if (!xspi->remaining_bytes > 0)
+ if (xspi->remaining_bytes <= 0)
break;
}

diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index 32950ad94857..b30c41b3e0cc 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -588,7 +588,7 @@ static ssize_t sca3000_read_frequency(struct device *dev,
goto error_ret_mut;
ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
mutex_unlock(&st->lock);
- if (ret)
+ if (ret < 0)
goto error_ret;
val = ret;
if (base_freq > 0)
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index a9af1b9ae160..1f6e09649e5a 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -371,34 +371,22 @@ static void to_utf8(struct vc_data *vc, uint c)

static void do_compute_shiftstate(void)
{
- unsigned int i, j, k, sym, val;
+ unsigned int k, sym, val;

shift_state = 0;
memset(shift_down, 0, sizeof(shift_down));

- for (i = 0; i < ARRAY_SIZE(key_down); i++) {
-
- if (!key_down[i])
+ for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) {
+ sym = U(key_maps[0][k]);
+ if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
continue;

- k = i * BITS_PER_LONG;
-
- for (j = 0; j < BITS_PER_LONG; j++, k++) {
-
- if (!test_bit(k, key_down))
- continue;
+ val = KVAL(sym);
+ if (val == KVAL(K_CAPSSHIFT))
+ val = KVAL(K_SHIFT);

- sym = U(key_maps[0][k]);
- if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
- continue;
-
- val = KVAL(sym);
- if (val == KVAL(K_CAPSSHIFT))
- val = KVAL(K_SHIFT);
-
- shift_down[val]++;
- shift_state |= (1 << val);
- }
+ shift_down[val]++;
+ shift_state |= BIT(val);
}
}

diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 62e532fb82ad..cfce807531f6 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1106,10 +1106,11 @@ static int proc_getdriver(struct dev_state *ps, void __user *arg)

static int proc_connectinfo(struct dev_state *ps, void __user *arg)
{
- struct usbdevfs_connectinfo ci = {
- .devnum = ps->dev->devnum,
- .slow = ps->dev->speed == USB_SPEED_LOW
- };
+ struct usbdevfs_connectinfo ci;
+
+ memset(&ci, 0, sizeof(ci));
+ ci.devnum = ps->dev->devnum;
+ ci.slow = ps->dev->speed == USB_SPEED_LOW;

if (copy_to_user(arg, &ci, sizeof(ci)))
return -EFAULT;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 8eb2de6beee4..4e5156d212dd 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -113,6 +113,7 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
#define HUB_DEBOUNCE_STEP 25
#define HUB_DEBOUNCE_STABLE 100

+static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);

static inline char *portspeed(struct usb_hub *hub, int portstatus)
@@ -1024,10 +1025,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
unsigned delay;

/* Continue a partial initialization */
- if (type == HUB_INIT2)
- goto init2;
- if (type == HUB_INIT3)
+ if (type == HUB_INIT2 || type == HUB_INIT3) {
+ device_lock(hub->intfdev);
+
+ /* Was the hub disconnected while we were waiting? */
+ if (hub->disconnected) {
+ device_unlock(hub->intfdev);
+ kref_put(&hub->kref, hub_release);
+ return;
+ }
+ if (type == HUB_INIT2)
+ goto init2;
goto init3;
+ }
+ kref_get(&hub->kref);

/* The superspeed hub except for root hub has to use Hub Depth
* value as an offset into the route string to locate the bits
@@ -1224,6 +1235,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func3);
schedule_delayed_work(&hub->init_work,
msecs_to_jiffies(delay));
+ device_unlock(hub->intfdev);
return; /* Continues at init3: below */
} else {
msleep(delay);
@@ -1244,6 +1256,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
/* Allow autosuspend if it was suppressed */
if (type <= HUB_INIT3)
usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
+
+ if (type == HUB_INIT2 || type == HUB_INIT3)
+ device_unlock(hub->intfdev);
+
+ kref_put(&hub->kref, hub_release);
}

/* Implement the continuations for the delays above */
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 94e9cddc05c1..aa27ec1f4813 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -170,14 +170,6 @@ static const struct usb_device_id usb_quirk_list[] = {
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },

- { } /* terminating entry must be last */
-};
-
-static const struct usb_device_id usb_interface_quirk_list[] = {
- /* Logitech UVC Cameras */
- { USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0),
- .driver_info = USB_QUIRK_RESET_RESUME },
-
/* ASUS Base Station(T100) */
{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
@@ -191,6 +183,14 @@ static const struct usb_device_id usb_interface_quirk_list[] = {
{ } /* terminating entry must be last */
};

+static const struct usb_device_id usb_interface_quirk_list[] = {
+ /* Logitech UVC Cameras */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0),
+ .driver_info = USB_QUIRK_RESET_RESUME },
+
+ { } /* terminating entry must be last */
+};
+
static bool usb_match_any_interface(struct usb_device *udev,
const struct usb_device_id *id)
{
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 9d3044bdebe5..c6cc5201665a 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -581,14 +581,13 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
musb_writew(ep->regs, MUSB_TXCSR, 0);

/* scrub all previous state, clearing toggle */
- } else {
- csr = musb_readw(ep->regs, MUSB_RXCSR);
- if (csr & MUSB_RXCSR_RXPKTRDY)
- WARNING("rx%d, packet/%d ready?\n", ep->epnum,
- musb_readw(ep->regs, MUSB_RXCOUNT));
-
- musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
}
+ csr = musb_readw(ep->regs, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_RXPKTRDY)
+ WARNING("rx%d, packet/%d ready?\n", ep->epnum,
+ musb_readw(ep->regs, MUSB_RXCOUNT));
+
+ musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);

/* target addr and (for multipoint) hub addr/port */
if (musb->is_multipoint) {
@@ -948,9 +947,15 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
if (is_in) {
dma = is_dma_capable() ? ep->rx_channel : NULL;

- /* clear nak timeout bit */
+ /*
+ * Need to stop the transaction by clearing REQPKT first
+ * then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED
+ * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2
+ */
rx_csr = musb_readw(epio, MUSB_RXCSR);
rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+ rx_csr &= ~MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR, rx_csr);
rx_csr &= ~MUSB_RXCSR_DATAERROR;
musb_writew(epio, MUSB_RXCSR, rx_csr);

diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index ed4949faa70d..64223a923932 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -558,6 +558,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
struct usbhs_pipe *pipe;
int ret = -EIO;
+ unsigned long flags;
+
+ usbhs_lock(priv, flags);

/*
* if it already have pipe,
@@ -566,7 +569,8 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
if (uep->pipe) {
usbhs_pipe_clear(uep->pipe);
usbhs_pipe_sequence_data0(uep->pipe);
- return 0;
+ ret = 0;
+ goto usbhsg_ep_enable_end;
}

pipe = usbhs_pipe_malloc(priv,
@@ -594,6 +598,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
ret = 0;
}

+usbhsg_ep_enable_end:
+ usbhs_unlock(priv, flags);
+
return ret;
}

diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index bcb6f5c2bae4..006a2a721edf 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -274,6 +274,7 @@ static void option_instat_callback(struct urb *urb);
#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
+#define TELIT_PRODUCT_LE910_USBCFG4 0x1206

/* ZTE PRODUCTS */
#define ZTE_VENDOR_ID 0x19d2
@@ -1206,6 +1207,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 7d7add5ceba4..148e8ea1bc96 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -177,6 +177,8 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
num = min(num, ARRAY_SIZE(vb->pfns));

mutex_lock(&vb->balloon_lock);
+ /* We can't release more pages than taken */
+ num = min(num, (size_t)vb->num_pages);
for (vb->num_pfns = 0; vb->num_pfns < num;
vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
page = balloon_page_dequeue(vb_dev_info);
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 8abd7d579037..2e4517277e80 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -426,36 +426,7 @@ upload:

return 0;
}
-static int __init check_prereq(void)
-{
- struct cpuinfo_x86 *c = &cpu_data(0);
-
- if (!xen_initial_domain())
- return -ENODEV;
-
- if (!acpi_gbl_FADT.smi_command)
- return -ENODEV;
-
- if (c->x86_vendor == X86_VENDOR_INTEL) {
- if (!cpu_has(c, X86_FEATURE_EST))
- return -ENODEV;

- return 0;
- }
- if (c->x86_vendor == X86_VENDOR_AMD) {
- /* Copied from powernow-k8.h, can't include ../cpufreq/powernow
- * as we get compile warnings for the static functions.
- */
-#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
-#define USE_HW_PSTATE 0x00000080
- u32 eax, ebx, ecx, edx;
- cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
- if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
- return -ENODEV;
- return 0;
- }
- return -ENODEV;
-}
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

@@ -511,10 +482,10 @@ static struct syscore_ops xap_syscore_ops = {
static int __init xen_acpi_processor_init(void)
{
unsigned int i;
- int rc = check_prereq();
+ int rc;

- if (rc)
- return rc;
+ if (!xen_initial_domain())
+ return -ENODEV;

nr_acpi_bits = get_max_acpi_id() + 1;
acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 75fe3d466515..ba3fac8318bb 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
field_start = OFFSET(cfg_entry);
field_end = OFFSET(cfg_entry) + field->size;

- if ((req_start >= field_start && req_start < field_end)
- || (req_end > field_start && req_end <= field_end)) {
+ if (req_end > field_start && field_end > req_start) {
err = conf_space_read(dev, cfg_entry, field_start,
&tmp_val);
if (err)
@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
field_start = OFFSET(cfg_entry);
field_end = OFFSET(cfg_entry) + field->size;

- if ((req_start >= field_start && req_start < field_end)
- || (req_end > field_start && req_end <= field_end)) {
+ if (req_end > field_start && field_end > req_start) {
tmp_val = 0;

err = xen_pcibk_config_read(dev, field_start,
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index d05a30072023..7c33afd7d5d3 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -408,7 +408,9 @@ cifs_echo_request(struct work_struct *work)
* server->ops->need_neg() == true. Also, no need to ping if
* we got a response recently.
*/
- if (!server->ops->need_neg || server->ops->need_neg(server) ||
+
+ if (server->tcpStatus == CifsNeedReconnect ||
+ server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
(server->ops->can_echo && !server->ops->can_echo(server)) ||
time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
goto requeue_echo;
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 0c2425b21974..a998c929286f 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -227,6 +227,13 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
goto cifs_create_get_file_info;
}

+ if (S_ISDIR(newinode->i_mode)) {
+ CIFSSMBClose(xid, tcon, fid->netfid);
+ iput(newinode);
+ rc = -EISDIR;
+ goto out;
+ }
+
if (!S_ISREG(newinode->i_mode)) {
/*
* The server may allow us to open things like
@@ -391,10 +398,14 @@ cifs_create_set_dentry:
if (rc != 0) {
cifs_dbg(FYI, "Create worked, get_inode_info failed rc = %d\n",
rc);
- if (server->ops->close)
- server->ops->close(xid, tcon, fid);
- goto out;
+ goto out_err;
}
+
+ if (S_ISDIR(newinode->i_mode)) {
+ rc = -EISDIR;
+ goto out_err;
+ }
+
d_drop(direntry);
d_add(direntry, newinode);

@@ -402,6 +413,13 @@ out:
kfree(buf);
kfree(full_path);
return rc;
+
+out_err:
+ if (server->ops->close)
+ server->ops->close(xid, tcon, fid);
+ if (newinode)
+ iput(newinode);
+ goto out;
}

int
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index eb0de4c3ca76..9dd8c968d94e 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1250,6 +1250,33 @@ SMB2_echo(struct TCP_Server_Info *server)

cifs_dbg(FYI, "In echo request\n");

+ if (server->tcpStatus == CifsNeedNegotiate) {
+ struct list_head *tmp, *tmp2;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+
+ cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each(tmp, &server->smb_ses_list) {
+ ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+ list_for_each(tmp2, &ses->tcon_list) {
+ tcon = list_entry(tmp2, struct cifs_tcon,
+ tcon_list);
+ /* add check for persistent handle reconnect */
+ if (tcon && tcon->need_reconnect) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ rc = smb2_reconnect(SMB2_ECHO, tcon);
+ spin_lock(&cifs_tcp_ses_lock);
+ }
+ }
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
+ }
+
+ /* if no session, renegotiate failed above */
+ if (server->tcpStatus == CifsNeedNegotiate)
+ return -EIO;
+
rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
if (rc)
return rc;
diff --git a/fs/dcache.c b/fs/dcache.c
index 17222fa5bdc6..2d0b9d2f3c43 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1311,7 +1311,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
struct dentry *dentry = __d_alloc(parent->d_sb, name);
if (!dentry)
return NULL;
-
+ dentry->d_flags |= DCACHE_RCUACCESS;
spin_lock(&parent->d_lock);
/*
* don't need child lock because it is not subject
@@ -2101,7 +2101,6 @@ static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
{
BUG_ON(!d_unhashed(entry));
hlist_bl_lock(b);
- entry->d_flags |= DCACHE_RCUACCESS;
hlist_bl_add_head_rcu(&entry->d_hash, b);
hlist_bl_unlock(b);
}
@@ -2285,6 +2284,7 @@ static void __d_move(struct dentry * dentry, struct dentry * target)

/* ... and switch the parents */
if (IS_ROOT(dentry)) {
+ dentry->d_flags |= DCACHE_RCUACCESS;
dentry->d_parent = target->d_parent;
target->d_parent = target;
INIT_LIST_HEAD(&target->d_child);
@@ -2401,6 +2401,7 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
switch_names(dentry, anon);
swap(dentry->d_name.hash, anon->d_name.hash);

+ dentry->d_flags |= DCACHE_RCUACCESS;
dentry->d_parent = dentry;
list_del_init(&dentry->d_child);
anon->d_parent = dparent;
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 9ff3664bb3ea..d4644cc938ba 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -183,6 +183,19 @@ out:
return rc;
}

+static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct file *lower_file = ecryptfs_file_to_lower(file);
+ /*
+ * Don't allow mmap on top of file systems that don't support it
+ * natively. If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs
+ * allows recursive mounting, this will need to be extended.
+ */
+ if (!lower_file->f_op->mmap)
+ return -ENODEV;
+ return generic_file_mmap(file, vma);
+}
+
/**
* ecryptfs_open
* @inode: inode speciying file to open
@@ -358,7 +371,7 @@ const struct file_operations ecryptfs_main_fops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = ecryptfs_compat_ioctl,
#endif
- .mmap = generic_file_mmap,
+ .mmap = ecryptfs_mmap,
.open = ecryptfs_open,
.flush = ecryptfs_flush,
.release = ecryptfs_release,
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index df633bb25909..7eea76168d33 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -361,9 +361,13 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
ext4_fsblk_t block = ext4_ext_pblock(ext);
int len = ext4_ext_get_actual_len(ext);
ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
- ext4_lblk_t last = lblock + len - 1;

- if (len == 0 || lblock > last)
+ /*
+ * We allow neither:
+ * - zero length
+ * - overflow/wrap-around
+ */
+ if (lblock + len <= lblock)
return 0;
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}
@@ -454,6 +458,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
error_msg = "invalid extent entries";
goto corrupted;
}
+ if (unlikely(depth > 32)) {
+ error_msg = "too large eh_depth";
+ goto corrupted;
+ }
/* Verify checksum on non-root extent tree nodes */
if (ext_depth(inode) != depth &&
!ext4_extent_block_csum_verify(inode, eh)) {
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 4d4718cf25ab..00cbc648e1dc 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1027,11 +1027,13 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
goto iget_failed;

/*
- * If the orphans has i_nlinks > 0 then it should be able to be
- * truncated, otherwise it won't be removed from the orphan list
- * during processing and an infinite loop will result.
+ * If the orphans has i_nlinks > 0 then it should be able to
+ * be truncated, otherwise it won't be removed from the orphan
+ * list during processing and an infinite loop will result.
+ * Similarly, it must not be a bad inode.
*/
- if (inode->i_nlink && !ext4_can_truncate(inode))
+ if ((inode->i_nlink && !ext4_can_truncate(inode)) ||
+ is_bad_inode(inode))
goto bad_orphan;

if (NEXT_ORPHAN(inode) > max_ino)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index fb7e576df25c..221b58298847 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -206,9 +206,9 @@ void ext4_evict_inode(struct inode *inode)
* Note that directories do not have this problem because they
* don't use page cache.
*/
- if (ext4_should_journal_data(inode) &&
- (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
- inode->i_ino != EXT4_JOURNAL_INO) {
+ if (inode->i_ino != EXT4_JOURNAL_INO &&
+ ext4_should_journal_data(inode) &&
+ (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 61ee01603940..08b4495c1b12 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1232,6 +1232,7 @@ static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
int order = 1;
+ int bb_incr = 1 << (e4b->bd_blkbits - 1);
void *bb;

BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
@@ -1244,7 +1245,8 @@ static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
/* this block is part of buddy of order 'order' */
return order;
}
- bb += 1 << (e4b->bd_blkbits - order);
+ bb += bb_incr;
+ bb_incr >>= 1;
order++;
}
return 0;
@@ -2514,7 +2516,7 @@ int ext4_mb_init(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
unsigned i, j;
- unsigned offset;
+ unsigned offset, offset_incr;
unsigned max;
int ret;

@@ -2543,11 +2545,13 @@ int ext4_mb_init(struct super_block *sb)

i = 1;
offset = 0;
+ offset_incr = 1 << (sb->s_blocksize_bits - 1);
max = sb->s_blocksize << 2;
do {
sbi->s_mb_offsets[i] = offset;
sbi->s_mb_maxs[i] = max;
- offset += 1 << (sb->s_blocksize_bits - i);
+ offset += offset_incr;
+ offset_incr = offset_incr >> 1;
max = max >> 1;
i++;
} while (i <= sb->s_blocksize_bits + 1);
@@ -2872,7 +2876,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
"fs metadata", block, block+len);
/* File system mounted not to panic on error
- * Fix the bitmap and repeat the block allocation
+ * Fix the bitmap and return EUCLEAN
* We leak some of the blocks here.
*/
ext4_lock_group(sb, ac->ac_b_ex.fe_group);
@@ -2881,7 +2885,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
if (!err)
- err = -EAGAIN;
+ err = -EUCLEAN;
goto out_err;
}

@@ -4448,18 +4452,7 @@ repeat:
}
if (likely(ac->ac_status == AC_STATUS_FOUND)) {
*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
- if (*errp == -EAGAIN) {
- /*
- * drop the reference that we took
- * in ext4_mb_use_best_found
- */
- ext4_mb_release_context(ac);
- ac->ac_b_ex.fe_group = 0;
- ac->ac_b_ex.fe_start = 0;
- ac->ac_b_ex.fe_len = 0;
- ac->ac_status = AC_STATUS_CONTINUE;
- goto repeat;
- } else if (*errp) {
+ if (*errp) {
ext4_discard_allocated_blocks(ac);
goto errout;
} else {
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 063eb5094a63..15a81897df4e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2153,6 +2153,16 @@ static void ext4_orphan_cleanup(struct super_block *sb,
 	while (es->s_last_orphan) {
 		struct inode *inode;
 
+		/*
+		 * We may have encountered an error during cleanup; if
+		 * so, skip the rest.
+		 */
+		if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
+			jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
+			es->s_last_orphan = 0;
+			break;
+		}
+
 		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
 		if (IS_ERR(inode)) {
 			es->s_last_orphan = 0;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 4d371f3b9a45..efe802e5bb3d 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -913,7 +913,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
 	arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
 		FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
 		FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
-		FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
+		FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
		FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO;
 	req->in.h.opcode = FUSE_INIT;
 	req->in.numargs = 1;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index d8ac734a1e44..c2b89a1a403b 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2332,12 +2332,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
 			call_close |= is_wronly;
 		else if (is_wronly)
 			calldata->arg.fmode |= FMODE_WRITE;
+		if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
+			call_close |= is_rdwr;
 	} else if (is_rdwr)
 		calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
 
-	if (calldata->arg.fmode == 0)
-		call_close |= is_rdwr;
-
 	if (!nfs4_valid_open_stateid(state))
 		call_close = 0;
 	spin_unlock(&state->owner->so_lock);
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 41e6a04a561f..0f9a5b4ad53b 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -431,7 +431,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
 	if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
 		return 0;
 	bytes = le16_to_cpu(sbp->s_bytes);
-	if (bytes > BLOCK_SIZE)
+	if (bytes < sumoff + 4 || bytes > BLOCK_SIZE)
 		return 0;
 	crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
 		       sumoff);
diff --git a/fs/pipe.c b/fs/pipe.c
index 50267e6ba688..c281867c453e 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -39,6 +39,12 @@ unsigned int pipe_max_size = 1048576;
  */
 unsigned int pipe_min_size = PAGE_SIZE;
 
+/* Maximum allocatable pages per user. Hard limit is unset by default, soft
+ * matches default values.
+ */
+unsigned long pipe_user_pages_hard;
+unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
+
 /*
  * We use a start+len construction, which provides full use of the
  * allocated memory.
@@ -794,20 +800,49 @@ pipe_fasync(int fd, struct file *filp, int on)
 	return retval;
 }
 
+static void account_pipe_buffers(struct pipe_inode_info *pipe,
+				 unsigned long old, unsigned long new)
+{
+	atomic_long_add(new - old, &pipe->user->pipe_bufs);
+}
+
+static bool too_many_pipe_buffers_soft(struct user_struct *user)
+{
+	return pipe_user_pages_soft &&
+	       atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft;
+}
+
+static bool too_many_pipe_buffers_hard(struct user_struct *user)
+{
+	return pipe_user_pages_hard &&
+	       atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard;
+}
+
 struct pipe_inode_info *alloc_pipe_info(void)
 {
 	struct pipe_inode_info *pipe;
 
 	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
 	if (pipe) {
-		pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
+		unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
+		struct user_struct *user = get_current_user();
+
+		if (!too_many_pipe_buffers_hard(user)) {
+			if (too_many_pipe_buffers_soft(user))
+				pipe_bufs = 1;
+			pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL);
+		}
+
 		if (pipe->bufs) {
 			init_waitqueue_head(&pipe->wait);
 			pipe->r_counter = pipe->w_counter = 1;
-			pipe->buffers = PIPE_DEF_BUFFERS;
+			pipe->buffers = pipe_bufs;
+			pipe->user = user;
+			account_pipe_buffers(pipe, 0, pipe_bufs);
 			mutex_init(&pipe->mutex);
 			return pipe;
 		}
+		free_uid(user);
 		kfree(pipe);
 	}
 
@@ -818,6 +853,8 @@ void free_pipe_info(struct pipe_inode_info *pipe)
 {
 	int i;
 
+	account_pipe_buffers(pipe, pipe->buffers, 0);
+	free_uid(pipe->user);
 	for (i = 0; i < pipe->buffers; i++) {
 		struct pipe_buffer *buf = pipe->bufs + i;
 		if (buf->ops)
@@ -1208,6 +1245,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
 		memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
 	}
 
+	account_pipe_buffers(pipe, pipe->buffers, nr_pages);
 	pipe->curbuf = 0;
 	kfree(pipe->bufs);
 	pipe->bufs = bufs;
@@ -1279,6 +1317,11 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
 		if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
 			ret = -EPERM;
 			goto out;
+		} else if ((too_many_pipe_buffers_hard(pipe->user) ||
+			    too_many_pipe_buffers_soft(pipe->user)) &&
+			   !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
+			ret = -EPERM;
+			goto out;
 		}
 		ret = pipe_set_size(pipe, nr_pages);
 		break;
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 881324c08430..a335e4e6aba1 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -54,6 +54,7 @@
 #include <linux/mount.h>
 #include <linux/namei.h>
 #include <linux/slab.h>
+#include <linux/migrate.h>
 
 static int read_block(struct inode *inode, void *addr, unsigned int block,
 		      struct ubifs_data_node *dn)
@@ -1422,6 +1423,26 @@ static int ubifs_set_page_dirty(struct page *page)
 	return ret;
 }
 
+#ifdef CONFIG_MIGRATION
+static int ubifs_migrate_page(struct address_space *mapping,
+		struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+	int rc;
+
+	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
+	if (rc != MIGRATEPAGE_SUCCESS)
+		return rc;
+
+	if (PagePrivate(page)) {
+		ClearPagePrivate(page);
+		SetPagePrivate(newpage);
+	}
+
+	migrate_page_copy(newpage, page);
+	return MIGRATEPAGE_SUCCESS;
+}
+#endif
+
 static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
 {
 	/*
@@ -1558,6 +1579,9 @@ const struct address_space_operations ubifs_file_address_operations = {
 	.write_end      = ubifs_write_end,
 	.invalidatepage = ubifs_invalidatepage,
 	.set_page_dirty = ubifs_set_page_dirty,
+#ifdef CONFIG_MIGRATION
+	.migratepage	= ubifs_migrate_page,
+#endif
 	.releasepage    = ubifs_releasepage,
 };
 
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index f010ab4594f1..06dec557d247 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2604,13 +2604,14 @@ xfs_iflush_cluster(
 		 * We need to check under the i_flags_lock for a valid inode
 		 * here. Skip it if it is not valid or the wrong inode.
 		 */
-		spin_lock(&ip->i_flags_lock);
-		if (!ip->i_ino ||
+		spin_lock(&iq->i_flags_lock);
+		if (!iq->i_ino ||
+		    __xfs_iflags_test(iq, XFS_ISTALE) ||
		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
-			spin_unlock(&ip->i_flags_lock);
+			spin_unlock(&iq->i_flags_lock);
 			continue;
 		}
-		spin_unlock(&ip->i_flags_lock);
+		spin_unlock(&iq->i_flags_lock);
 
 		/*
 		 * Do an un-protected check to see if the inode is dirty and
@@ -2726,7 +2727,7 @@ xfs_iflush(
 	struct xfs_buf		**bpp)
 {
 	struct xfs_mount	*mp = ip->i_mount;
-	struct xfs_buf		*bp;
+	struct xfs_buf		*bp = NULL;
 	struct xfs_dinode	*dip;
 	int			error;
 
@@ -2768,14 +2769,22 @@ xfs_iflush(
 	}
 
 	/*
-	 * Get the buffer containing the on-disk inode.
+	 * Get the buffer containing the on-disk inode. We are doing a try-lock
+	 * operation here, so we may get an EAGAIN error. In that case, we
+	 * simply want to return with the inode still dirty.
+	 *
+	 * If we get any other error, we effectively have a corruption situation
+	 * and we cannot flush the inode, so we treat it the same as failing
+	 * xfs_iflush_int().
 	 */
 	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
			       0);
-	if (error || !bp) {
+	if (error == EAGAIN) {
 		xfs_ifunlock(ip);
 		return error;
 	}
+	if (error)
+		goto corrupt_out;
 
 	/*
 	 * First flush out the inode that xfs_iflush was called with.
@@ -2803,7 +2812,8 @@ xfs_iflush(
 	return 0;
 
 corrupt_out:
-	xfs_buf_relse(bp);
+	if (bp)
+		xfs_buf_relse(bp);
 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 cluster_corrupt_out:
 	error = XFS_ERROR(EFSCORRUPTED);
diff --git a/include/linux/console.h b/include/linux/console.h
index 73bab0f58af5..6877ffc97d8c 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -153,6 +153,7 @@ extern int console_trylock(void);
 extern void console_unlock(void);
 extern void console_conditional_schedule(void);
 extern void console_unblank(void);
+extern void console_flush_on_panic(void);
 extern struct tty_driver *console_device(int *);
 extern void console_stop(struct console *);
 extern void console_start(struct console *);
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index a405d3dc0f61..e98692748066 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -55,6 +55,9 @@ extern int migrate_vmas(struct mm_struct *mm,
 extern void migrate_page_copy(struct page *newpage, struct page *page);
 extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 				  struct page *newpage, struct page *page);
+extern int migrate_page_move_mapping(struct address_space *mapping,
+		struct page *newpage, struct page *page,
+		struct buffer_head *head, enum migrate_mode mode);
 #else
 
 static inline void putback_lru_pages(struct list_head *l) {}
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index dd49566315c6..547a5846e6ac 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -239,11 +239,18 @@ extern void xt_unregister_match(struct xt_match *target);
 extern int xt_register_matches(struct xt_match *match, unsigned int n);
 extern void xt_unregister_matches(struct xt_match *match, unsigned int n);
 
+int xt_check_entry_offsets(const void *base, const char *elems,
+			   unsigned int target_offset,
+			   unsigned int next_offset);
+
 extern int xt_check_match(struct xt_mtchk_param *,
			  unsigned int size, u_int8_t proto, bool inv_proto);
 extern int xt_check_target(struct xt_tgchk_param *,
			   unsigned int size, u_int8_t proto, bool inv_proto);
 
+void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
+				 struct xt_counters_info *info, bool compat);
+
 extern struct xt_table *xt_register_table(struct net *net,
					  const struct xt_table *table,
					  struct xt_table_info *bootstrap,
@@ -423,7 +430,7 @@ extern void xt_compat_init_offsets(u_int8_t af, unsigned int number);
 extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
 
 extern int xt_compat_match_offset(const struct xt_match *match);
-extern int xt_compat_match_from_user(struct xt_entry_match *m,
+extern void xt_compat_match_from_user(struct xt_entry_match *m,
				     void **dstptr, unsigned int *size);
 extern int xt_compat_match_to_user(const struct xt_entry_match *m,
				   void __user **dstptr, unsigned int *size);
@@ -433,6 +440,9 @@ extern void xt_compat_target_from_user(struct xt_entry_target *t,
				       void **dstptr, unsigned int *size);
 extern int xt_compat_target_to_user(const struct xt_entry_target *t,
				    void __user **dstptr, unsigned int *size);
+int xt_compat_check_entry_offsets(const void *base, const char *elems,
+				  unsigned int target_offset,
+				  unsigned int next_offset);
 
 #endif /* CONFIG_COMPAT */
 #endif /* _X_TABLES_H */
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index ab5752692113..b3374f63bc36 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -42,6 +42,7 @@ struct pipe_buffer {
  *	@fasync_readers: reader side fasync
  *	@fasync_writers: writer side fasync
  *	@bufs: the circular array of pipe buffers
+ *	@user: the user who created this pipe
  **/
 struct pipe_inode_info {
 	struct mutex mutex;
@@ -57,6 +58,7 @@ struct pipe_inode_info {
 	struct fasync_struct *fasync_readers;
 	struct fasync_struct *fasync_writers;
 	struct pipe_buffer *bufs;
+	struct user_struct *user;
 };
 
 /*
@@ -140,6 +142,8 @@ void pipe_unlock(struct pipe_inode_info *);
 void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
 
 extern unsigned int pipe_max_size, pipe_min_size;
+extern unsigned long pipe_user_pages_hard;
+extern unsigned long pipe_user_pages_soft;
 int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *);
 
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4781332f2e11..7728941e7ddc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -671,6 +671,7 @@ struct user_struct {
 #endif
 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
 	unsigned long unix_inflight;	/* How many files in flight in unix sockets */
+	atomic_long_t pipe_bufs;  /* how many pages are allocated in pipe buffers */
 
 #ifdef CONFIG_KEYS
 	struct key *uid_keyring;	/* UID specific keyring */
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
index daec99af5d54..1c88b177cb9c 100644
--- a/include/linux/usb/ehci_def.h
+++ b/include/linux/usb/ehci_def.h
@@ -178,11 +178,11 @@ struct ehci_regs {
 	 * PORTSCx
 	 */
 	/* HOSTPC: offset 0x84 */
-	u32		hostpc[1];	/* HOSTPC extension */
+	u32		hostpc[0];	/* HOSTPC extension */
 #define HOSTPC_PHCD	(1<<22)		/* Phy clock disable */
 #define HOSTPC_PSPD	(3<<25)		/* Port speed detection */
 
-	u32		reserved5[16];
+	u32		reserved5[17];
 
 	/* USBMODE_EX: offset 0xc8 */
 	u32		usbmode_ex;	/* USB Device mode extension */
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
new file mode 100644
index 000000000000..f09331ad0aba
--- /dev/null
+++ b/include/rdma/ib.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2010 Intel Corporation.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if !defined(_RDMA_IB_H)
+#define _RDMA_IB_H
+
+#include <linux/types.h>
+#include <linux/sched.h>
+
+/*
+ * The IB interfaces that use write() as bi-directional ioctl() are
+ * fundamentally unsafe, since there are lots of ways to trigger "write()"
+ * calls from various contexts with elevated privileges. That includes the
+ * traditional suid executable error message writes, but also various kernel
+ * interfaces that can write to file descriptors.
+ *
+ * This function provides protection for the legacy API by restricting the
+ * calling context.
+ */
+static inline bool ib_safe_file_access(struct file *filp)
+{
+	return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
+}
+
+#endif /* _RDMA_IB_H */
diff --git a/kernel/module.c b/kernel/module.c
index f8a4f48b48a9..2c87e521032b 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2475,13 +2475,18 @@ static inline void kmemleak_load_module(const struct module *mod,
 #endif
 
 #ifdef CONFIG_MODULE_SIG
-static int module_sig_check(struct load_info *info)
+static int module_sig_check(struct load_info *info, int flags)
 {
 	int err = -ENOKEY;
 	const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
 	const void *mod = info->hdr;
 
-	if (info->len > markerlen &&
+	/*
+	 * Require flags == 0, as a module with version information
+	 * removed is no longer the module that was signed
+	 */
+	if (flags == 0 &&
+	    info->len > markerlen &&
	    memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
 		/* We truncate the module to discard the signature */
 		info->len -= markerlen;
@@ -2503,7 +2508,7 @@ static int module_sig_check(struct load_info *info)
 	return err;
 }
 #else /* !CONFIG_MODULE_SIG */
-static int module_sig_check(struct load_info *info)
+static int module_sig_check(struct load_info *info, int flags)
 {
 	return 0;
 }
@@ -3228,7 +3233,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	struct module *mod;
 	long err;
 
-	err = module_sig_check(info);
+	err = module_sig_check(info, flags);
 	if (err)
 		goto free_copy;
 
diff --git a/kernel/panic.c b/kernel/panic.c
index 167ec097ce8b..d3d74c4e2258 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -22,6 +22,7 @@
 #include <linux/sysrq.h>
 #include <linux/init.h>
 #include <linux/nmi.h>
+#include <linux/console.h>
 
 #define PANIC_TIMER_STEP 100
 #define PANIC_BLINK_SPD 18
@@ -128,6 +129,8 @@ void panic(const char *fmt, ...)
 
 	bust_spinlocks(0);
 
+	console_flush_on_panic();
+
 	if (!panic_blink)
 		panic_blink = no_blink;
 
diff --git a/kernel/printk.c b/kernel/printk.c
index fd0154a57d6e..ee8f6be7d8a9 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -2033,13 +2033,24 @@ void console_unlock(void)
 	static u64 seen_seq;
 	unsigned long flags;
 	bool wake_klogd = false;
-	bool retry;
+	bool do_cond_resched, retry;
 
 	if (console_suspended) {
 		up(&console_sem);
 		return;
 	}
 
+	/*
+	 * Console drivers are called under logbuf_lock, so
+	 * @console_may_schedule should be cleared before; however, we may
+	 * end up dumping a lot of lines, for example, if called from
+	 * console registration path, and should invoke cond_resched()
+	 * between lines if allowable. Not doing so can cause a very long
+	 * scheduling stall on a slow console leading to RCU stall and
+	 * softlockup warnings which exacerbate the issue with more
+	 * messages practically incapacitating the system.
+	 */
+	do_cond_resched = console_may_schedule;
 	console_may_schedule = 0;
 
 	/* flush buffered message fragment immediately to console */
@@ -2096,6 +2107,9 @@ skip:
 		call_console_drivers(level, text, len);
 		start_critical_timings();
 		local_irq_restore(flags);
+
+		if (do_cond_resched)
+			cond_resched();
 	}
 	console_locked = 0;
 	mutex_release(&console_lock_dep_map, 1, _RET_IP_);
@@ -2164,6 +2178,25 @@ void console_unblank(void)
 	console_unlock();
 }
 
+/**
+ * console_flush_on_panic - flush console content on panic
+ *
+ * Immediately output all pending messages no matter what.
+ */
+void console_flush_on_panic(void)
+{
+	/*
+	 * If someone else is holding the console lock, trylock will fail
+	 * and may_schedule may be set. Ignore and proceed to unlock so
+	 * that messages are flushed out. As this can be called from any
+	 * context and we don't want to get preempted while flushing,
+	 * ensure may_schedule is cleared.
+	 */
+	console_trylock();
+	console_may_schedule = 0;
+	console_unlock();
+}
+
 /*
  * Return the console tty driver structure and its associated index
  */
diff --git a/kernel/signal.c b/kernel/signal.c
index 4d1f7fa3138d..7b81c53b0097 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3004,11 +3004,9 @@ static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
 	 */
 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
-	    (task_pid_vnr(current) != pid)) {
-		/* We used to allow any < 0 si_code */
-		WARN_ON_ONCE(info->si_code < 0);
+	    (task_pid_vnr(current) != pid))
 		return -EPERM;
-	}
+
 	info->si_signo = sig;
 
 	/* POSIX.1b doesn't mention process groups. */
@@ -3053,12 +3051,10 @@ static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
 	/* Not even root can pretend to send signals from the kernel.
 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
 	 */
-	if (((info->si_code >= 0 || info->si_code == SI_TKILL)) &&
-	    (task_pid_vnr(current) != pid)) {
-		/* We used to allow any < 0 si_code */
-		WARN_ON_ONCE(info->si_code < 0);
+	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
+	    (task_pid_vnr(current) != pid))
 		return -EPERM;
-	}
+
 	info->si_signo = sig;
 
 	return do_send_specific(tgid, pid, sig, info);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 9469f4c61a30..4fd49fe1046d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1632,6 +1632,20 @@ static struct ctl_table fs_table[] = {
 		.proc_handler	= &pipe_proc_fn,
 		.extra1		= &pipe_min_size,
 	},
+	{
+		.procname	= "pipe-user-pages-hard",
+		.data		= &pipe_user_pages_hard,
+		.maxlen		= sizeof(pipe_user_pages_hard),
+		.mode		= 0644,
+		.proc_handler	= proc_doulongvec_minmax,
+	},
+	{
+		.procname	= "pipe-user-pages-soft",
+		.data		= &pipe_user_pages_soft,
+		.maxlen		= sizeof(pipe_user_pages_soft),
+		.mode		= 0644,
+		.proc_handler	= proc_doulongvec_minmax,
+	},
 	{ }
 };
 
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index fdb23e84b011..7be4d67cecbd 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -38,6 +38,10 @@ struct trace_bprintk_fmt {
 static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
 {
 	struct trace_bprintk_fmt *pos;
+
+	if (!fmt)
+		return ERR_PTR(-EINVAL);
+
 	list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
 		if (!strcmp(pos->fmt, fmt))
 			return pos;
@@ -59,7 +63,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
 	for (iter = start; iter < end; iter++) {
 		struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
 		if (tb_fmt) {
-			*iter = tb_fmt->fmt;
+			if (!IS_ERR(tb_fmt))
+				*iter = tb_fmt->fmt;
 			continue;
 		}
 
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index eb43517bf261..c32437f6be61 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -445,9 +445,9 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 	spin_lock_irqsave(&free_entries_lock, flags);
 
 	if (list_empty(&free_entries)) {
-		pr_err("DMA-API: debugging out of memory - disabling\n");
 		global_disable = true;
 		spin_unlock_irqrestore(&free_entries_lock, flags);
+		pr_err("DMA-API: debugging out of memory - disabling\n");
 		return NULL;
 	}
 
diff --git a/mm/migrate.c b/mm/migrate.c
index a88c12f2235d..808f8abb1b8f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -30,6 +30,7 @@
 #include <linux/mempolicy.h>
 #include <linux/vmalloc.h>
 #include <linux/security.h>
+#include <linux/backing-dev.h>
 #include <linux/memcontrol.h>
 #include <linux/syscalls.h>
 #include <linux/hugetlb.h>
@@ -307,10 +308,12 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
  * 2 for pages with a mapping
  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
  */
-static int migrate_page_move_mapping(struct address_space *mapping,
+int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode)
 {
+	struct zone *oldzone, *newzone;
+	int dirty;
 	int expected_count = 0;
 	void **pslot;
 
@@ -321,6 +324,9 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 		return MIGRATEPAGE_SUCCESS;
 	}
 
+	oldzone = page_zone(page);
+	newzone = page_zone(newpage);
+
 	spin_lock_irq(&mapping->tree_lock);
 
 	pslot = radix_tree_lookup_slot(&mapping->page_tree,
@@ -361,6 +367,13 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 		set_page_private(newpage, page_private(page));
 	}
 
+	/* Move dirty while page refs frozen and newpage not yet exposed */
+	dirty = PageDirty(page);
+	if (dirty) {
+		ClearPageDirty(page);
+		SetPageDirty(newpage);
+	}
+
 	radix_tree_replace_slot(pslot, newpage);
 
 	/*
@@ -370,6 +383,9 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	 */
 	page_unfreeze_refs(page, expected_count - 1);
 
+	spin_unlock(&mapping->tree_lock);
+	/* Leave irq disabled to prevent preemption while updating stats */
+
 	/*
 	 * If moved to a different zone then also account
 	 * the page for that zone. Other VM counters will be
@@ -380,16 +396,23 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
 	 * are mapped to swap space.
 	 */
-	__dec_zone_page_state(page, NR_FILE_PAGES);
-	__inc_zone_page_state(newpage, NR_FILE_PAGES);
-	if (!PageSwapCache(page) && PageSwapBacked(page)) {
-		__dec_zone_page_state(page, NR_SHMEM);
-		__inc_zone_page_state(newpage, NR_SHMEM);
+	if (newzone != oldzone) {
+		__dec_zone_state(oldzone, NR_FILE_PAGES);
+		__inc_zone_state(newzone, NR_FILE_PAGES);
+		if (PageSwapBacked(page) && !PageSwapCache(page)) {
+			__dec_zone_state(oldzone, NR_SHMEM);
+			__inc_zone_state(newzone, NR_SHMEM);
+		}
+		if (dirty && mapping_cap_account_dirty(mapping)) {
+			__dec_zone_state(oldzone, NR_FILE_DIRTY);
+			__inc_zone_state(newzone, NR_FILE_DIRTY);
+		}
 	}
-	spin_unlock_irq(&mapping->tree_lock);
+	local_irq_enable();
 
 	return MIGRATEPAGE_SUCCESS;
 }
+EXPORT_SYMBOL(migrate_page_move_mapping);
 
 /*
  * The expected number of remaining references is the same as that
@@ -460,20 +483,9 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 	if (PageMappedToDisk(page))
 		SetPageMappedToDisk(newpage);
 
-	if (PageDirty(page)) {
-		clear_page_dirty_for_io(page);
-		/*
-		 * Want to mark the page and the radix tree as dirty, and
-		 * redo the accounting that clear_page_dirty_for_io undid,
-		 * but we can't use set_page_dirty because that function
-		 * is actually a signal that all of the page has become dirty.
-		 * Whereas only part of our page may be dirty.
-		 */
-		if (PageSwapBacked(page))
-			SetPageDirty(newpage);
-		else
-			__set_page_dirty_nobuffers(newpage);
-	}
+	/* Move dirty on pages not done by migrate_page_move_mapping() */
+	if (PageDirty(page))
+		SetPageDirty(newpage);
 
 	mlock_migrate_page(newpage, page);
 	ksm_migrate_page(newpage, page);
@@ -492,6 +504,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 	if (PageWriteback(newpage))
 		end_page_writeback(newpage);
 }
+EXPORT_SYMBOL(migrate_page_copy);
 
 /************************************************************
 *                    Migration functions
diff --git a/mm/shmem.c b/mm/shmem.c
index 4e4a7349c5cd..cc02b6c6eec4 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1948,9 +1948,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
							NULL);
 		if (error) {
 			/* Remove the !PageUptodate pages we added */
-			shmem_undo_range(inode,
-				(loff_t)start << PAGE_CACHE_SHIFT,
-				(loff_t)index << PAGE_CACHE_SHIFT, true);
+			if (index > start) {
+				shmem_undo_range(inode,
+				    (loff_t)start << PAGE_CACHE_SHIFT,
+				    ((loff_t)index << PAGE_CACHE_SHIFT) - 1, true);
+			}
 			goto undone;
 		}
 
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 5f36f70ce44d..4b966c6c0145 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -725,7 +725,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
 			break;
 		}
 
-		if (get_user(opt, (u32 __user *) optval)) {
+		if (get_user(opt, (u16 __user *) optval)) {
 			err = -EFAULT;
 			break;
 		}
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 7ec4e0522215..c1de8d404c47 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -798,6 +798,110 @@ bad:
 }
 
 /*
+ * Encoding order is (new_up_client, new_state, new_weight).  Need to
+ * apply in the (new_weight, new_state, new_up_client) order, because
+ * an incremental map may look like e.g.
+ *
+ *     new_up_client: { osd=6, addr=... } # set osd_state and addr
+ *     new_state: { osd=6, xorstate=EXISTS } # clear osd_state
+ */
+static int decode_new_up_state_weight(void **p, void *end,
+				      struct ceph_osdmap *map)
+{
+	void *new_up_client;
+	void *new_state;
+	void *new_weight_end;
+	u32 len;
+
+	new_up_client = *p;
+	ceph_decode_32_safe(p, end, len, e_inval);
+	len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
+	ceph_decode_need(p, end, len, e_inval);
+	*p += len;
+
+	new_state = *p;
+	ceph_decode_32_safe(p, end, len, e_inval);
+	len *= sizeof(u32) + sizeof(u8);
+	ceph_decode_need(p, end, len, e_inval);
+	*p += len;
+
+	/* new_weight */
+	ceph_decode_32_safe(p, end, len, e_inval);
+	while (len--) {
+		s32 osd;
+		u32 w;
+
+		ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
+		osd = ceph_decode_32(p);
+		w = ceph_decode_32(p);
+		BUG_ON(osd >= map->max_osd);
+		pr_info("osd%d weight 0x%x %s\n", osd, w,
+			w == CEPH_OSD_IN ? "(in)" :
+			(w == CEPH_OSD_OUT ? "(out)" : ""));
+		map->osd_weight[osd] = w;
+
+		/*
+		 * If we are marking in, set the EXISTS, and clear the
+		 * AUTOOUT and NEW bits.
+		 */
+		if (w) {
+			map->osd_state[osd] |= CEPH_OSD_EXISTS;
+			map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
+						 CEPH_OSD_NEW);
+		}
+	}
+	new_weight_end = *p;
+
+	/* new_state (up/down) */
+	*p = new_state;
+	len = ceph_decode_32(p);
+	while (len--) {
+		s32 osd;
+		u8 xorstate;
+
+		osd = ceph_decode_32(p);
+		xorstate = ceph_decode_8(p);
+		if (xorstate == 0)
+			xorstate = CEPH_OSD_UP;
+		BUG_ON(osd >= map->max_osd);
+		if ((map->osd_state[osd] & CEPH_OSD_UP) &&
+		    (xorstate & CEPH_OSD_UP))
+			pr_info("osd%d down\n", osd);
+		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
+		    (xorstate & CEPH_OSD_EXISTS)) {
+			pr_info("osd%d does not exist\n", osd);
+			map->osd_weight[osd] = CEPH_OSD_IN;
+			memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
+			map->osd_state[osd] = 0;
+		} else {
+			map->osd_state[osd] ^= xorstate;
+		}
+	}
+
+	/* new_up_client */
+	*p = new_up_client;
+	len = ceph_decode_32(p);
+	while (len--) {
+		s32 osd;
+		struct ceph_entity_addr addr;
+
+		osd = ceph_decode_32(p);
+		ceph_decode_copy(p, &addr, sizeof(addr));
+		ceph_decode_addr(&addr);
+		BUG_ON(osd >= map->max_osd);
+		pr_info("osd%d up\n", osd);
+		map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
+		map->osd_addr[osd] = addr;
+	}
+
+	*p = new_weight_end;
+	return 0;
+
+e_inval:
+	return -EINVAL;
+}
+
+/*
  * decode and apply an incremental map update.
  */
 struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
@@ -912,50 +1016,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
			__remove_pg_pool(&map->pg_pools, pi);
 	}
 
-	/* new_up */
-	err = -EINVAL;
-	ceph_decode_32_safe(p, end, len, bad);
-	while (len--) {
-		u32 osd;
-		struct ceph_entity_addr addr;
-		ceph_decode_32_safe(p, end, osd, bad);
-		ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
-		ceph_decode_addr(&addr);
-		pr_info("osd%d up\n", osd);
-		BUG_ON(osd >= map->max_osd);
-		map->osd_state[osd] |= CEPH_OSD_UP;
-		map->osd_addr[osd] = addr;
-	}
-
-	/* new_state */
-	ceph_decode_32_safe(p, end, len, bad);
-	while (len--) {
-		u32 osd;
-		u8 xorstate;
-		ceph_decode_32_safe(p, end, osd, bad);
-		xorstate = **(u8 **)p;
-		(*p)++;  /* clean flag */
-		if (xorstate == 0)
-			xorstate = CEPH_OSD_UP;
-		if (xorstate & CEPH_OSD_UP)
-			pr_info("osd%d down\n", osd);
-		if (osd < map->max_osd)
-			map->osd_state[osd] ^= xorstate;
-	}
-
-	/* new_weight */
-	ceph_decode_32_safe(p, end, len, bad);
-	while (len--) {
-		u32 osd, off;
-		ceph_decode_need(p, end, sizeof(u32)*2, bad);
-		osd = ceph_decode_32(p);
-		off = ceph_decode_32(p);
-		pr_info("osd%d weight 0x%x %s\n", osd, off,
-		     off == CEPH_OSD_IN ? "(in)" :
-		     (off == CEPH_OSD_OUT ? "(out)" : ""));
-		if (osd < map->max_osd)
-			map->osd_weight[osd] = off;
-	}
+	/* new_up_client, new_state, new_weight */
+	err = decode_new_up_state_weight(p, end, map);
+	if (err)
+		goto bad;
 
 	/* new_pg_temp */
 	ceph_decode_32_safe(p, end, len, bad);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index b31553d385bb..89570f070e0e 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -881,8 +881,10 @@ static struct mfc_cache *ipmr_cache_alloc(void)
 {
 	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
 
-	if (c)
+	if (c) {
+		c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
 		c->mfc_un.res.minvif = MAXVIFS;
+	}
 	return c;
 }
 
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index c8abe31961ed..95a5f261fe8a 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -350,11 +350,12 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 }
 
 /* All zeroes == unconditional rule. */
-static inline bool unconditional(const struct arpt_arp *arp)
+static inline bool unconditional(const struct arpt_entry *e)
 {
 	static const struct arpt_arp uncond;
 
-	return memcmp(arp, &uncond, sizeof(uncond)) == 0;
+	return e->target_offset == sizeof(struct arpt_entry) &&
+	       memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
 }
 
 /* Figures out from what hook each rule can be called: returns 0 if
@@ -393,11 +394,10 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
				|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));
 
 			/* Unconditional return/END. */
-			if ((e->target_offset == sizeof(struct arpt_entry) &&
+			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
-			     t->verdict < 0 && unconditional(&e->arp)) ||
-			    visited) {
+			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;
 
				if ((strcmp(t->target.u.user.name,
@@ -430,6 +430,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
				size = e->next_offset;
				e = (struct arpt_entry *)
					(entry0 + pos + size);
+				if (pos + size >= newinfo->size)
+					return 0;
				e->counters.pcnt = pos;
				pos += size;
			} else {
@@ -452,6 +454,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
+					if (newpos >= newinfo->size)
+						return 0;
				}
				e = (struct arpt_entry *)
					(entry0 + newpos);
@@ -465,25 +469,6 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
 	return 1;
 }
 
-static inline int check_entry(const struct arpt_entry *e, const char *name)
-{
-	const struct xt_entry_target *t;
-
-	if (!arp_checkentry(&e->arp)) {
-		duprintf("arp_tables: arp check failed %p %s.\n", e, name);
-		return -EINVAL;
-	}
-
-	if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
-		return -EINVAL;
-
-	t = arpt_get_target_c(e);
-	if (e->target_offset + t->u.target_size > e->next_offset)
-		return -EINVAL;
-
-	return 0;
-}
-
 static inline int check_target(struct arpt_entry *e, const char *name)
 {
 	struct xt_entry_target *t = arpt_get_target(e);
@@ -513,10 +498,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
 	struct xt_target *target;
 	int ret;
 
-	ret = check_entry(e, name);
-	if (ret)
-		return ret;
-
 	t = arpt_get_target(e);
 	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
					t->u.user.revision);
@@ -542,7 +523,7 @@ static bool check_underflow(const struct arpt_entry *e)
 	const struct xt_entry_target *t;
 	unsigned int verdict;
 
-	if (!unconditional(&e->arp))
+	if (!unconditional(e))
 		return false;
 	t = arpt_get_target_c(e);
 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -561,9 +542,11 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
					     unsigned int valid_hooks)
 {
 	unsigned int h;
+	int err;
 
 	if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
-	    (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
+	    (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
+	    (unsigned char *)e + e->next_offset > limit) {
 		duprintf("Bad offset %p\n", e);
 		return -EINVAL;
 	}
@@ -575,6 +558,14 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
 		return -EINVAL;
 	}
 
+	if (!arp_checkentry(&e->arp))
+		return -EINVAL;
+
+	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
+				     e->next_offset);
+	if (err)
+		return err;
+
 	/* Check hooks & underflows */
 	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
 		if (!(valid_hooks & (1 << h)))
@@ -583,9 +574,9 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
			newinfo->hook_entry[h] = hook_entries[h];
 		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
-				pr_err("Underflows must be unconditional and "
-				       "use the STANDARD target with "
-				       "ACCEPT/DROP\n");
+				pr_debug("Underflows must be unconditional and "
+					 "use the STANDARD target with "
+					 "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
@@ -675,10 +666,8 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
		}
 	}
 
-	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
-		duprintf("Looping hook\n");
+	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
 		return -ELOOP;
-	}
 
 	/* Finally, each sanity check must pass */
 	i = 0;
@@ -1071,6 +1060,9 @@ static int do_replace(struct net *net, const void __user *user,
 	/* overflow check */
 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
 		return -ENOMEM;
+	if (tmp.num_counters == 0)
+		return -EINVAL;
+
 	tmp.name[sizeof(tmp.name)-1] = 0;
 
 	newinfo = xt_alloc_table_info(tmp.size);
@@ -1111,56 +1103,18 @@ static int do_add_counters(struct net *net, const void __user *user,
 	unsigned int i, curcpu;
 	struct xt_counters_info tmp;
 	struct xt_counters *paddc;
-	unsigned int num_counters;
-	const char *name;
-	int size;
-	void *ptmp;
 	struct xt_table *t;
 	const struct xt_table_info *private;
 	int ret = 0;
 	void *loc_cpu_entry;
 	struct arpt_entry *iter;
 	unsigned int addend;
-#ifdef CONFIG_COMPAT
-	struct compat_xt_counters_info compat_tmp;
-
-	if (compat) {
-		ptmp = &compat_tmp;
-		size = sizeof(struct compat_xt_counters_info);
-	} else
-#endif
-	{
-		ptmp = &tmp;
-		size = sizeof(struct xt_counters_info);
-	}
 
-	if (copy_from_user(ptmp, user, size) != 0)
-		return -EFAULT;
+	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
+	if (IS_ERR(paddc))
+		return PTR_ERR(paddc);
 
-#ifdef CONFIG_COMPAT
-	if (compat) {
-		num_counters = compat_tmp.num_counters;
-		name = compat_tmp.name;
-	} else
-#endif
-	{
-		num_counters = tmp.num_counters;
-		name = tmp.name;
-	}
-
-	if (len != size + num_counters * sizeof(struct xt_counters))
-		return -EINVAL;
-
-	paddc = vmalloc(len - size);
-	if (!paddc)
-		return -ENOMEM;
-
-	if (copy_from_user(paddc, user + size, len - size) != 0) {
-		ret = -EFAULT;
-		goto free;
-	}
-
-	t = xt_find_table_lock(net, NFPROTO_ARP, name);
+	t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
 	if (IS_ERR_OR_NULL(t)) {
 		ret = t ? PTR_ERR(t) : -ENOENT;
 		goto free;
@@ -1168,7 +1122,7 @@ static int do_add_counters(struct net *net, const void __user *user,
 
 	local_bh_disable();
 	private = t->private;
-	if (private->number != num_counters) {
+	if (private->number != tmp.num_counters) {
 		ret = -EINVAL;
 		goto unlock_up_free;
 	}
@@ -1194,6 +1148,18 @@ static int do_add_counters(struct net *net, const void __user *user,
 }
 
 #ifdef CONFIG_COMPAT
+struct compat_arpt_replace {
+	char				name[XT_TABLE_MAXNAMELEN];
+	u32				valid_hooks;
+	u32				num_entries;
+	u32				size;
+	u32				hook_entry[NF_ARP_NUMHOOKS];
+	u32				underflow[NF_ARP_NUMHOOKS];
+	u32				num_counters;
+	compat_uptr_t			counters;
+	struct compat_arpt_entry	entries[0];
+};
+
 static inline void compat_release_entry(struct compat_arpt_entry *e)
 {
 	struct xt_entry_target *t;
@@ -1202,24 +1168,22 @@ static inline void compat_release_entry(struct compat_arpt_entry *e)
 	module_put(t->u.kernel.target->me);
 }
 
-static inline int
+static int
 check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
-				  const unsigned char *limit,
-				  const unsigned int *hook_entries,
-				  const unsigned int *underflows,
-				  const char *name)
+				  const unsigned char *limit)
 {
 	struct xt_entry_target *t;
 	struct xt_target *target;
 	unsigned int entry_offset;
-	int ret, off, h;
+	int ret, off;
 
 	duprintf("check_compat_entry_size_and_hooks %p\n", e);
 	if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
-	    (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
+	    (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
+	    (unsigned char *)e + e->next_offset > limit) {
 		duprintf("Bad offset %p, limit = %p\n", e, limit);
 		return -EINVAL;
 	}
@@ -1231,8 +1195,11 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
 		return -EINVAL;
 	}
 
-	/* For purposes of check_entry casting the compat entry is fine */
-	ret = check_entry((struct arpt_entry *)e, name);
+	if (!arp_checkentry(&e->arp))
+		return -EINVAL;
+
+	ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset,
+					    e->next_offset);
 	if (ret)
 		return ret;
 
@@ -1256,17 +1223,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
 	if (ret)
 		goto release_target;
 
-	/* Check hooks & underflows */
-	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
-		if ((unsigned char *)e - base == hook_entries[h])
-			newinfo->hook_entry[h] = hook_entries[h];
-		if ((unsigned char *)e - base == underflows[h])
-			newinfo->underflow[h] = underflows[h];
-	}
-
-	/* Clear counters and comefrom */
-	memset(&e->counters, 0, sizeof(e->counters));
-	e->comefrom = 0;
 	return 0;
 
 release_target:
@@ -1275,18 +1231,17 @@ out:
 	return ret;
 }
 
-static int
+static void
 compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
-			    unsigned int *size, const char *name,
+			    unsigned int *size,
			    struct xt_table_info *newinfo, unsigned char *base)
 {
 	struct xt_entry_target *t;
 	struct xt_target *target;
 	struct arpt_entry *de;
 	unsigned int origsize;
-	int ret, h;
+	int h;
 
-	ret = 0;
 	origsize = *size;
 	de = (struct arpt_entry *)*dstptr;
 	memcpy(de, e, sizeof(struct arpt_entry));
@@ -1307,144 +1262,81 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
 		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
 	}
-	return ret;
 }
 
-static int translate_compat_table(const char *name,
-				  unsigned int valid_hooks,
-				  struct xt_table_info **pinfo,
+static int translate_compat_table(struct xt_table_info **pinfo,
				  void **pentry0,
-				  unsigned int total_size,
-				  unsigned int number,
-				  unsigned int *hook_entries,
-				  unsigned int *underflows)
+				  const struct compat_arpt_replace *compatr)
 {
 	unsigned int i, j;
 	struct xt_table_info *newinfo, *info;
 	void *pos, *entry0, *entry1;
 	struct compat_arpt_entry *iter0;
-	struct arpt_entry *iter1;
+	struct arpt_replace repl;
 	unsigned int size;
 	int ret = 0;
 
 	info = *pinfo;
 	entry0 = *pentry0;
-	size = total_size;
-	info->number = number;
-
-	/* Init all hooks to impossible value. */
-	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
-		info->hook_entry[i] = 0xFFFFFFFF;
-		info->underflow[i] = 0xFFFFFFFF;
-	}
+	size = compatr->size;
+	info->number = compatr->num_entries;
 
 	duprintf("translate_compat_table: size %u\n", info->size);
 	j = 0;
 	xt_compat_lock(NFPROTO_ARP);
-	xt_compat_init_offsets(NFPROTO_ARP, number);
+	xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
 	/* Walk through entries, checking offsets. */
-	xt_entry_foreach(iter0, entry0, total_size) {
+	xt_entry_foreach(iter0, entry0, compatr->size) {
 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
-							entry0 + total_size,
-							hook_entries,
-							underflows,
-							name);
+							entry0 + compatr->size);
 		if (ret != 0)
			goto out_unlock;
 		++j;
 	}
 
 	ret = -EINVAL;
-	if (j != number) {
+	if (j != compatr->num_entries) {
 		duprintf("translate_compat_table: %u not %u entries\n",
-			 j, number);
+			 j, compatr->num_entries);
 		goto out_unlock;
 	}
 
-	/* Check hooks all assigned */
-	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
-		/* Only hooks which are valid */
-		if (!(valid_hooks & (1 << i)))
-			continue;
-		if (info->hook_entry[i] == 0xFFFFFFFF) {
-			duprintf("Invalid hook entry %u %u\n",
-				 i, hook_entries[i]);
-			goto out_unlock;
-		}
-		if (info->underflow[i] == 0xFFFFFFFF) {
-			duprintf("Invalid underflow %u %u\n",
-				 i, underflows[i]);
-			goto out_unlock;
-		}
-	}
-
 	ret = -ENOMEM;
 	newinfo = xt_alloc_table_info(size);
 	if (!newinfo)
 		goto out_unlock;
 
-	newinfo->number = number;
+	newinfo->number = compatr->num_entries;
 	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
 		newinfo->hook_entry[i] = info->hook_entry[i];
 		newinfo->underflow[i] = info->underflow[i];
 	}
 	entry1 = newinfo->entries[raw_smp_processor_id()];
 	pos = entry1;
-	size = total_size;
-	xt_entry_foreach(iter0, entry0, total_size) {
-		ret = compat_copy_entry_from_user(iter0, &pos, &size,
-						  name, newinfo, entry1);
-		if (ret != 0)
-			break;
-	}
+	size = compatr->size;
+	xt_entry_foreach(iter0, entry0, compatr->size)
+		compat_copy_entry_from_user(iter0, &pos, &size,
+					    newinfo, entry1);
+
+	/* all module references in entry0 are now gone */
+
 	xt_compat_flush_offsets(NFPROTO_ARP);
 	xt_compat_unlock(NFPROTO_ARP);
-	if (ret)
-		goto free_newinfo;
 
-	ret = -ELOOP;
-	if (!mark_source_chains(newinfo, valid_hooks, entry1))
-		goto free_newinfo;
+	memcpy(&repl, compatr, sizeof(*compatr));
 
-	i = 0;
-	xt_entry_foreach(iter1, entry1, newinfo->size) {
-		ret = check_target(iter1, name);
-		if (ret != 0)
-			break;
-		++i;
-		if (strcmp(arpt_get_target(iter1)->u.user.name,
-		    XT_ERROR_TARGET) == 0)
-			++newinfo->stacksize;
-	}
-	if (ret) {
-		/*
-		 * The first i matches need cleanup_entry (calls ->destroy)
-		 * because they had called ->check already. The other j-i
-		 * entries need only release.
-		 */
-		int skip = i;
-		j -= i;
-		xt_entry_foreach(iter0, entry0, newinfo->size) {
-			if (skip-- > 0)
-				continue;
-			if (j-- == 0)
-				break;
-			compat_release_entry(iter0);
-		}
-		xt_entry_foreach(iter1, entry1, newinfo->size) {
-			if (i-- == 0)
-				break;
-			cleanup_entry(iter1);
-		}
-		xt_free_table_info(newinfo);
-		return ret;
+	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
+		repl.hook_entry[i] = newinfo->hook_entry[i];
+		repl.underflow[i] = newinfo->underflow[i];
 	}
 
-	/* And one copy for every other CPU */
-	for_each_possible_cpu(i)
-		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
-			memcpy(newinfo->entries[i], entry1, newinfo->size);
+	repl.num_counters = 0;
+	repl.counters = NULL;
+	repl.size = newinfo->size;
+	ret = translate_table(newinfo, entry1, &repl);
+	if (ret)
+		goto free_newinfo;
 
 	*pinfo = newinfo;
 	*pentry0 = entry1;
@@ -1453,31 +1345,18 @@ static int translate_compat_table(const char *name,
 
 free_newinfo:
 	xt_free_table_info(newinfo);
-out:
-	xt_entry_foreach(iter0, entry0, total_size) {
+	return ret;
+out_unlock:
+	xt_compat_flush_offsets(NFPROTO_ARP);
+	xt_compat_unlock(NFPROTO_ARP);
+	xt_entry_foreach(iter0, entry0, compatr->size) {
 		if (j-- == 0)
			break;
 		compat_release_entry(iter0);
 	}
 	return ret;
-out_unlock:
-	xt_compat_flush_offsets(NFPROTO_ARP);
-	xt_compat_unlock(NFPROTO_ARP);
-	goto out;
 }
 
-struct compat_arpt_replace {
-	char				name[XT_TABLE_MAXNAMELEN];
-	u32				valid_hooks;
-	u32				num_entries;
-	u32				size;
-	u32				hook_entry[NF_ARP_NUMHOOKS];
-	u32				underflow[NF_ARP_NUMHOOKS];
-	u32				num_counters;
-	compat_uptr_t			counters;
-	struct compat_arpt_entry	entries[0];
-};
-
 static int compat_do_replace(struct net *net, void __user *user,
			     unsigned int len)
 {
@@ -1495,6 +1374,9 @@ static int compat_do_replace(struct net *net, void __user *user,
 		return -ENOMEM;
 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
 		return -ENOMEM;
+	if (tmp.num_counters == 0)
+		return -EINVAL;
+
 	tmp.name[sizeof(tmp.name)-1] = 0;
 
 	newinfo = xt_alloc_table_info(tmp.size);
@@ -1508,10 +1390,7 @@ static int compat_do_replace(struct net *net, void __user *user,
 		goto free_newinfo;
 	}
 
-	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
-				     &newinfo, &loc_cpu_entry, tmp.size,
-				     tmp.num_entries, tmp.hook_entry,
-				     tmp.underflow);
+	ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
 	if (ret != 0)
 		goto free_newinfo;
 
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 651c10774d58..92c8f2727ee9 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -168,11 +168,12 @@ get_entry(const void *base, unsigned int offset)
 
 /* All zeroes == unconditional rule. */
 /* Mildly perf critical (only if packet tracing is on) */
-static inline bool unconditional(const struct ipt_ip *ip)
+static inline bool unconditional(const struct ipt_entry *e)
 {
 	static const struct ipt_ip uncond;
 
-	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
+	return e->target_offset == sizeof(struct ipt_entry) &&
+	       memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
 #undef FWINV
 }
 
@@ -229,11 +230,10 @@ get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
 	} else if (s == e) {
 		(*rulenum)++;
 
-		if (s->target_offset == sizeof(struct ipt_entry) &&
+		if (unconditional(s) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
-		   t->verdict < 0 &&
-		   unconditional(&s->ip)) {
+		   t->verdict < 0) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
@@ -467,11 +467,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
 
			/* Unconditional return/END. */
-			if ((e->target_offset == sizeof(struct ipt_entry) &&
+			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
-			     t->verdict < 0 && unconditional(&e->ip)) ||
-			    visited) {
+			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;
 
				if ((strcmp(t->target.u.user.name,
@@ -512,6 +511,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
+				if (pos + size >= newinfo->size)
+					return 0;
				e->counters.pcnt = pos;
				pos += size;
			} else {
@@ -533,6 +534,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
+					if (newpos >= newinfo->size)
+						return 0;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
@@ -560,27 +563,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
 }
 
 static int
-check_entry(const struct ipt_entry *e, const char *name)
-{
-	const struct xt_entry_target *t;
-
-	if (!ip_checkentry(&e->ip)) {
-		duprintf("ip check failed %p %s.\n", e, name);
-		return -EINVAL;
-	}
-
-	if (e->target_offset + sizeof(struct xt_entry_target) >
-	    e->next_offset)
-		return -EINVAL;
-
-	t = ipt_get_target_c(e);
-	if (e->target_offset + t->u.target_size > e->next_offset)
-		return -EINVAL;
-
-	return 0;
-}
-
-static int
 check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
 {
 	const struct ipt_ip *ip = par->entryinfo;
@@ -657,10 +639,6 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
 	struct xt_mtchk_param mtpar;
 	struct xt_entry_match *ematch;
 
-	ret = check_entry(e, name);
-	if (ret)
-		return ret;
-
 	j = 0;
 	mtpar.net	= net;
 	mtpar.table     = name;
@@ -704,7 +682,7 @@ static bool check_underflow(const struct ipt_entry *e)
 	const struct xt_entry_target *t;
 	unsigned int verdict;
 
-	if (!unconditional(&e->ip))
+	if (!unconditional(e))
 		return false;
 	t = ipt_get_target_c(e);
 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -724,9 +702,11 @@ check_entry_size_and_hooks(struct ipt_entry *e,
			   unsigned int valid_hooks)
 {
 	unsigned int h;
+	int err;
 
 	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
-	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
+	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
+	    (unsigned char *)e + e->next_offset > limit) {
 		duprintf("Bad offset %p\n", e);
 		return -EINVAL;
 	}
@@ -738,6 +718,14 @@ check_entry_size_and_hooks(struct ipt_entry *e,
 		return -EINVAL;
 	}
 
+	if (!ip_checkentry(&e->ip))
+		return -EINVAL;
+
+	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
+				     e->next_offset);
+	if (err)
+		return err;
+
 	/* Check hooks & underflows */
 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
 		if (!(valid_hooks & (1 << h)))
@@ -746,9 +734,9 @@ check_entry_size_and_hooks(struct ipt_entry *e,
			newinfo->hook_entry[h] = hook_entries[h];
 		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
-				pr_err("Underflows must be unconditional and "
-				       "use the STANDARD target with "
-				       "ACCEPT/DROP\n");
+				pr_debug("Underflows must be unconditional and "
+					 "use the STANDARD target with "
+					 "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
@@ -1258,6 +1246,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
 	/* overflow check */
 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
 		return -ENOMEM;
+	if (tmp.num_counters == 0)
+		return -EINVAL;
+
 	tmp.name[sizeof(tmp.name)-1] = 0;
 
 	newinfo = xt_alloc_table_info(tmp.size);
@@ -1299,56 +1290,18 @@ do_add_counters(struct net *net, const void __user *user,
 	unsigned int i, curcpu;
 	struct xt_counters_info tmp;
 	struct xt_counters *paddc;
-	unsigned int num_counters;
-	const char *name;
-	int size;
-	void *ptmp;
 	struct xt_table *t;
 	const struct xt_table_info *private;
 	int ret = 0;
 	void *loc_cpu_entry;
 	struct ipt_entry *iter;
 	unsigned int addend;
-#ifdef CONFIG_COMPAT
-	struct compat_xt_counters_info compat_tmp;
-
-	if (compat) {
-		ptmp = &compat_tmp;
-		size = sizeof(struct compat_xt_counters_info);
-	} else
-#endif
-	{
-		ptmp = &tmp;
-		size = sizeof(struct xt_counters_info);
-	}
-
-	if (copy_from_user(ptmp, user, size) != 0)
-		return -EFAULT;
-
-#ifdef CONFIG_COMPAT
-	if (compat) {
-		num_counters = compat_tmp.num_counters;
-		name = compat_tmp.name;
-	} else
-#endif
-	{
-		num_counters = tmp.num_counters;
-		name = tmp.name;
-	}
 
-	if (len != size + num_counters * sizeof(struct xt_counters))
-		return -EINVAL;
-
-	paddc = vmalloc(len - size);
-	if (!paddc)
-		return -ENOMEM;
-
-	if (copy_from_user(paddc, user + size, len - size) != 0) {
-		ret = -EFAULT;
-		goto free;
-	}
+	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
+	if (IS_ERR(paddc))
+		return PTR_ERR(paddc);
 
-	t = xt_find_table_lock(net, AF_INET, name);
+	t = xt_find_table_lock(net, AF_INET, tmp.name);
 	if (IS_ERR_OR_NULL(t)) {
 		ret = t ? PTR_ERR(t) : -ENOENT;
 		goto free;
@@ -1356,7 +1309,7 @@ do_add_counters(struct net *net, const void __user *user,
|
|
|
|
local_bh_disable();
|
|
private = t->private;
|
|
- if (private->number != num_counters) {
|
|
+ if (private->number != tmp.num_counters) {
|
|
ret = -EINVAL;
|
|
goto unlock_up_free;
|
|
}
|
|
@@ -1435,7 +1388,6 @@ compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
|
|
|
|
static int
|
|
compat_find_calc_match(struct xt_entry_match *m,
|
|
- const char *name,
|
|
const struct ipt_ip *ip,
|
|
unsigned int hookmask,
|
|
int *size)
|
|
@@ -1471,21 +1423,19 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
|
|
struct xt_table_info *newinfo,
|
|
unsigned int *size,
|
|
const unsigned char *base,
|
|
- const unsigned char *limit,
|
|
- const unsigned int *hook_entries,
|
|
- const unsigned int *underflows,
|
|
- const char *name)
|
|
+ const unsigned char *limit)
|
|
{
|
|
struct xt_entry_match *ematch;
|
|
struct xt_entry_target *t;
|
|
struct xt_target *target;
|
|
unsigned int entry_offset;
|
|
unsigned int j;
|
|
- int ret, off, h;
|
|
+ int ret, off;
|
|
|
|
duprintf("check_compat_entry_size_and_hooks %p\n", e);
|
|
if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
|
|
- (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
|
|
+ (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
|
|
+ (unsigned char *)e + e->next_offset > limit) {
|
|
duprintf("Bad offset %p, limit = %p\n", e, limit);
|
|
return -EINVAL;
|
|
}
|
|
@@ -1497,8 +1447,11 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
|
|
return -EINVAL;
|
|
}
|
|
|
|
- /* For purposes of check_entry casting the compat entry is fine */
|
|
- ret = check_entry((struct ipt_entry *)e, name);
|
|
+ if (!ip_checkentry(&e->ip))
|
|
+ return -EINVAL;
|
|
+
|
|
+ ret = xt_compat_check_entry_offsets(e, e->elems,
|
|
+ e->target_offset, e->next_offset);
|
|
if (ret)
|
|
return ret;
|
|
|
|
@@ -1506,8 +1459,8 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
|
|
entry_offset = (void *)e - (void *)base;
|
|
j = 0;
|
|
xt_ematch_foreach(ematch, e) {
|
|
- ret = compat_find_calc_match(ematch, name,
|
|
- &e->ip, e->comefrom, &off);
|
|
+ ret = compat_find_calc_match(ematch, &e->ip, e->comefrom,
|
|
+ &off);
|
|
if (ret != 0)
|
|
goto release_matches;
|
|
++j;
|
|
@@ -1530,17 +1483,6 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
|
|
if (ret)
|
|
goto out;
|
|
|
|
- /* Check hooks & underflows */
|
|
- for (h = 0; h < NF_INET_NUMHOOKS; h++) {
|
|
- if ((unsigned char *)e - base == hook_entries[h])
|
|
- newinfo->hook_entry[h] = hook_entries[h];
|
|
- if ((unsigned char *)e - base == underflows[h])
|
|
- newinfo->underflow[h] = underflows[h];
|
|
- }
|
|
-
|
|
- /* Clear counters and comefrom */
|
|
- memset(&e->counters, 0, sizeof(e->counters));
|
|
- e->comefrom = 0;
|
|
return 0;
|
|
|
|
out:
|
|
@@ -1554,19 +1496,18 @@ release_matches:
|
|
return ret;
|
|
}
|
|
|
|
-static int
|
|
+static void
|
|
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
|
|
- unsigned int *size, const char *name,
|
|
+ unsigned int *size,
|
|
struct xt_table_info *newinfo, unsigned char *base)
|
|
{
|
|
struct xt_entry_target *t;
|
|
struct xt_target *target;
|
|
struct ipt_entry *de;
|
|
unsigned int origsize;
|
|
- int ret, h;
|
|
+ int h;
|
|
struct xt_entry_match *ematch;
|
|
|
|
- ret = 0;
|
|
origsize = *size;
|
|
de = (struct ipt_entry *)*dstptr;
|
|
memcpy(de, e, sizeof(struct ipt_entry));
|
|
@@ -1575,198 +1516,104 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
|
|
*dstptr += sizeof(struct ipt_entry);
|
|
*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
|
|
|
|
- xt_ematch_foreach(ematch, e) {
|
|
- ret = xt_compat_match_from_user(ematch, dstptr, size);
|
|
- if (ret != 0)
|
|
- return ret;
|
|
- }
|
|
+ xt_ematch_foreach(ematch, e)
|
|
+ xt_compat_match_from_user(ematch, dstptr, size);
|
|
+
|
|
de->target_offset = e->target_offset - (origsize - *size);
|
|
t = compat_ipt_get_target(e);
|
|
target = t->u.kernel.target;
|
|
xt_compat_target_from_user(t, dstptr, size);
|
|
|
|
de->next_offset = e->next_offset - (origsize - *size);
|
|
+
|
|
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
|
|
if ((unsigned char *)de - base < newinfo->hook_entry[h])
|
|
newinfo->hook_entry[h] -= origsize - *size;
|
|
if ((unsigned char *)de - base < newinfo->underflow[h])
|
|
newinfo->underflow[h] -= origsize - *size;
|
|
}
|
|
- return ret;
|
|
-}
|
|
-
|
|
-static int
|
|
-compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
|
|
-{
|
|
- struct xt_entry_match *ematch;
|
|
- struct xt_mtchk_param mtpar;
|
|
- unsigned int j;
|
|
- int ret = 0;
|
|
-
|
|
- j = 0;
|
|
- mtpar.net = net;
|
|
- mtpar.table = name;
|
|
- mtpar.entryinfo = &e->ip;
|
|
- mtpar.hook_mask = e->comefrom;
|
|
- mtpar.family = NFPROTO_IPV4;
|
|
- xt_ematch_foreach(ematch, e) {
|
|
- ret = check_match(ematch, &mtpar);
|
|
- if (ret != 0)
|
|
- goto cleanup_matches;
|
|
- ++j;
|
|
- }
|
|
-
|
|
- ret = check_target(e, net, name);
|
|
- if (ret)
|
|
- goto cleanup_matches;
|
|
- return 0;
|
|
-
|
|
- cleanup_matches:
|
|
- xt_ematch_foreach(ematch, e) {
|
|
- if (j-- == 0)
|
|
- break;
|
|
- cleanup_match(ematch, net);
|
|
- }
|
|
- return ret;
|
|
}
|
|
|
|
static int
|
|
translate_compat_table(struct net *net,
|
|
- const char *name,
|
|
- unsigned int valid_hooks,
|
|
struct xt_table_info **pinfo,
|
|
void **pentry0,
|
|
- unsigned int total_size,
|
|
- unsigned int number,
|
|
- unsigned int *hook_entries,
|
|
- unsigned int *underflows)
|
|
+ const struct compat_ipt_replace *compatr)
|
|
{
|
|
unsigned int i, j;
|
|
struct xt_table_info *newinfo, *info;
|
|
void *pos, *entry0, *entry1;
|
|
struct compat_ipt_entry *iter0;
|
|
- struct ipt_entry *iter1;
|
|
+ struct ipt_replace repl;
|
|
unsigned int size;
|
|
int ret;
|
|
|
|
info = *pinfo;
|
|
entry0 = *pentry0;
|
|
- size = total_size;
|
|
- info->number = number;
|
|
-
|
|
- /* Init all hooks to impossible value. */
|
|
- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
|
|
- info->hook_entry[i] = 0xFFFFFFFF;
|
|
- info->underflow[i] = 0xFFFFFFFF;
|
|
- }
|
|
+ size = compatr->size;
|
|
+ info->number = compatr->num_entries;
|
|
|
|
duprintf("translate_compat_table: size %u\n", info->size);
|
|
j = 0;
|
|
xt_compat_lock(AF_INET);
|
|
- xt_compat_init_offsets(AF_INET, number);
|
|
+ xt_compat_init_offsets(AF_INET, compatr->num_entries);
|
|
/* Walk through entries, checking offsets. */
|
|
- xt_entry_foreach(iter0, entry0, total_size) {
|
|
+ xt_entry_foreach(iter0, entry0, compatr->size) {
|
|
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
|
|
entry0,
|
|
- entry0 + total_size,
|
|
- hook_entries,
|
|
- underflows,
|
|
- name);
|
|
+ entry0 + compatr->size);
|
|
if (ret != 0)
|
|
goto out_unlock;
|
|
++j;
|
|
}
|
|
|
|
ret = -EINVAL;
|
|
- if (j != number) {
|
|
+ if (j != compatr->num_entries) {
|
|
duprintf("translate_compat_table: %u not %u entries\n",
|
|
- j, number);
|
|
+ j, compatr->num_entries);
|
|
goto out_unlock;
|
|
}
|
|
|
|
- /* Check hooks all assigned */
|
|
- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
|
|
- /* Only hooks which are valid */
|
|
- if (!(valid_hooks & (1 << i)))
|
|
- continue;
|
|
- if (info->hook_entry[i] == 0xFFFFFFFF) {
|
|
- duprintf("Invalid hook entry %u %u\n",
|
|
- i, hook_entries[i]);
|
|
- goto out_unlock;
|
|
- }
|
|
- if (info->underflow[i] == 0xFFFFFFFF) {
|
|
- duprintf("Invalid underflow %u %u\n",
|
|
- i, underflows[i]);
|
|
- goto out_unlock;
|
|
- }
|
|
- }
|
|
-
|
|
ret = -ENOMEM;
|
|
newinfo = xt_alloc_table_info(size);
|
|
if (!newinfo)
|
|
goto out_unlock;
|
|
|
|
- newinfo->number = number;
|
|
+ newinfo->number = compatr->num_entries;
|
|
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
|
|
- newinfo->hook_entry[i] = info->hook_entry[i];
|
|
- newinfo->underflow[i] = info->underflow[i];
|
|
+ newinfo->hook_entry[i] = compatr->hook_entry[i];
|
|
+ newinfo->underflow[i] = compatr->underflow[i];
|
|
}
|
|
entry1 = newinfo->entries[raw_smp_processor_id()];
|
|
pos = entry1;
|
|
- size = total_size;
|
|
- xt_entry_foreach(iter0, entry0, total_size) {
|
|
- ret = compat_copy_entry_from_user(iter0, &pos, &size,
|
|
- name, newinfo, entry1);
|
|
- if (ret != 0)
|
|
- break;
|
|
- }
|
|
+ size = compatr->size;
|
|
+ xt_entry_foreach(iter0, entry0, compatr->size)
|
|
+ compat_copy_entry_from_user(iter0, &pos, &size,
|
|
+ newinfo, entry1);
|
|
+
|
|
+ /* all module references in entry0 are now gone.
|
|
+ * entry1/newinfo contains a 64bit ruleset that looks exactly as
|
|
+ * generated by 64bit userspace.
|
|
+ *
|
|
+ * Call standard translate_table() to validate all hook_entrys,
|
|
+ * underflows, check for loops, etc.
|
|
+ */
|
|
xt_compat_flush_offsets(AF_INET);
|
|
xt_compat_unlock(AF_INET);
|
|
- if (ret)
|
|
- goto free_newinfo;
|
|
|
|
- ret = -ELOOP;
|
|
- if (!mark_source_chains(newinfo, valid_hooks, entry1))
|
|
- goto free_newinfo;
|
|
+ memcpy(&repl, compatr, sizeof(*compatr));
|
|
|
|
- i = 0;
|
|
- xt_entry_foreach(iter1, entry1, newinfo->size) {
|
|
- ret = compat_check_entry(iter1, net, name);
|
|
- if (ret != 0)
|
|
- break;
|
|
- ++i;
|
|
- if (strcmp(ipt_get_target(iter1)->u.user.name,
|
|
- XT_ERROR_TARGET) == 0)
|
|
- ++newinfo->stacksize;
|
|
- }
|
|
- if (ret) {
|
|
- /*
|
|
- * The first i matches need cleanup_entry (calls ->destroy)
|
|
- * because they had called ->check already. The other j-i
|
|
- * entries need only release.
|
|
- */
|
|
- int skip = i;
|
|
- j -= i;
|
|
- xt_entry_foreach(iter0, entry0, newinfo->size) {
|
|
- if (skip-- > 0)
|
|
- continue;
|
|
- if (j-- == 0)
|
|
- break;
|
|
- compat_release_entry(iter0);
|
|
- }
|
|
- xt_entry_foreach(iter1, entry1, newinfo->size) {
|
|
- if (i-- == 0)
|
|
- break;
|
|
- cleanup_entry(iter1, net);
|
|
- }
|
|
- xt_free_table_info(newinfo);
|
|
- return ret;
|
|
+ for (i = 0; i < NF_INET_NUMHOOKS; i++) {
|
|
+ repl.hook_entry[i] = newinfo->hook_entry[i];
|
|
+ repl.underflow[i] = newinfo->underflow[i];
|
|
}
|
|
|
|
- /* And one copy for every other CPU */
|
|
- for_each_possible_cpu(i)
|
|
- if (newinfo->entries[i] && newinfo->entries[i] != entry1)
|
|
- memcpy(newinfo->entries[i], entry1, newinfo->size);
|
|
+ repl.num_counters = 0;
|
|
+ repl.counters = NULL;
|
|
+ repl.size = newinfo->size;
|
|
+ ret = translate_table(net, newinfo, entry1, &repl);
|
|
+ if (ret)
|
|
+ goto free_newinfo;
|
|
|
|
*pinfo = newinfo;
|
|
*pentry0 = entry1;
|
|
@@ -1775,17 +1622,16 @@ translate_compat_table(struct net *net,
|
|
|
|
free_newinfo:
|
|
xt_free_table_info(newinfo);
|
|
-out:
|
|
- xt_entry_foreach(iter0, entry0, total_size) {
|
|
+ return ret;
|
|
+out_unlock:
|
|
+ xt_compat_flush_offsets(AF_INET);
|
|
+ xt_compat_unlock(AF_INET);
|
|
+ xt_entry_foreach(iter0, entry0, compatr->size) {
|
|
if (j-- == 0)
|
|
break;
|
|
compat_release_entry(iter0);
|
|
}
|
|
return ret;
|
|
-out_unlock:
|
|
- xt_compat_flush_offsets(AF_INET);
|
|
- xt_compat_unlock(AF_INET);
|
|
- goto out;
|
|
}
|
|
|
|
static int
|
|
@@ -1805,6 +1651,9 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
|
|
return -ENOMEM;
|
|
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
|
|
return -ENOMEM;
|
|
+ if (tmp.num_counters == 0)
|
|
+ return -EINVAL;
|
|
+
|
|
tmp.name[sizeof(tmp.name)-1] = 0;
|
|
|
|
newinfo = xt_alloc_table_info(tmp.size);
|
|
@@ -1819,10 +1668,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
|
|
goto free_newinfo;
|
|
}
|
|
|
|
- ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
|
|
- &newinfo, &loc_cpu_entry, tmp.size,
|
|
- tmp.num_entries, tmp.hook_entry,
|
|
- tmp.underflow);
|
|
+ ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
|
|
if (ret != 0)
|
|
goto free_newinfo;
|
|
|
|
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f89087c3cfc8..f3b15bb7fbec 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -68,6 +68,7 @@
 #include <linux/module.h>
 #include <linux/sysctl.h>
 #include <linux/kernel.h>
+#include <linux/reciprocal_div.h>
 #include <net/dst.h>
 #include <net/tcp.h>
 #include <net/inet_common.h>
@@ -87,7 +88,7 @@ int sysctl_tcp_adv_win_scale __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);

 /* rfc5961 challenge ack rate limiting */
-int sysctl_tcp_challenge_ack_limit = 100;
+int sysctl_tcp_challenge_ack_limit = 1000;

 int sysctl_tcp_stdurg __read_mostly;
 int sysctl_tcp_rfc1337 __read_mostly;
@@ -3288,12 +3289,19 @@ static void tcp_send_challenge_ack(struct sock *sk)
 	static u32 challenge_timestamp;
 	static unsigned int challenge_count;
 	u32 now = jiffies / HZ;
+	u32 count;

 	if (now != challenge_timestamp) {
+		u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
+
 		challenge_timestamp = now;
-		challenge_count = 0;
+		ACCESS_ONCE(challenge_count) = half +
+				  reciprocal_divide(prandom_u32(),
+					sysctl_tcp_challenge_ack_limit);
 	}
-	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+	count = ACCESS_ONCE(challenge_count);
+	if (count > 0) {
+		ACCESS_ONCE(challenge_count) = count - 1;
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
 		tcp_send_ack(sk);
 	}
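The tcp_input.c hunk above replaces the old "count up to the limit" challenge-ACK throttle with a per-second budget that is drawn down and whose starting value is randomized around the sysctl limit, so an off-path attacker can no longer infer the exact state of the global counter (the side channel behind CVE-2016-5696). Below is a minimal userspace model of the same idea; it is an illustrative sketch, not kernel code, and rand()/time() stand in for prandom_u32() and jiffies/HZ.

    /* Toy model of the randomized challenge-ACK budget. */
    #include <stdint.h>
    #include <stdlib.h>
    #include <time.h>

    static uint32_t budget;
    static time_t stamp;

    static int challenge_ack_allowed(uint32_t limit)
    {
        time_t now = time(NULL);

        if (now != stamp) {             /* new 1-second interval */
            uint32_t half = (limit + 1) / 2;

            stamp = now;
            /* uniform in [half, half + limit), like reciprocal_divide() */
            budget = half + (uint32_t)(((uint64_t)rand() * limit) /
                                       ((uint64_t)RAND_MAX + 1));
        }
        if (budget > 0) {               /* spend one token */
            budget--;
            return 1;
        }
        return 0;
    }

Randomizing the starting budget in [half, half + limit) keeps the average rate near the sysctl value while making the remaining count unpredictable from outside the host.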
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 76c80b59e80f..276b28301a6b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -222,7 +222,8 @@ void tcp_select_initial_window(int __space, __u32 mss,
 		/* Set window scaling on max possible window
 		 * See RFC1323 for an explanation of the limit to 14
 		 */
-		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
+		space = max_t(u32, space, sysctl_tcp_rmem[2]);
+		space = max_t(u32, space, sysctl_rmem_max);
 		space = min_t(u32, space, *window_clamp);
 		while (space > 65535 && (*rcv_wscale) < 14) {
 			space >>= 1;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 63b536bbf0b0..68174e4d88c7 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1208,6 +1208,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	int peeked, off = 0;
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
+	bool checksum_valid = false;
 	bool slow;

 	if (flags & MSG_ERRQUEUE)
@@ -1233,11 +1234,12 @@ try_again:
 	 */

 	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
-		if (udp_lib_checksum_complete(skb))
+		checksum_valid = !udp_lib_checksum_complete(skb);
+		if (!checksum_valid)
 			goto csum_copy_err;
 	}

-	if (skb_csum_unnecessary(skb))
+	if (checksum_valid || skb_csum_unnecessary(skb))
 		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
 					      msg->msg_iov, copied);
 	else {
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 8d69df16f6a8..107f75283b1b 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1077,6 +1077,7 @@ static struct mfc6_cache *ip6mr_cache_alloc(void)
 	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
 	if (c == NULL)
 		return NULL;
+	c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
 	c->mfc_un.res.minvif = MAXMIFS;
 	return c;
 }
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 89a4e4ddd8bb..e214222cd06f 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -195,11 +195,12 @@ get_entry(const void *base, unsigned int offset)

 /* All zeroes == unconditional rule. */
 /* Mildly perf critical (only if packet tracing is on) */
-static inline bool unconditional(const struct ip6t_ip6 *ipv6)
+static inline bool unconditional(const struct ip6t_entry *e)
 {
 	static const struct ip6t_ip6 uncond;

-	return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
+	return e->target_offset == sizeof(struct ip6t_entry) &&
+	       memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
 }

 static inline const struct xt_entry_target *
@@ -255,11 +256,10 @@ get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
 	} else if (s == e) {
 		(*rulenum)++;

-		if (s->target_offset == sizeof(struct ip6t_entry) &&
+		if (unconditional(s) &&
 		    strcmp(t->target.u.kernel.target->name,
 			   XT_STANDARD_TARGET) == 0 &&
-		    t->verdict < 0 &&
-		    unconditional(&s->ipv6)) {
+		    t->verdict < 0) {
 			/* Tail of chains: STANDARD target (return/policy) */
 			*comment = *chainname == hookname
 				? comments[NF_IP6_TRACE_COMMENT_POLICY]
@@ -477,11 +477,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
 		e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

 		/* Unconditional return/END. */
-		if ((e->target_offset == sizeof(struct ip6t_entry) &&
+		if ((unconditional(e) &&
 		     (strcmp(t->target.u.user.name,
 			     XT_STANDARD_TARGET) == 0) &&
-		     t->verdict < 0 &&
-		     unconditional(&e->ipv6)) || visited) {
+		     t->verdict < 0) || visited) {
 			unsigned int oldpos, size;

 			if ((strcmp(t->target.u.user.name,
@@ -522,6 +521,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
 				size = e->next_offset;
 				e = (struct ip6t_entry *)
 					(entry0 + pos + size);
+				if (pos + size >= newinfo->size)
+					return 0;
 				e->counters.pcnt = pos;
 				pos += size;
 			} else {
@@ -543,6 +544,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
 				} else {
 					/* ... this is a fallthru */
 					newpos = pos + e->next_offset;
+					if (newpos >= newinfo->size)
+						return 0;
 				}
 				e = (struct ip6t_entry *)
 					(entry0 + newpos);
@@ -569,27 +572,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
 	module_put(par.match->me);
 }

-static int
-check_entry(const struct ip6t_entry *e, const char *name)
-{
-	const struct xt_entry_target *t;
-
-	if (!ip6_checkentry(&e->ipv6)) {
-		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
-		return -EINVAL;
-	}
-
-	if (e->target_offset + sizeof(struct xt_entry_target) >
-	    e->next_offset)
-		return -EINVAL;
-
-	t = ip6t_get_target_c(e);
-	if (e->target_offset + t->u.target_size > e->next_offset)
-		return -EINVAL;
-
-	return 0;
-}
-
 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
 {
 	const struct ip6t_ip6 *ipv6 = par->entryinfo;
@@ -668,10 +650,6 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
 	struct xt_mtchk_param mtpar;
 	struct xt_entry_match *ematch;

-	ret = check_entry(e, name);
-	if (ret)
-		return ret;
-
 	j = 0;
 	mtpar.net	= net;
 	mtpar.table     = name;
@@ -715,7 +693,7 @@ static bool check_underflow(const struct ip6t_entry *e)
 	const struct xt_entry_target *t;
 	unsigned int verdict;

-	if (!unconditional(&e->ipv6))
+	if (!unconditional(e))
 		return false;
 	t = ip6t_get_target_c(e);
 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -735,9 +713,11 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
 			   unsigned int valid_hooks)
 {
 	unsigned int h;
+	int err;

 	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
-	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
+	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
+	    (unsigned char *)e + e->next_offset > limit) {
 		duprintf("Bad offset %p\n", e);
 		return -EINVAL;
 	}
@@ -749,6 +729,14 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
 		return -EINVAL;
 	}

+	if (!ip6_checkentry(&e->ipv6))
+		return -EINVAL;
+
+	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
+				     e->next_offset);
+	if (err)
+		return err;
+
 	/* Check hooks & underflows */
 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
 		if (!(valid_hooks & (1 << h)))
@@ -757,9 +745,9 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
 			newinfo->hook_entry[h] = hook_entries[h];
 		if ((unsigned char *)e - base == underflows[h]) {
 			if (!check_underflow(e)) {
-				pr_err("Underflows must be unconditional and "
-				       "use the STANDARD target with "
-				       "ACCEPT/DROP\n");
+				pr_debug("Underflows must be unconditional and "
+					 "use the STANDARD target with "
+					 "ACCEPT/DROP\n");
 				return -EINVAL;
 			}
 			newinfo->underflow[h] = underflows[h];
@@ -1268,6 +1256,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
 	/* overflow check */
 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
 		return -ENOMEM;
+	if (tmp.num_counters == 0)
+		return -EINVAL;
+
 	tmp.name[sizeof(tmp.name)-1] = 0;

 	newinfo = xt_alloc_table_info(tmp.size);
@@ -1309,56 +1300,17 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
 	unsigned int i, curcpu;
 	struct xt_counters_info tmp;
 	struct xt_counters *paddc;
-	unsigned int num_counters;
-	char *name;
-	int size;
-	void *ptmp;
 	struct xt_table *t;
 	const struct xt_table_info *private;
 	int ret = 0;
 	const void *loc_cpu_entry;
 	struct ip6t_entry *iter;
 	unsigned int addend;
-#ifdef CONFIG_COMPAT
-	struct compat_xt_counters_info compat_tmp;
-
-	if (compat) {
-		ptmp = &compat_tmp;
-		size = sizeof(struct compat_xt_counters_info);
-	} else
-#endif
-	{
-		ptmp = &tmp;
-		size = sizeof(struct xt_counters_info);
-	}
-
-	if (copy_from_user(ptmp, user, size) != 0)
-		return -EFAULT;
-
-#ifdef CONFIG_COMPAT
-	if (compat) {
-		num_counters = compat_tmp.num_counters;
-		name = compat_tmp.name;
-	} else
-#endif
-	{
-		num_counters = tmp.num_counters;
-		name = tmp.name;
-	}

-	if (len != size + num_counters * sizeof(struct xt_counters))
-		return -EINVAL;
-
-	paddc = vmalloc(len - size);
-	if (!paddc)
-		return -ENOMEM;
-
-	if (copy_from_user(paddc, user + size, len - size) != 0) {
-		ret = -EFAULT;
-		goto free;
-	}
-
-	t = xt_find_table_lock(net, AF_INET6, name);
+	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
+	if (IS_ERR(paddc))
+		return PTR_ERR(paddc);
+	t = xt_find_table_lock(net, AF_INET6, tmp.name);
 	if (IS_ERR_OR_NULL(t)) {
 		ret = t ? PTR_ERR(t) : -ENOENT;
 		goto free;
@@ -1367,7 +1319,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,

 	local_bh_disable();
 	private = t->private;
-	if (private->number != num_counters) {
+	if (private->number != tmp.num_counters) {
 		ret = -EINVAL;
 		goto unlock_up_free;
 	}
@@ -1447,7 +1399,6 @@ compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,

 static int
 compat_find_calc_match(struct xt_entry_match *m,
-		       const char *name,
 		       const struct ip6t_ip6 *ipv6,
 		       unsigned int hookmask,
 		       int *size)
@@ -1483,21 +1434,19 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
 				  struct xt_table_info *newinfo,
 				  unsigned int *size,
 				  const unsigned char *base,
-				  const unsigned char *limit,
-				  const unsigned int *hook_entries,
-				  const unsigned int *underflows,
-				  const char *name)
+				  const unsigned char *limit)
 {
 	struct xt_entry_match *ematch;
 	struct xt_entry_target *t;
 	struct xt_target *target;
 	unsigned int entry_offset;
 	unsigned int j;
-	int ret, off, h;
+	int ret, off;

 	duprintf("check_compat_entry_size_and_hooks %p\n", e);
 	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
-	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
+	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
+	    (unsigned char *)e + e->next_offset > limit) {
 		duprintf("Bad offset %p, limit = %p\n", e, limit);
 		return -EINVAL;
 	}
@@ -1509,8 +1458,11 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
 		return -EINVAL;
 	}

-	/* For purposes of check_entry casting the compat entry is fine */
-	ret = check_entry((struct ip6t_entry *)e, name);
+	if (!ip6_checkentry(&e->ipv6))
+		return -EINVAL;
+
+	ret = xt_compat_check_entry_offsets(e, e->elems,
+					    e->target_offset, e->next_offset);
 	if (ret)
 		return ret;

@@ -1518,8 +1470,8 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
 	entry_offset = (void *)e - (void *)base;
 	j = 0;
 	xt_ematch_foreach(ematch, e) {
-		ret = compat_find_calc_match(ematch, name,
-					     &e->ipv6, e->comefrom, &off);
+		ret = compat_find_calc_match(ematch, &e->ipv6, e->comefrom,
+					     &off);
 		if (ret != 0)
 			goto release_matches;
 		++j;
@@ -1542,17 +1494,6 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
 	if (ret)
 		goto out;

-	/* Check hooks & underflows */
-	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
-		if ((unsigned char *)e - base == hook_entries[h])
-			newinfo->hook_entry[h] = hook_entries[h];
-		if ((unsigned char *)e - base == underflows[h])
-			newinfo->underflow[h] = underflows[h];
-	}
-
-	/* Clear counters and comefrom */
-	memset(&e->counters, 0, sizeof(e->counters));
-	e->comefrom = 0;
 	return 0;

 out:
@@ -1566,18 +1507,17 @@ release_matches:
 	return ret;
 }

-static int
+static void
 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
-			    unsigned int *size, const char *name,
+			    unsigned int *size,
 			    struct xt_table_info *newinfo, unsigned char *base)
 {
 	struct xt_entry_target *t;
 	struct ip6t_entry *de;
 	unsigned int origsize;
-	int ret, h;
+	int h;
 	struct xt_entry_match *ematch;

-	ret = 0;
 	origsize = *size;
 	de = (struct ip6t_entry *)*dstptr;
 	memcpy(de, e, sizeof(struct ip6t_entry));
@@ -1586,11 +1526,9 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
 	*dstptr += sizeof(struct ip6t_entry);
 	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

-	xt_ematch_foreach(ematch, e) {
-		ret = xt_compat_match_from_user(ematch, dstptr, size);
-		if (ret != 0)
-			return ret;
-	}
+	xt_ematch_foreach(ematch, e)
+		xt_compat_match_from_user(ematch, dstptr, size);
+
 	de->target_offset = e->target_offset - (origsize - *size);
 	t = compat_ip6t_get_target(e);
 	xt_compat_target_from_user(t, dstptr, size);
@@ -1602,181 +1540,82 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
 		if ((unsigned char *)de - base < newinfo->underflow[h])
 			newinfo->underflow[h] -= origsize - *size;
 	}
-	return ret;
-}
-
-static int compat_check_entry(struct ip6t_entry *e, struct net *net,
-			      const char *name)
-{
-	unsigned int j;
-	int ret = 0;
-	struct xt_mtchk_param mtpar;
-	struct xt_entry_match *ematch;
-
-	j = 0;
-	mtpar.net	= net;
-	mtpar.table     = name;
-	mtpar.entryinfo = &e->ipv6;
-	mtpar.hook_mask = e->comefrom;
-	mtpar.family    = NFPROTO_IPV6;
-	xt_ematch_foreach(ematch, e) {
-		ret = check_match(ematch, &mtpar);
-		if (ret != 0)
-			goto cleanup_matches;
-		++j;
-	}
-
-	ret = check_target(e, net, name);
-	if (ret)
-		goto cleanup_matches;
-	return 0;
-
- cleanup_matches:
-	xt_ematch_foreach(ematch, e) {
-		if (j-- == 0)
-			break;
-		cleanup_match(ematch, net);
-	}
-	return ret;
 }

 static int
 translate_compat_table(struct net *net,
-		       const char *name,
-		       unsigned int valid_hooks,
 		       struct xt_table_info **pinfo,
 		       void **pentry0,
-		       unsigned int total_size,
-		       unsigned int number,
-		       unsigned int *hook_entries,
-		       unsigned int *underflows)
+		       const struct compat_ip6t_replace *compatr)
 {
 	unsigned int i, j;
 	struct xt_table_info *newinfo, *info;
 	void *pos, *entry0, *entry1;
 	struct compat_ip6t_entry *iter0;
-	struct ip6t_entry *iter1;
+	struct ip6t_replace repl;
 	unsigned int size;
 	int ret = 0;

 	info = *pinfo;
 	entry0 = *pentry0;
-	size = total_size;
-	info->number = number;
-
-	/* Init all hooks to impossible value. */
-	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
-		info->hook_entry[i] = 0xFFFFFFFF;
-		info->underflow[i] = 0xFFFFFFFF;
-	}
+	size = compatr->size;
+	info->number = compatr->num_entries;

 	duprintf("translate_compat_table: size %u\n", info->size);
 	j = 0;
 	xt_compat_lock(AF_INET6);
-	xt_compat_init_offsets(AF_INET6, number);
+	xt_compat_init_offsets(AF_INET6, compatr->num_entries);
 	/* Walk through entries, checking offsets. */
-	xt_entry_foreach(iter0, entry0, total_size) {
+	xt_entry_foreach(iter0, entry0, compatr->size) {
 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
 							entry0,
-							entry0 + total_size,
-							hook_entries,
-							underflows,
-							name);
+							entry0 + compatr->size);
 		if (ret != 0)
 			goto out_unlock;
 		++j;
 	}

 	ret = -EINVAL;
-	if (j != number) {
+	if (j != compatr->num_entries) {
 		duprintf("translate_compat_table: %u not %u entries\n",
-			 j, number);
+			 j, compatr->num_entries);
 		goto out_unlock;
 	}

-	/* Check hooks all assigned */
-	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
-		/* Only hooks which are valid */
-		if (!(valid_hooks & (1 << i)))
-			continue;
-		if (info->hook_entry[i] == 0xFFFFFFFF) {
-			duprintf("Invalid hook entry %u %u\n",
-				 i, hook_entries[i]);
-			goto out_unlock;
-		}
-		if (info->underflow[i] == 0xFFFFFFFF) {
-			duprintf("Invalid underflow %u %u\n",
-				 i, underflows[i]);
-			goto out_unlock;
-		}
-	}
-
 	ret = -ENOMEM;
 	newinfo = xt_alloc_table_info(size);
 	if (!newinfo)
 		goto out_unlock;

-	newinfo->number = number;
+	newinfo->number = compatr->num_entries;
 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
-		newinfo->hook_entry[i] = info->hook_entry[i];
-		newinfo->underflow[i] = info->underflow[i];
+		newinfo->hook_entry[i] = compatr->hook_entry[i];
+		newinfo->underflow[i] = compatr->underflow[i];
 	}
 	entry1 = newinfo->entries[raw_smp_processor_id()];
 	pos = entry1;
-	size = total_size;
-	xt_entry_foreach(iter0, entry0, total_size) {
-		ret = compat_copy_entry_from_user(iter0, &pos, &size,
-						  name, newinfo, entry1);
-		if (ret != 0)
-			break;
-	}
+	size = compatr->size;
+	xt_entry_foreach(iter0, entry0, compatr->size)
+		compat_copy_entry_from_user(iter0, &pos, &size,
+					    newinfo, entry1);
+
+	/* all module references in entry0 are now gone. */
 	xt_compat_flush_offsets(AF_INET6);
 	xt_compat_unlock(AF_INET6);
-	if (ret)
-		goto free_newinfo;

-	ret = -ELOOP;
-	if (!mark_source_chains(newinfo, valid_hooks, entry1))
-		goto free_newinfo;
+	memcpy(&repl, compatr, sizeof(*compatr));

-	i = 0;
-	xt_entry_foreach(iter1, entry1, newinfo->size) {
-		ret = compat_check_entry(iter1, net, name);
-		if (ret != 0)
-			break;
-		++i;
-		if (strcmp(ip6t_get_target(iter1)->u.user.name,
-		    XT_ERROR_TARGET) == 0)
-			++newinfo->stacksize;
-	}
-	if (ret) {
-		/*
-		 * The first i matches need cleanup_entry (calls ->destroy)
-		 * because they had called ->check already. The other j-i
-		 * entries need only release.
-		 */
-		int skip = i;
-		j -= i;
-		xt_entry_foreach(iter0, entry0, newinfo->size) {
-			if (skip-- > 0)
-				continue;
-			if (j-- == 0)
-				break;
-			compat_release_entry(iter0);
-		}
-		xt_entry_foreach(iter1, entry1, newinfo->size) {
-			if (i-- == 0)
-				break;
-			cleanup_entry(iter1, net);
-		}
-		xt_free_table_info(newinfo);
-		return ret;
+	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+		repl.hook_entry[i] = newinfo->hook_entry[i];
+		repl.underflow[i] = newinfo->underflow[i];
 	}

-	/* And one copy for every other CPU */
-	for_each_possible_cpu(i)
-		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
-			memcpy(newinfo->entries[i], entry1, newinfo->size);
+	repl.num_counters = 0;
+	repl.counters = NULL;
+	repl.size = newinfo->size;
+	ret = translate_table(net, newinfo, entry1, &repl);
+	if (ret)
+		goto free_newinfo;

 	*pinfo = newinfo;
 	*pentry0 = entry1;
@@ -1785,17 +1624,16 @@ translate_compat_table(struct net *net,

 free_newinfo:
 	xt_free_table_info(newinfo);
-out:
-	xt_entry_foreach(iter0, entry0, total_size) {
+	return ret;
+out_unlock:
+	xt_compat_flush_offsets(AF_INET6);
+	xt_compat_unlock(AF_INET6);
+	xt_entry_foreach(iter0, entry0, compatr->size) {
 		if (j-- == 0)
 			break;
 		compat_release_entry(iter0);
 	}
 	return ret;
-out_unlock:
-	xt_compat_flush_offsets(AF_INET6);
-	xt_compat_unlock(AF_INET6);
-	goto out;
 }

 static int
@@ -1815,6 +1653,9 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
 		return -ENOMEM;
 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
 		return -ENOMEM;
+	if (tmp.num_counters == 0)
+		return -EINVAL;
+
 	tmp.name[sizeof(tmp.name)-1] = 0;

 	newinfo = xt_alloc_table_info(tmp.size);
@@ -1829,10 +1670,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
 		goto free_newinfo;
 	}

-	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
-				     &newinfo, &loc_cpu_entry, tmp.size,
-				     tmp.num_entries, tmp.hook_entry,
-				     tmp.underflow);
+	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
 	if (ret != 0)
 		goto free_newinfo;

diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 4ddf67c6355b..d9535bb8fe2e 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -530,13 +530,13 @@ static int ipip6_err(struct sk_buff *skb, u32 info)

 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
 		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-				 t->parms.link, 0, IPPROTO_IPV6, 0);
+				 t->parms.link, 0, iph->protocol, 0);
 		err = 0;
 		goto out;
 	}
 	if (type == ICMP_REDIRECT) {
 		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
-			      IPPROTO_IPV6, 0);
+			      iph->protocol, 0);
 		err = 0;
 		goto out;
 	}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4659b8ab55d9..41c026f11edc 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1767,7 +1767,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 	destp = ntohs(inet->inet_dport);
 	srcp  = ntohs(inet->inet_sport);

-	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
+	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
+	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
 		timer_active	= 1;
 		timer_expires	= icsk->icsk_timeout;
 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3046d0244393..d234e6f80570 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -370,6 +370,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 	int peeked, off = 0;
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
+	bool checksum_valid = false;
 	int is_udp4;
 	bool slow;

@@ -401,11 +402,12 @@ try_again:
 	 */

 	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
-		if (udp_lib_checksum_complete(skb))
+		checksum_valid = !udp_lib_checksum_complete(skb);
+		if (!checksum_valid)
 			goto csum_copy_err;
 	}

-	if (skb_csum_unnecessary(skb))
+	if (checksum_valid || skb_csum_unnecessary(skb))
 		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
 					      msg->msg_iov, copied);
 	else {
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index f8133ff5b081..c95bafa65f5b 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1039,8 +1039,11 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
 	}

 	/* Check if we have opened a local TSAP */
-	if (!self->tsap)
-		irda_open_tsap(self, LSAP_ANY, addr->sir_name);
+	if (!self->tsap) {
+		err = irda_open_tsap(self, LSAP_ANY, addr->sir_name);
+		if (err)
+			goto out;
+	}

 	/* Move to connecting socket, start sending Connect Requests */
 	sock->state = SS_CONNECTING;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 6952760881c8..f8765cc84e47 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -161,6 +161,10 @@ void mesh_sta_cleanup(struct sta_info *sta)
 		del_timer_sync(&sta->plink_timer);
 	}

+	/* make sure no readers can access nexthop sta from here on */
+	mesh_path_flush_by_nexthop(sta);
+	synchronize_net();
+
 	if (changed)
 		ieee80211_mbss_info_change_notify(sdata, changed);
 }
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 8b03028cca69..51c141b09dba 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -435,6 +435,47 @@ int xt_check_match(struct xt_mtchk_param *par,
 }
 EXPORT_SYMBOL_GPL(xt_check_match);

+/** xt_check_entry_match - check that matches end before start of target
+ *
+ * @match: beginning of xt_entry_match
+ * @target: beginning of this rules target (alleged end of matches)
+ * @alignment: alignment requirement of match structures
+ *
+ * Validates that all matches add up to the beginning of the target,
+ * and that each match covers at least the base structure size.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int xt_check_entry_match(const char *match, const char *target,
+				const size_t alignment)
+{
+	const struct xt_entry_match *pos;
+	int length = target - match;
+
+	if (length == 0) /* no matches */
+		return 0;
+
+	pos = (struct xt_entry_match *)match;
+	do {
+		if ((unsigned long)pos % alignment)
+			return -EINVAL;
+
+		if (length < (int)sizeof(struct xt_entry_match))
+			return -EINVAL;
+
+		if (pos->u.match_size < sizeof(struct xt_entry_match))
+			return -EINVAL;
+
+		if (pos->u.match_size > length)
+			return -EINVAL;
+
+		length -= pos->u.match_size;
+		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
+	} while (length > 0);
+
+	return 0;
+}
+
 #ifdef CONFIG_COMPAT
 int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
 {
@@ -504,13 +545,14 @@ int xt_compat_match_offset(const struct xt_match *match)
 }
 EXPORT_SYMBOL_GPL(xt_compat_match_offset);

-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
-			      unsigned int *size)
+void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+			       unsigned int *size)
 {
 	const struct xt_match *match = m->u.kernel.match;
 	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
 	int pad, off = xt_compat_match_offset(match);
 	u_int16_t msize = cm->u.user.match_size;
+	char name[sizeof(m->u.user.name)];

 	m = *dstptr;
 	memcpy(m, cm, sizeof(*cm));
@@ -524,10 +566,12 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,

 	msize += off;
 	m->u.user.match_size = msize;
+	strlcpy(name, match->name, sizeof(name));
+	module_put(match->me);
+	strncpy(m->u.user.name, name, sizeof(m->u.user.name));

 	*size += off;
 	*dstptr += msize;
-	return 0;
 }
 EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

@@ -558,8 +602,125 @@ int xt_compat_match_to_user(const struct xt_entry_match *m,
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
+
+/* non-compat version may have padding after verdict */
+struct compat_xt_standard_target {
+	struct compat_xt_entry_target t;
+	compat_uint_t verdict;
+};
+
+int xt_compat_check_entry_offsets(const void *base, const char *elems,
+				  unsigned int target_offset,
+				  unsigned int next_offset)
+{
+	long size_of_base_struct = elems - (const char *)base;
+	const struct compat_xt_entry_target *t;
+	const char *e = base;
+
+	if (target_offset < size_of_base_struct)
+		return -EINVAL;
+
+	if (target_offset + sizeof(*t) > next_offset)
+		return -EINVAL;
+
+	t = (void *)(e + target_offset);
+	if (t->u.target_size < sizeof(*t))
+		return -EINVAL;
+
+	if (target_offset + t->u.target_size > next_offset)
+		return -EINVAL;
+
+	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
+	    COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
+		return -EINVAL;
+
+	/* compat_xt_entry match has less strict aligment requirements,
+	 * otherwise they are identical. In case of padding differences
+	 * we need to add compat version of xt_check_entry_match.
+	 */
+	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
+
+	return xt_check_entry_match(elems, base + target_offset,
+				    __alignof__(struct compat_xt_entry_match));
+}
+EXPORT_SYMBOL(xt_compat_check_entry_offsets);
 #endif /* CONFIG_COMPAT */

+/**
+ * xt_check_entry_offsets - validate arp/ip/ip6t_entry
+ *
+ * @base: pointer to arp/ip/ip6t_entry
+ * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
+ * @target_offset: the arp/ip/ip6_t->target_offset
+ * @next_offset: the arp/ip/ip6_t->next_offset
+ *
+ * validates that target_offset and next_offset are sane and that all
+ * match sizes (if any) align with the target offset.
+ *
+ * This function does not validate the targets or matches themselves, it
+ * only tests that all the offsets and sizes are correct, that all
+ * match structures are aligned, and that the last structure ends where
+ * the target structure begins.
+ *
+ * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
+ *
+ * The arp/ip/ip6t_entry structure @base must have passed following tests:
+ * - it must point to a valid memory location
+ * - base to base + next_offset must be accessible, i.e. not exceed allocated
+ *   length.
+ *
+ * A well-formed entry looks like this:
+ *
+ * ip(6)t_entry   match [mtdata]  match [mtdata]  target [tgdata] ip(6)t_entry
+ * e->elems[]-----'                              |               |
+ *                matchsize                      |               |
+ *                                matchsize      |               |
+ *                                               |               |
+ * target_offset---------------------------------'               |
+ * next_offset---------------------------------------------------'
+ *
+ * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
+ *          This is where matches (if any) and the target reside.
+ * target_offset: beginning of target.
+ * next_offset: start of the next rule; also: size of this rule.
+ * Since targets have a minimum size, target_offset + minlen <= next_offset.
+ *
+ * Every match stores its size, sum of sizes must not exceed target_offset.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int xt_check_entry_offsets(const void *base,
+			   const char *elems,
+			   unsigned int target_offset,
+			   unsigned int next_offset)
+{
+	long size_of_base_struct = elems - (const char *)base;
+	const struct xt_entry_target *t;
+	const char *e = base;
+
+	/* target start is within the ip/ip6/arpt_entry struct */
+	if (target_offset < size_of_base_struct)
+		return -EINVAL;
+
+	if (target_offset + sizeof(*t) > next_offset)
+		return -EINVAL;
+
+	t = (void *)(e + target_offset);
+	if (t->u.target_size < sizeof(*t))
+		return -EINVAL;
+
+	if (target_offset + t->u.target_size > next_offset)
+		return -EINVAL;
+
+	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
+	    XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
+		return -EINVAL;
+
+	return xt_check_entry_match(elems, base + target_offset,
+				    __alignof__(struct xt_entry_match));
+}
+EXPORT_SYMBOL(xt_check_entry_offsets);
+
 int xt_check_target(struct xt_tgchk_param *par,
 		    unsigned int size, u_int8_t proto, bool inv_proto)
 {
@@ -610,6 +771,80 @@ int xt_check_target(struct xt_tgchk_param *par,
 }
 EXPORT_SYMBOL_GPL(xt_check_target);

+/**
+ * xt_copy_counters_from_user - copy counters and metadata from userspace
+ *
+ * @user: src pointer to userspace memory
+ * @len: alleged size of userspace memory
+ * @info: where to store the xt_counters_info metadata
+ * @compat: true if we setsockopt call is done by 32bit task on 64bit kernel
+ *
+ * Copies counter meta data from @user and stores it in @info.
+ *
+ * vmallocs memory to hold the counters, then copies the counter data
+ * from @user to the new memory and returns a pointer to it.
+ *
+ * If @compat is true, @info gets converted automatically to the 64bit
+ * representation.
+ *
+ * The metadata associated with the counters is stored in @info.
+ *
+ * Return: returns pointer that caller has to test via IS_ERR().
+ * If IS_ERR is false, caller has to vfree the pointer.
+ */
+void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
+				 struct xt_counters_info *info, bool compat)
+{
+	void *mem;
+	u64 size;
+
+#ifdef CONFIG_COMPAT
+	if (compat) {
+		/* structures only differ in size due to alignment */
+		struct compat_xt_counters_info compat_tmp;
+
+		if (len <= sizeof(compat_tmp))
+			return ERR_PTR(-EINVAL);
+
+		len -= sizeof(compat_tmp);
+		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
+			return ERR_PTR(-EFAULT);
+
+		strlcpy(info->name, compat_tmp.name, sizeof(info->name));
+		info->num_counters = compat_tmp.num_counters;
+		user += sizeof(compat_tmp);
+	} else
+#endif
+	{
+		if (len <= sizeof(*info))
+			return ERR_PTR(-EINVAL);
+
+		len -= sizeof(*info);
+		if (copy_from_user(info, user, sizeof(*info)) != 0)
+			return ERR_PTR(-EFAULT);
+
+		info->name[sizeof(info->name) - 1] = '\0';
+		user += sizeof(*info);
+	}
+
+	size = sizeof(struct xt_counters);
+	size *= info->num_counters;
+
+	if (size != (u64)len)
+		return ERR_PTR(-EINVAL);
+
+	mem = vmalloc(len);
+	if (!mem)
+		return ERR_PTR(-ENOMEM);
+
+	if (copy_from_user(mem, user, len) == 0)
+		return mem;
+
+	vfree(mem);
+	return ERR_PTR(-EFAULT);
+}
+EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
+
 #ifdef CONFIG_COMPAT
 int xt_compat_target_offset(const struct xt_target *target)
 {
@@ -625,6 +860,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
 	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
 	int pad, off = xt_compat_target_offset(target);
 	u_int16_t tsize = ct->u.user.target_size;
+	char name[sizeof(t->u.user.name)];

 	t = *dstptr;
 	memcpy(t, ct, sizeof(*ct));
@@ -638,6 +874,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,

 	tsize += off;
 	t->u.user.target_size = tsize;
+	strlcpy(name, target->name, sizeof(name));
+	module_put(target->me);
+	strncpy(t->u.user.name, name, sizeof(t->u.user.name));

 	*size += off;
 	*dstptr += tsize;
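The new xt_check_entry_offsets()/xt_check_entry_match() pair above walks a rule blob purely by offsets before any match or target data is trusted, enforcing the layout shown in the kernel-doc diagram. The toy validator below restates that walk with stand-in types so the invariants can be tested in isolation; it is a sketch under those assumptions, not the kernel API (toy_match and toy_target are hypothetical).

    /* Toy re-statement of the offset checks with stand-in types. */
    #include <stddef.h>
    #include <stdint.h>

    struct toy_match  { uint16_t match_size;  };  /* like xt_entry_match */
    struct toy_target { uint16_t target_size; };  /* like xt_entry_target */

    static int toy_check_offsets(const char *base, const char *elems,
                                 unsigned int target_offset,
                                 unsigned int next_offset)
    {
        const struct toy_target *t;
        int len;

        /* target must start after the fixed entry header ... */
        if (target_offset < (unsigned int)(elems - base))
            return -1;
        /* ... and the target header must fit inside the rule */
        if (target_offset + sizeof(*t) > next_offset)
            return -1;
        t = (const struct toy_target *)(base + target_offset);
        if (t->target_size < sizeof(*t) ||
            target_offset + t->target_size > next_offset)
            return -1;

        /* matches must exactly fill [elems, base + target_offset) */
        len = (int)((base + target_offset) - elems);
        while (len > 0) {
            const struct toy_match *m = (const struct toy_match *)elems;

            if (len < (int)sizeof(*m) || m->match_size < sizeof(*m) ||
                m->match_size > (unsigned int)len)
                return -1;
            len  -= m->match_size;
            elems += m->match_size;
        }
        return 0;
    }

The key property is that every size field is bounded twice: against the structure's own minimum size, and against the space remaining before target_offset (for matches) or next_offset (for the target), so no crafted size can walk the cursor out of the rule.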
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 7c94aedd0912..5b1fbe45ff0b 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -700,7 +700,11 @@ socket_setattr_return:
 */
 void netlbl_sock_delattr(struct sock *sk)
 {
-	cipso_v4_sock_delattr(sk);
+	switch (sk->sk_family) {
+	case AF_INET:
+		cipso_v4_sock_delattr(sk);
+		break;
+	}
 }

 /**
@@ -879,7 +883,11 @@ req_setattr_return:
 */
 void netlbl_req_delattr(struct request_sock *req)
 {
-	cipso_v4_req_delattr(req);
+	switch (req->rsk_ops->family) {
+	case AF_INET:
+		cipso_v4_req_delattr(req);
+		break;
+	}
 }

 /**
diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c
index d11ac79246e4..cf5b145902e5 100644
--- a/net/rfkill/rfkill-regulator.c
+++ b/net/rfkill/rfkill-regulator.c
@@ -30,6 +30,7 @@ struct rfkill_regulator_data {
 static int rfkill_regulator_set_block(void *data, bool blocked)
 {
 	struct rfkill_regulator_data *rfkill_data = data;
+	int ret = 0;

 	pr_debug("%s: blocked: %d\n", __func__, blocked);

@@ -40,15 +41,16 @@ static int rfkill_regulator_set_block(void *data, bool blocked)
 		}
 	} else {
 		if (!rfkill_data->reg_enabled) {
-			regulator_enable(rfkill_data->vcc);
-			rfkill_data->reg_enabled = true;
+			ret = regulator_enable(rfkill_data->vcc);
+			if (!ret)
+				rfkill_data->reg_enabled = true;
 		}
 	}

 	pr_debug("%s: regulator_is_enabled after set_block: %d\n", __func__,
 		regulator_is_enabled(rfkill_data->vcc));

-	return 0;
+	return ret;
 }

 static struct rfkill_ops rfkill_regulator_ops = {
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
|
|
index 8aab894aeabe..730914cdb7a1 100644
|
|
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -251,12 +251,13 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
int error;
struct sctp_transport *transport = (struct sctp_transport *) peer;
struct sctp_association *asoc = transport->asoc;
- struct net *net = sock_net(asoc->base.sk);
+ struct sock *sk = asoc->base.sk;
+ struct net *net = sock_net(sk);

/* Check whether a task is in the sock. */

- sctp_bh_lock_sock(asoc->base.sk);
- if (sock_owned_by_user(asoc->base.sk)) {
+ sctp_bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);

/* Try again later. */
@@ -279,10 +280,10 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
transport, GFP_ATOMIC);

if (error)
- asoc->base.sk->sk_err = -error;
+ sk->sk_err = -error;

out_unlock:
- sctp_bh_unlock_sock(asoc->base.sk);
+ sctp_bh_unlock_sock(sk);
sctp_transport_put(transport);
}

@@ -292,11 +293,12 @@ out_unlock:
static void sctp_generate_timeout_event(struct sctp_association *asoc,
sctp_event_timeout_t timeout_type)
{
- struct net *net = sock_net(asoc->base.sk);
+ struct sock *sk = asoc->base.sk;
+ struct net *net = sock_net(sk);
int error = 0;

- sctp_bh_lock_sock(asoc->base.sk);
- if (sock_owned_by_user(asoc->base.sk)) {
+ sctp_bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n",
__func__,
timeout_type);
@@ -320,10 +322,10 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
(void *)timeout_type, GFP_ATOMIC);

if (error)
- asoc->base.sk->sk_err = -error;
+ sk->sk_err = -error;

out_unlock:
- sctp_bh_unlock_sock(asoc->base.sk);
+ sctp_bh_unlock_sock(sk);
sctp_association_put(asoc);
}

@@ -373,10 +375,11 @@ void sctp_generate_heartbeat_event(unsigned long data)
int error = 0;
struct sctp_transport *transport = (struct sctp_transport *) data;
struct sctp_association *asoc = transport->asoc;
- struct net *net = sock_net(asoc->base.sk);
+ struct sock *sk = asoc->base.sk;
+ struct net *net = sock_net(sk);

- sctp_bh_lock_sock(asoc->base.sk);
- if (sock_owned_by_user(asoc->base.sk)) {
+ sctp_bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);

/* Try again later. */
@@ -397,10 +400,10 @@ void sctp_generate_heartbeat_event(unsigned long data)
transport, GFP_ATOMIC);

if (error)
- asoc->base.sk->sk_err = -error;
+ sk->sk_err = -error;

out_unlock:
- sctp_bh_unlock_sock(asoc->base.sk);
+ sctp_bh_unlock_sock(sk);
sctp_transport_put(transport);
}

@@ -411,10 +414,11 @@ void sctp_generate_proto_unreach_event(unsigned long data)
{
struct sctp_transport *transport = (struct sctp_transport *) data;
struct sctp_association *asoc = transport->asoc;
- struct net *net = sock_net(asoc->base.sk);
+ struct sock *sk = asoc->base.sk;
+ struct net *net = sock_net(sk);

- sctp_bh_lock_sock(asoc->base.sk);
- if (sock_owned_by_user(asoc->base.sk)) {
+ sctp_bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);

/* Try again later. */
@@ -435,7 +439,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);

out_unlock:
- sctp_bh_unlock_sock(asoc->base.sk);
+ sctp_bh_unlock_sock(sk);
sctp_association_put(asoc);
}

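[editor's note] Every sm_sideeffect.c hunk above makes the same change: asoc->base.sk is read once into a local sk, and that snapshot is used for the lock, the unlock, and the sk_err write. Re-reading asoc->base.sk is unsafe here because the association's socket can be changed out from under a running timer handler (socket migration), so the unlock could target a different socket than the lock. A minimal standalone sketch of the snapshot pattern, using pthreads stand-ins rather than the kernel API:

#include <pthread.h>

struct sock { pthread_mutex_t lock; };
struct assoc { struct sock *sk; };        /* may be re-pointed concurrently */

static void timer_event(struct assoc *asoc)
{
        struct sock *sk = asoc->sk;       /* snapshot once */

        pthread_mutex_lock(&sk->lock);
        /* ... run the state machine; asoc->sk may change meanwhile ... */
        pthread_mutex_unlock(&sk->lock);  /* same object the lock used */
}
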
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 29b4ba93ab3c..62663a08ffbd 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -859,8 +859,8 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
goto out;
if (svc_getnl(&buf->head[0]) != seq)
goto out;
- /* trim off the mic at the end before returning */
- xdr_buf_trim(buf, mic.len + 4);
+ /* trim off the mic and padding at the end before returning */
+ xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
stat = 0;
out:
kfree(mic.data);
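[editor's note] The round_up_to_quad() fix follows from XDR encoding rules (RFC 4506): opaque data such as the MIC is padded on the wire to a multiple of four bytes, so trimming only mic.len + 4 leaves up to three pad bytes behind. A worked example; quad_aligned() is a local stand-in for the sunrpc helper:

#include <assert.h>

/* XDR pads opaque data to a multiple of four bytes. */
static unsigned int quad_aligned(unsigned int len)
{
        return (len + 3) & ~3u;
}

int main(void)
{
        /* A 37-byte MIC occupies 40 bytes on the wire; adding the 4-byte
         * length word, 44 bytes must be trimmed. "mic.len + 4" (41) would
         * leave 3 stale pad bytes at the end of the buffer. */
        assert(quad_aligned(37) + 4 == 44);
        return 0;
}
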
diff --git a/scripts/asn1_compiler.c b/scripts/asn1_compiler.c
index db0e5cd34c70..91c4117637ae 100644
--- a/scripts/asn1_compiler.c
+++ b/scripts/asn1_compiler.c
@@ -1353,6 +1353,8 @@ static void render_out_of_line_list(FILE *out)
render_opcode(out, "ASN1_OP_END_SET_OF%s,\n", act);
render_opcode(out, "_jump_target(%u),\n", entry);
break;
+ default:
+ break;
}
if (e->action)
render_opcode(out, "_action(ACT_%s),\n",
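[editor's note] The added default: break; presumably states explicitly that the remaining enumeration values need no opcode here; with an enum switch and no default, compilers building with -Wswitch flag every unhandled value. A sketch of the pattern with an illustrative enum:

enum asn1_op { OP_SEQUENCE, OP_SET_OF, OP_OTHER };  /* illustrative */

static void render(enum asn1_op op)
{
        switch (op) {
        case OP_SET_OF:
                /* emit the opcode for this case */
                break;
        default:        /* remaining values intentionally ignored;
                         * also keeps -Wswitch quiet */
                break;
        }
}
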
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index ee625e3a56ba..4f7d13da04a5 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -33,10 +33,17 @@
#include <string.h>
#include <unistd.h>

+/*
+ * glibc synced up and added the metag number but didn't add the relocations.
+ * Work around this in a crude manner for now.
+ */
#ifndef EM_METAG
-/* Remove this when these make it to the standard system elf.h. */
#define EM_METAG 174
+#endif
+#ifndef R_METAG_ADDR32
#define R_METAG_ADDR32 2
+#endif
+#ifndef R_METAG_NONE
#define R_METAG_NONE 3
#endif

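[editor's note] The reworked guards matter because the symbols can arrive in elf.h independently, as the patch comment says: glibc added EM_METAG without the relocations. A single block guarded by #ifndef EM_METAG would then be skipped entirely, leaving R_METAG_ADDR32 and R_METAG_NONE undefined. Sketch of the failure mode versus the per-symbol fix:

/* Fragile: once the system elf.h defines EM_METAG (but nothing else),
 * this whole block is skipped and R_METAG_ADDR32 stays undefined. */
#ifndef EM_METAG
#define EM_METAG        174
#define R_METAG_ADDR32  2
#endif

/* Robust, as in the patch: one guard per symbol. */
#ifndef R_METAG_ADDR32
#define R_METAG_ADDR32  2
#endif
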
diff --git a/security/keys/key.c b/security/keys/key.c
index 8fb7c7bd4657..6595b2dd89fe 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -580,7 +580,7 @@ int key_reject_and_link(struct key *key,

mutex_unlock(&key_construction_mutex);

- if (keyring)
+ if (keyring && link_ret == 0)
__key_link_end(keyring, key->type, prealloc);

/* wake up anyone waiting for a key to be constructed */
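[editor's note] The key.c change tightens a begin/end pairing: __key_link_end() is only valid when the earlier link setup actually succeeded (link_ret == 0); running the end half after a failed begin would clean up state that was never established. A self-contained sketch of the rule, with stand-in names:

struct ring { int busy; };

static int  link_begin(struct ring *r) { return r->busy ? -1 : 0; }
static void link_end(struct ring *r)   { (void)r; }

static void finish(struct ring *r)
{
        int link_ret = r ? link_begin(r) : -1;

        /* ... work that proceeds whether or not the link succeeded ... */

        if (r && link_ret == 0)
                link_end(r);    /* never "end" a begin that failed */
}
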
diff --git a/sound/core/control.c b/sound/core/control.c
index 3fcead61f0ef..251bc575f5c3 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -150,6 +150,8 @@ void snd_ctl_notify(struct snd_card *card, unsigned int mask,

if (snd_BUG_ON(!card || !id))
return;
+ if (card->shutdown)
+ return;
read_lock(&card->ctl_files_rwlock);
#if defined(CONFIG_SND_MIXER_OSS) || defined(CONFIG_SND_MIXER_OSS_MODULE)
card->mixer_oss_change_count++;
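[editor's note] The control.c guard drops notifications once the card has started shutting down, instead of walking per-card state that is being torn down. Checking a shutdown flag at the top of event paths is a common hot-unplug pattern; a minimal sketch (stub types, hypothetical names):

struct card {
        int shutdown;             /* set once teardown has begun */
        /* ... ctl file list, locks ... */
};

static void notify(struct card *card)
{
        if (card->shutdown)       /* device going away: drop the event */
                return;
        /* ... safe to walk card state and queue the notification ... */
}
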
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 38742e826900..3476895ee1fb 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1208,6 +1208,7 @@ static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
tu->tstamp = *tstamp;
if ((tu->filter & (1 << event)) == 0 || !tu->tread)
return;
+ memset(&r1, 0, sizeof(r1));
r1.event = event;
r1.tstamp = *tstamp;
r1.val = resolution;
@@ -1242,6 +1243,7 @@ static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
tu->last_resolution != resolution) {
+ memset(&r1, 0, sizeof(r1));
r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
r1.tstamp = tstamp;
r1.val = resolution;
@@ -1707,6 +1709,7 @@ static int snd_timer_user_params(struct file *file,
if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
if (tu->tread) {
struct snd_timer_tread tread;
+ memset(&tread, 0, sizeof(tread));
tread.event = SNDRV_TIMER_EVENT_EARLY;
tread.tstamp.tv_sec = 0;
tread.tstamp.tv_nsec = 0;
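[editor's note] All three timer.c hunks zero an on-stack struct before assigning only some of its fields. The structs are later copied out to user space, so without the memset() any unassigned bytes, including compiler-inserted padding, would leak kernel stack contents. The pattern in isolation (stand-in struct, not the ALSA one):

#include <string.h>

struct tread {
        int  event;
        /* the compiler may insert padding here */
        long tstamp_sec;
        long val;
};

static void fill(struct tread *r, int event, long sec, long val)
{
        memset(r, 0, sizeof(*r)); /* zero padding and unset fields first:
                                   * the struct is copied to user space, so
                                   * stale stack bytes would be exposed */
        r->event = event;
        r->tstamp_sec = sec;
        r->val = val;
}
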
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index 982a2c2faf24..7f400a1d42e4 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -422,6 +422,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)

static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
{
+ hrtimer_cancel(&dpcm->timer);
tasklet_kill(&dpcm->tasklet);
}

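[editor's note] The one-line dummy.c fix is about teardown order: the driver's hrtimer callback schedules the tasklet, so killing the tasklet first leaves a window in which a late timer fire re-queues it after the "sync". Cancelling the event source before its consumer closes that window. The ordering with stand-in types:

struct timer { int armed; };
struct work  { int queued; };
struct pcm   { struct timer timer; struct work task; };

static void timer_cancel(struct timer *t) { t->armed = 0; }
static void work_kill(struct work *w)     { w->queued = 0; }

static void sync_stop(struct pcm *dpcm)
{
        timer_cancel(&dpcm->timer); /* no new schedules after this */
        work_kill(&dpcm->task);     /* now nothing can re-arm it */
}
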
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index ae59dbaa53d9..42d4b13f1fa7 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -1442,9 +1442,8 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma)
int page, p, pp, delta, i;

page =
- (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) &
- WT_SUBBUF_MASK)
- >> WT_SUBBUF_SHIFT;
+ (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2))
+ >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
if (dma->nr_periods >= 4)
delta = (page - dma->period_real) & 3;
else {
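[editor's note] The au88x0 change fixes operation ordering: WT_SUBBUF_MASK is sized for the field after it has been shifted down, so masking the raw register first throws the field away. Worked example with illustrative constants (not the driver's real values):

#include <assert.h>
#include <stdint.h>

#define SUBBUF_SHIFT 12
#define SUBBUF_MASK  0x3        /* mask for the *shifted* value */

int main(void)
{
        uint32_t stat = 0x3000; /* field value 3 at bits 13:12 */

        /* wrong: a low-bit mask applied first destroys the field */
        assert(((stat & SUBBUF_MASK) >> SUBBUF_SHIFT) == 0);

        /* right: shift the field down, then mask */
        assert(((stat >> SUBBUF_SHIFT) & SUBBUF_MASK) == 3);
        return 0;
}
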
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index c0dbb52d45be..1e4bcb900fc6 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -88,7 +88,7 @@ static int dac_mute_put(struct snd_kcontrol *ctl,
int changed;

mutex_lock(&chip->mutex);
- changed = !value->value.integer.value[0] != chip->dac_mute;
+ changed = (!value->value.integer.value[0]) != chip->dac_mute;
if (changed) {
chip->dac_mute = !value->value.integer.value[0];
chip->model.update_dac_mute(chip);
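[editor's note] The oxygen_mixer.c change is behavior-preserving: in C, ! binds tighter than !=, so both forms compute the same result. The parentheses make it explicit that the normalized 0/1 value is what gets compared against chip->dac_mute, and they quiet compilers that warn about ! on the left-hand side of a comparison. A quick check:

#include <assert.h>

int main(void)
{
        long raw = 5;   /* arbitrary nonzero control value */
        int mute = 1;

        /* '!' already binds tighter than '!=', so the forms agree */
        assert((!raw != mute) == ((!raw) != mute));
        return 0;
}
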
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4f865e122c21..f71c4ad425c6 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2447,7 +2447,7 @@ static long kvm_vm_ioctl(struct file *filp,
if (copy_from_user(&routing, argp, sizeof(routing)))
goto out;
r = -EINVAL;
- if (routing.nr >= KVM_MAX_IRQ_ROUTES)
+ if (routing.nr > KVM_MAX_IRQ_ROUTES)
goto out;
if (routing.flags)
goto out;
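[editor's note] The kvm_main.c change corrects an off-by-one: routing.nr is a count of entries and KVM_MAX_IRQ_ROUTES is the table capacity, so a completely full table (nr == max) is legal and only nr > max should be rejected. The boundary in isolation, with an illustrative capacity:

#include <assert.h>

#define MAX_ROUTES 1024         /* illustrative, not KVM's actual value */

static int validate(unsigned int nr)
{
        return nr > MAX_ROUTES ? -1 : 0;
}

int main(void)
{
        assert(validate(MAX_ROUTES) == 0);      /* full table accepted */
        assert(validate(MAX_ROUTES + 1) < 0);   /* overflow rejected */
        return 0;
}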