diff --git a/Makefile b/Makefile
index ee80efa38844..e677b662f8c5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 82
+SUBLEVEL = 83
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 0a5e8a512ee2..420b7d20d520 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -202,6 +202,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
static inline void __flush_icache_all(void)
{
__flush_icache_preferred();
+ dsb();
}

#define flush_cache_all() __cpuc_flush_kern_all()
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 897486c5d5f4..4d2e4473b931 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -202,7 +202,6 @@ __v6_setup:
mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache
- mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r0, c2, c0, 2 @ TTB control register
@@ -212,6 +211,8 @@ __v6_setup:
ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
mcr p15, 0, r8, c2, c0, 1 @ load TTB1
#endif /* CONFIG_MMU */
+ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and
+ @ complete invalidations
adr r5, v6_crval
ldmia r5, {r5, r6}
#ifdef CONFIG_CPU_ENDIAN_BE8
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index c2e2b66f72b5..fb489cc56713 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -246,7 +246,6 @@ __v7_setup:

3: mov r10, #0
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
- dsb
#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup
@@ -255,6 +254,7 @@ __v7_setup:
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
mcr p15, 0, r6, c10, c2, 1 @ write NMRR
#endif
+ dsb @ Complete invalidations
#ifndef CONFIG_ARM_THUMBEE
mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE
and r0, r0, #(0xf << 12) @ ThumbEE enabled field
diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile
index 22fb66590dcd..dba48a5d5bb9 100644
--- a/arch/avr32/Makefile
+++ b/arch/avr32/Makefile
@@ -11,7 +11,7 @@ all: uImage vmlinux.elf

KBUILD_DEFCONFIG := atstk1002_defconfig

-KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic
+KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic -D__linux__
KBUILD_AFLAGS += -mrelax -mno-pic
KBUILD_CFLAGS_MODULE += -mno-relax
LDFLAGS_vmlinux += --relax
diff --git a/arch/avr32/boards/mimc200/fram.c b/arch/avr32/boards/mimc200/fram.c
index 9764a1a1073e..c1466a872b9c 100644
--- a/arch/avr32/boards/mimc200/fram.c
+++ b/arch/avr32/boards/mimc200/fram.c
@@ -11,6 +11,7 @@
#define FRAM_VERSION "1.0"

#include <linux/miscdevice.h>
+#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/io.h>
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index b3ba5163eae2..9d6a4a4b37fb 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -108,17 +108,19 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
{
void *vaddr;
+ phys_addr_t paddr;

if (!csize)
return 0;

csize = min_t(size_t, csize, PAGE_SIZE);
+ paddr = pfn << PAGE_SHIFT;

- if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
- vaddr = __va(pfn << PAGE_SHIFT);
+ if (memblock_is_region_memory(paddr, csize)) {
+ vaddr = __va(paddr);
csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
} else {
- vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
+ vaddr = __ioremap(paddr, PAGE_SIZE, 0);
csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
iounmap(vaddr);
}
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 968f40101883..59a1edd56d46 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -36,6 +36,7 @@
#define OP_TRAP_64 2

#define OP_31_XOP_LWZX 23
+#define OP_31_XOP_DCBF 86
#define OP_31_XOP_LBZX 87
#define OP_31_XOP_STWX 151
#define OP_31_XOP_STBX 215
@@ -373,6 +374,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
break;

+ case OP_31_XOP_DCBF:
case OP_31_XOP_DCBI:
/* Do nothing. The guest is performing dcbi because
* hardware DMA is not snooped by the dcache, but
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index e00accf9523e..53c973ebc0c8 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -525,13 +525,18 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
if (!kvm_is_ucontrol(vcpu->kvm))
kvm_s390_deliver_pending_interrupts(vcpu);

+ VCPU_EVENT(vcpu, 6, "entering sie flags %x",
+ atomic_read(&vcpu->arch.sie_block->cpuflags));
+
vcpu->arch.sie_block->icptcode = 0;
local_irq_disable();
kvm_guest_enter();
local_irq_enable();
- VCPU_EVENT(vcpu, 6, "entering sie flags %x",
- atomic_read(&vcpu->arch.sie_block->cpuflags));
rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
+ local_irq_disable();
+ kvm_guest_exit();
+ local_irq_enable();
+
if (rc) {
if (kvm_is_ucontrol(vcpu->kvm)) {
rc = SIE_INTERCEPT_UCONTROL;
@@ -543,9 +548,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
}
VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
vcpu->arch.sie_block->icptcode);
- local_irq_disable();
- kvm_guest_exit();
- local_irq_enable();

memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
return rc;
@@ -902,7 +904,7 @@ static int __init kvm_s390_init(void)
}
memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
facilities[0] &= 0xff00fff3f47c0000ULL;
- facilities[1] &= 0x201c000000000000ULL;
+ facilities[1] &= 0x001c000000000000ULL;
return 0;
}
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index bb8e03407e18..87477a10e1c8 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1165,6 +1165,9 @@ static void x86_pmu_del(struct perf_event *event, int flags)
for (i = 0; i < cpuc->n_events; i++) {
if (event == cpuc->event_list[i]) {

+ if (i >= cpuc->n_events - cpuc->n_added)
+ --cpuc->n_added;
+
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(cpuc, event);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index a7678fa29fe7..95980387eeb5 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1448,6 +1448,10 @@ asmlinkage void __init xen_start_kernel(void)

/* Make sure ACS will be enabled */
pci_request_acs();
+
+ /* Avoid searching for BIOS MP tables */
+ x86_init.mpparse.find_smp_config = x86_init_noop;
+ x86_init.mpparse.get_smp_config = x86_init_uint_noop;
}
#ifdef CONFIG_PCI
/* PCI BIOS service won't work from a PV guest. */
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 0503c0c493a9..a7f580623e18 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -576,6 +576,8 @@ static void xen_hvm_cpu_die(unsigned int cpu)
unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
+ xen_uninit_lock_cpu(cpu);
+ xen_teardown_timer(cpu);
native_cpu_die(cpu);
}
diff --git a/block/blk-exec.c b/block/blk-exec.c
index fb2cbd551621..1b5cb66ef854 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -49,8 +49,18 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
rq_end_io_fn *done)
{
int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+ bool is_pm_resume;

WARN_ON(irqs_disabled());
+
+ rq->rq_disk = bd_disk;
+ rq->end_io = done;
+ /*
+ * need to check this before __blk_run_queue(), because rq can
+ * be freed before that returns.
+ */
+ is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;
+
spin_lock_irq(q->queue_lock);

if (unlikely(blk_queue_dead(q))) {
@@ -66,7 +76,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
__elv_add_request(q, rq, where);
__blk_run_queue(q);
/* the queue is stopped so it won't be run */
- if (rq->cmd_type == REQ_TYPE_PM_RESUME)
+ if (is_pm_resume)
q->request_fn(q);
spin_unlock_irq(q->queue_lock);
}
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 1d02b7b5ade0..c653cc2d85c4 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -59,6 +59,12 @@ struct throttling_tstate {
int target_state; /* target T-state */
};

+struct acpi_processor_throttling_arg {
+ struct acpi_processor *pr;
+ int target_state;
+ bool force;
+};
+
#define THROTTLING_PRECHANGE (1)
#define THROTTLING_POSTCHANGE (2)

@@ -1062,16 +1068,24 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
return 0;
}

+static long acpi_processor_throttling_fn(void *data)
+{
+ struct acpi_processor_throttling_arg *arg = data;
+ struct acpi_processor *pr = arg->pr;
+
+ return pr->throttling.acpi_processor_set_throttling(pr,
+ arg->target_state, arg->force);
+}
+
int acpi_processor_set_throttling(struct acpi_processor *pr,
int state, bool force)
{
- cpumask_var_t saved_mask;
int ret = 0;
unsigned int i;
struct acpi_processor *match_pr;
struct acpi_processor_throttling *p_throttling;
+ struct acpi_processor_throttling_arg arg;
struct throttling_tstate t_state;
- cpumask_var_t online_throttling_cpus;

if (!pr)
return -EINVAL;
@@ -1082,14 +1096,6 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
if ((state < 0) || (state > (pr->throttling.state_count - 1)))
return -EINVAL;

- if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
- return -ENOMEM;
-
- if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
- free_cpumask_var(saved_mask);
- return -ENOMEM;
- }
-
if (cpu_is_offline(pr->id)) {
/*
* the cpu pointed by pr->id is offline. Unnecessary to change
@@ -1098,17 +1104,15 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
return -ENODEV;
}

- cpumask_copy(saved_mask, &current->cpus_allowed);
t_state.target_state = state;
p_throttling = &(pr->throttling);
- cpumask_and(online_throttling_cpus, cpu_online_mask,
- p_throttling->shared_cpu_map);
+
/*
* The throttling notifier will be called for every
* affected cpu in order to get one proper T-state.
* The notifier event is THROTTLING_PRECHANGE.
*/
- for_each_cpu(i, online_throttling_cpus) {
+ for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
&t_state);
@@ -1120,21 +1124,18 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
* it can be called only for the cpu pointed by pr.
*/
if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
- /* FIXME: use work_on_cpu() */
- if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
- /* Can't migrate to the pr->id CPU. Exit */
- ret = -ENODEV;
- goto exit;
- }
- ret = p_throttling->acpi_processor_set_throttling(pr,
- t_state.target_state, force);
+ arg.pr = pr;
+ arg.target_state = state;
+ arg.force = force;
+ ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
} else {
/*
* When the T-state coordination is SW_ALL or HW_ALL,
* it is necessary to set T-state for every affected
* cpus.
*/
- for_each_cpu(i, online_throttling_cpus) {
+ for_each_cpu_and(i, cpu_online_mask,
+ p_throttling->shared_cpu_map) {
match_pr = per_cpu(processors, i);
/*
* If the pointer is invalid, we will report the
@@ -1155,13 +1156,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
"on CPU %d\n", i));
continue;
}
- t_state.cpu = i;
- /* FIXME: use work_on_cpu() */
- if (set_cpus_allowed_ptr(current, cpumask_of(i)))
- continue;
- ret = match_pr->throttling.
- acpi_processor_set_throttling(
- match_pr, t_state.target_state, force);
+
+ arg.pr = match_pr;
+ arg.target_state = state;
+ arg.force = force;
+ ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
+ &arg);
}
}
/*
@@ -1170,17 +1170,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
* affected cpu to update the T-states.
* The notifier event is THROTTLING_POSTCHANGE
*/
- for_each_cpu(i, online_throttling_cpus) {
+ for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
&t_state);
}
- /* restore the previous state */
- /* FIXME: use work_on_cpu() */
- set_cpus_allowed_ptr(current, saved_mask);
-exit:
- free_cpumask_var(online_throttling_cpus);
- free_cpumask_var(saved_mask);
+
return ret;
}
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 6d2c49ba47a9..415448af36bf 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -632,6 +632,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
union acpi_object *o;
struct acpi_video_device_brightness *br = NULL;
int result = -EINVAL;
+ u32 value;

if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available "
@@ -662,7 +663,12 @@ acpi_video_init_brightness(struct acpi_video_device *device)
printk(KERN_ERR PREFIX "Invalid data\n");
continue;
}
- br->levels[count] = (u32) o->integer.value;
+ value = (u32) o->integer.value;
+ /* Skip duplicate entries */
+ if (count > 2 && br->levels[count - 1] == value)
+ continue;
+
+ br->levels[count] = value;

if (br->levels[count] > max_level)
max_level = br->levels[count];
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index f5c35beadc65..0ba32fe00d13 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -447,8 +447,11 @@ static void sata_pmp_quirks(struct ata_port *ap)
* otherwise. Don't try hard to recover it.
*/
ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
- } else if (vendor == 0x197b && devid == 0x2352) {
- /* chip found in Thermaltake BlackX Duet, jmicron JMB350? */
+ } else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) {
+ /*
+ * 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350?
+ * 0x0325: jmicron JMB394.
+ */
ata_for_each_link(link, ap, EDGE) {
/* SRST breaks detection and disks get misclassified
* LPM disabled to avoid potential problems
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 9dfb40b8c2c9..0c4ed89b6aa5 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -157,6 +157,7 @@ static const struct sil_drivelist {
{ "ST380011ASL", SIL_QUIRK_MOD15WRITE },
{ "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
{ "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
+ { "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE },
{ "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
{ }
};
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 386d40e3cf48..d724da52153b 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -590,8 +590,11 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
nbd_cmd(&sreq) = NBD_CMD_DISC;
if (!nbd->sock)
return -EINVAL;
+
+ nbd->disconnect = 1;
+
nbd_send_req(nbd, &sreq);
- return 0;
+ return 0;
}

case NBD_CLEAR_SOCK: {
@@ -620,6 +623,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
nbd->sock = SOCKET_I(inode);
if (max_part > 0)
bdev->bd_invalidated = 1;
+ nbd->disconnect = 0; /* we're connected now */
return 0;
} else {
fput(file);
@@ -691,6 +695,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
set_capacity(nbd->disk, 0);
if (max_part > 0)
ioctl_by_bdev(bdev, BLKRRPART, 0);
+ if (nbd->disconnect) /* user requested, ignore socket errors */
+ return 0;
return nbd->harderror;
}
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 0ed5df4d3e9f..ae1b0c4cfc5b 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -274,6 +274,7 @@ int xen_blkif_schedule(void *arg)
{
struct xen_blkif *blkif = arg;
struct xen_vbd *vbd = &blkif->vbd;
+ int ret;

xen_blkif_get(blkif);

@@ -294,8 +295,12 @@ int xen_blkif_schedule(void *arg)
blkif->waiting_reqs = 0;
smp_mb(); /* clear flag *before* checking for work */

- if (do_block_io_op(blkif))
+ ret = do_block_io_op(blkif);
+ if (ret > 0)
blkif->waiting_reqs = 1;
+ if (ret == -EACCES)
+ wait_event_interruptible(blkif->shutdown_wq,
+ kthread_should_stop());

if (log_stats && time_after(jiffies, blkif->st_print))
print_stats(blkif);
@@ -531,6 +536,12 @@ __do_block_io_op(struct xen_blkif *blkif)
rp = blk_rings->common.sring->req_prod;
rmb(); /* Ensure we see queued requests up to 'rp'. */

+ if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
+ rc = blk_rings->common.rsp_prod_pvt;
+ pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
+ rp, rc, rp - rc, blkif->vbd.pdevice);
+ return -EACCES;
+ }
while (rc != rp) {

if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index fc2a486d4a8e..933adc5a2637 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -216,6 +216,8 @@ struct xen_blkif {
int st_wr_sect;

wait_queue_head_t waiting_to_free;
+ /* Thread shutdown wait queue. */
+ wait_queue_head_t shutdown_wq;
};

diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index a155254f1338..5a0062f1e15c 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -118,6 +118,7 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
atomic_set(&blkif->drain, 0);
blkif->st_print = jiffies;
init_waitqueue_head(&blkif->waiting_to_free);
+ init_waitqueue_head(&blkif->shutdown_wq);

return blkif;
}
@@ -178,6 +179,7 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
{
if (blkif->xenblkd) {
kthread_stop(blkif->xenblkd);
+ wake_up(&blkif->shutdown_wq);
blkif->xenblkd = NULL;
}
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index ff18e41f5b22..094a7107301d 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -331,6 +331,12 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
if (msg->len != sizeof(*mc_op))
return;

+ /* Can only change if privileged. */
+ if (!capable(CAP_NET_ADMIN)) {
+ err = EPERM;
+ goto out;
+ }
+
mc_op = (enum proc_cn_mcast_op*)msg->data;
switch (*mc_op) {
case PROC_CN_MCAST_LISTEN:
@@ -343,6 +349,8 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
err = EINVAL;
break;
}
+
+out:
cn_proc_ack(err, msg->seq, msg->ack);
}
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 2ed1ac3513f3..28a96141bd20 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1409,6 +1409,7 @@ static void dma_tasklet(unsigned long data)
struct d40_chan *d40c = (struct d40_chan *) data;
struct d40_desc *d40d;
unsigned long flags;
+ bool callback_active;
dma_async_tx_callback callback;
void *callback_param;

@@ -1432,6 +1433,7 @@ static void dma_tasklet(unsigned long data)
}

/* Callback to client */
+ callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
callback = d40d->txd.callback;
callback_param = d40d->txd.callback_param;

@@ -1456,7 +1458,7 @@ static void dma_tasklet(unsigned long data)

spin_unlock_irqrestore(&d40c->lock, flags);

- if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
+ if (callback_active && callback)
callback(callback_param);

return;
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 0fe2277d8672..ddb725d79fc8 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1365,14 +1365,19 @@ static int i7core_get_onedevice(struct pci_dev **prev,
* is at addr 8086:2c40, instead of 8086:2c41. So, we need
* to probe for the alternate address in case of failure
*/
- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
+ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) {
+ pci_dev_get(*prev); /* pci_get_device will put it */
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
+ }

- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
+ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE &&
+ !pdev) {
+ pci_dev_get(*prev); /* pci_get_device will put it */
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
*prev);
+ }

if (!pdev) {
if (*prev) {
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index 335b183d7c02..f058886ca892 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -243,7 +243,7 @@ static ssize_t set_temp_min(struct device *dev,
data->temp_min[index] = SENSORS_LIMIT(temp/1000, -128, 127);
if (i2c_smbus_write_byte_data(client,
MAX1668_REG_LIML_WR(index),
- data->temp_max[index]))
+ data->temp_min[index]))
count = -EIO;
mutex_unlock(&data->update_lock);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index b74cb796c17c..9147569545e2 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1541,8 +1541,11 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
- r = scsi_verify_blk_ioctl(NULL, cmd);
+ if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
+ int err = scsi_verify_blk_ioctl(NULL, cmd);
+ if (err)
+ r = err;
+ }

return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
diff --git a/drivers/media/video/saa7134/saa7134-alsa.c b/drivers/media/video/saa7134/saa7134-alsa.c
index 10460fd3ce39..dbcdfbf8aed0 100644
--- a/drivers/media/video/saa7134/saa7134-alsa.c
+++ b/drivers/media/video/saa7134/saa7134-alsa.c
@@ -172,7 +172,9 @@ static void saa7134_irq_alsa_done(struct saa7134_dev *dev,
dprintk("irq: overrun [full=%d/%d] - Blocks in %d\n",dev->dmasound.read_count,
dev->dmasound.bufsize, dev->dmasound.blocks);
spin_unlock(&dev->slock);
+ snd_pcm_stream_lock(dev->dmasound.substream);
snd_pcm_stop(dev->dmasound.substream,SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stream_unlock(dev->dmasound.substream);
return;
}
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 3463b469e657..184be29238aa 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1854,8 +1854,6 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
BOND_AD_INFO(bond).agg_select_timer = timeout;
}

-static u16 aggregator_identifier;
-
/**
* bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
* @bond: bonding struct to work on
@@ -1869,7 +1867,7 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
if (MAC_ADDRESS_COMPARE(&(BOND_AD_INFO(bond).system.sys_mac_addr),
bond->dev->dev_addr)) {

- aggregator_identifier = 0;
+ BOND_AD_INFO(bond).aggregator_identifier = 0;

BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
@@ -1941,7 +1939,7 @@ int bond_3ad_bind_slave(struct slave *slave)
ad_initialize_agg(aggregator);

aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr);
- aggregator->aggregator_identifier = (++aggregator_identifier);
+ aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier;
aggregator->slave = slave;
aggregator->is_active = 0;
aggregator->num_of_ports = 0;
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 5ee7e3c45db7..656b2f546780 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -253,6 +253,7 @@ struct ad_system {
struct ad_bond_info {
struct ad_system system; /* 802.3ad system structure */
u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes
+ u16 aggregator_identifier;
};

struct ad_slave_info {
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 650286be8da1..558974f466de 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12343,12 +12343,12 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)

tg3_netif_stop(tp);

+ tg3_set_mtu(dev, tp, new_mtu);
+
tg3_full_lock(tp, 1);

tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

- tg3_set_mtu(dev, tp, new_mtu);
-
/* Reset PHY, otherwise the read DMA engine will be in a mode that
* breaks all requests to 256 bytes.
*/
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index ef84a586c176..e3e0ca1e92e2 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -183,6 +183,17 @@ struct ax88172_int_data {
__le16 res3;
} __packed;

+struct asix_rx_fixup_info {
+ struct sk_buff *ax_skb;
+ u32 header;
+ u16 size;
+ bool split_head;
+};
+
+struct asix_common_private {
+ struct asix_rx_fixup_info rx_fixup_info;
+};
+
static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
u16 size, void *data)
{
@@ -304,49 +315,89 @@ asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
}
}

-static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+static int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
+ struct asix_rx_fixup_info *rx)
{
int offset = 0;

- while (offset + sizeof(u32) < skb->len) {
- struct sk_buff *ax_skb;
- u16 size;
- u32 header = get_unaligned_le32(skb->data + offset);
-
- offset += sizeof(u32);
-
- /* get the packet length */
- size = (u16) (header & 0x7ff);
- if (size != ((~header >> 16) & 0x07ff)) {
- netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
- return 0;
+ while (offset + sizeof(u16) <= skb->len) {
+ u16 remaining = 0;
+ unsigned char *data;
+
+ if (!rx->size) {
+ if ((skb->len - offset == sizeof(u16)) ||
+ rx->split_head) {
+ if (!rx->split_head) {
+ rx->header = get_unaligned_le16(
+ skb->data + offset);
+ rx->split_head = true;
+ offset += sizeof(u16);
+ break;
+ } else {
+ rx->header |= (get_unaligned_le16(
+ skb->data + offset)
+ << 16);
+ rx->split_head = false;
+ offset += sizeof(u16);
+ }
+ } else {
+ rx->header = get_unaligned_le32(skb->data +
+ offset);
+ offset += sizeof(u32);
+ }
+
+ /* get the packet length */
+ rx->size = (u16) (rx->header & 0x7ff);
+ if (rx->size != ((~rx->header >> 16) & 0x7ff)) {
+ netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
+ rx->header, offset);
+ rx->size = 0;
+ return 0;
+ }
+ rx->ax_skb = netdev_alloc_skb_ip_align(dev->net,
+ rx->size);
+ if (!rx->ax_skb)
+ return 0;
}

- if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
- (size + offset > skb->len)) {
+ if (rx->size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
- size);
+ rx->size);
+ kfree_skb(rx->ax_skb);
return 0;
}
- ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
- if (!ax_skb)
- return 0;

- skb_put(ax_skb, size);
- memcpy(ax_skb->data, skb->data + offset, size);
- usbnet_skb_return(dev, ax_skb);
+ if (rx->size > skb->len - offset) {
+ remaining = rx->size - (skb->len - offset);
+ rx->size = skb->len - offset;
+ }
+
+ data = skb_put(rx->ax_skb, rx->size);
+ memcpy(data, skb->data + offset, rx->size);
+ if (!remaining)
+ usbnet_skb_return(dev, rx->ax_skb);

- offset += (size + 1) & 0xfffe;
+ offset += (rx->size + 1) & 0xfffe;
+ rx->size = remaining;
}

if (skb->len != offset) {
- netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d\n",
- skb->len);
+ netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n",
+ skb->len, offset);
return 0;
}
+
return 1;
}

+static int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb)
+{
+ struct asix_common_private *dp = dev->driver_priv;
+ struct asix_rx_fixup_info *rx = &dp->rx_fixup_info;
+
+ return asix_rx_fixup_internal(dev, skb, rx);
+}
+
static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags)
{
@@ -1110,9 +1161,19 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
dev->rx_urb_size = 2048;
}

+ dev->driver_priv = kzalloc(sizeof(struct asix_common_private),
+ GFP_KERNEL);
+ if (!dev->driver_priv)
+ return -ENOMEM;
+
return 0;
}

+static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ kfree(dev->driver_priv);
+}
+
static const struct ethtool_ops ax88178_ethtool_ops = {
.get_drvinfo = asix_get_drvinfo,
.get_link = asix_get_link,
@@ -1445,6 +1506,11 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
dev->rx_urb_size = 2048;
}

+ dev->driver_priv = kzalloc(sizeof(struct asix_common_private),
+ GFP_KERNEL);
+ if (!dev->driver_priv)
+ return -ENOMEM;
+
return 0;
}

@@ -1491,22 +1557,26 @@ static const struct driver_info hawking_uf200_info = {
static const struct driver_info ax88772_info = {
.description = "ASIX AX88772 USB 2.0 Ethernet",
.bind = ax88772_bind,
+ .unbind = ax88772_unbind,
.status = asix_status,
.link_reset = ax88772_link_reset,
.reset = ax88772_reset,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
- .rx_fixup = asix_rx_fixup,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+ FLAG_MULTI_PACKET,
+ .rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
};

static const struct driver_info ax88178_info = {
.description = "ASIX AX88178 USB 2.0 Ethernet",
.bind = ax88178_bind,
+ .unbind = ax88772_unbind,
.status = asix_status,
.link_reset = ax88178_link_reset,
.reset = ax88178_reset,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
- .rx_fixup = asix_rx_fixup,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+ FLAG_MULTI_PACKET,
+ .rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
};
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index 38266bdae26b..71e0b9938602 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -86,6 +86,10 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
u32 size;
u32 count;

+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
header = (struct gl_header *) skb->data;

// get the packet count of the received skb
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index c434b6ba0337..21a93775271e 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -601,8 +601,9 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
u8 status;

- if (skb->len == 0) {
- dev_err(&dev->udev->dev, "unexpected empty rx frame\n");
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len) {
+ dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
return 0;
}
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 83f965cb69e7..006a14244a32 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -419,6 +419,10 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
struct nc_trailer *trailer;
u16 hdr_len, packet_len;

+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
if (!(skb->len & 0x01)) {
#ifdef DEBUG
struct net_device *net = dev->net;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3b6e932890bb..387ececab6e8 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -202,10 +202,10 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
__be16 proto;

- /* usbnet rx_complete guarantees that skb->len is at least
- * hard_header_len, so we can inspect the dest address without
- * checking skb->len
- */
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
switch (skb->data[0] & 0xf0) {
case 0x40:
proto = htons(ETH_P_IP);
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index c8f1b5b3aff3..bf4c0979c484 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -490,6 +490,10 @@ EXPORT_SYMBOL_GPL(rndis_unbind);
*/
int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
/* peripheral may have batched packets to us... */
while (likely(skb->len)) {
struct rndis_data_hdr *hdr = (void *)skb->data;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index d89747aa3c4b..5a82fcdd206b 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1093,6 +1093,10 @@ static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,

static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
while (skb->len > 0) {
u32 rx_cmd_a, rx_cmd_b, align_count, size;
struct sk_buff *ax_skb;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 94ae66999f59..13f50713bf7e 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1041,6 +1041,10 @@ static void smsc95xx_rx_csum_offload(struct sk_buff *skb)

static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
while (skb->len > 0) {
u32 header, align_count;
struct sk_buff *ax_skb;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 174aece3b90a..1283de5b31f3 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -415,17 +415,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
}
// else network stack removes extra byte if we forced a short packet

- if (skb->len) {
- /* all data was already cloned from skb inside the driver */
- if (dev->driver_info->flags & FLAG_MULTI_PACKET)
- dev_kfree_skb_any(skb);
- else
- usbnet_skb_return(dev, skb);
+ /* all data was already cloned from skb inside the driver */
+ if (dev->driver_info->flags & FLAG_MULTI_PACKET)
+ goto done;
+
+ if (skb->len < ETH_HLEN) {
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_length_errors++;
+ netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
+ } else {
+ usbnet_skb_return(dev, skb);
return;
}

- netif_dbg(dev, rx_err, dev->net, "drop\n");
- dev->net->stats.rx_errors++;
done:
skb_queue_tail(&dev->done, skb);
}
@@ -447,13 +449,6 @@ static void rx_complete (struct urb *urb)
switch (urb_status) {
/* success */
case 0:
- if (skb->len < dev->net->hard_header_len) {
- state = rx_cleanup;
- dev->net->stats.rx_errors++;
- dev->net->stats.rx_length_errors++;
- netif_dbg(dev, rx_err, dev->net,
- "rx length %d\n", skb->len);
- }
break;

/* stalls need manual reset. this is rare ... except that
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 2067bdff83ab..dd6dce2bb472 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -459,6 +459,12 @@ const struct iwl_cfg iwl6035_2agn_cfg = {
.ht_params = &iwl6000_ht_params,
};

+const struct iwl_cfg iwl6035_2agn_sff_cfg = {
+ .name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN",
+ IWL_DEVICE_6035,
+ .ht_params = &iwl6000_ht_params,
+};
+
const struct iwl_cfg iwl1030_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
IWL_DEVICE_6030,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 2a9a16f901c3..69d2de11c827 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -680,7 +680,7 @@ int iwl_alive_start(struct iwl_priv *priv)
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
- } else {
+ } else if (priv->shrd->cfg->bt_params) {
/*
* default is 2-wire BT coexexistence support
*/
diff --git a/drivers/net/wireless/iwlwifi/iwl-cfg.h b/drivers/net/wireless/iwlwifi/iwl-cfg.h
index 82152311d73b..a133e9e01f0b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-cfg.h
+++ b/drivers/net/wireless/iwlwifi/iwl-cfg.h
@@ -106,6 +106,7 @@ extern const struct iwl_cfg iwl2000_2bgn_cfg;
extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
extern const struct iwl_cfg iwl2030_2bgn_cfg;
extern const struct iwl_cfg iwl6035_2agn_cfg;
+extern const struct iwl_cfg iwl6035_2agn_sff_cfg;
extern const struct iwl_cfg iwl105_bgn_cfg;
extern const struct iwl_cfg iwl105_bgn_d_cfg;
extern const struct iwl_cfg iwl135_bgn_cfg;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 46490d3b95b9..75ebb829c327 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -801,7 +801,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;

- if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+ if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+ return;
+
+ if (ctx->vif)
ieee80211_chswitch_done(ctx->vif, is_success);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 2bbaebd99ad4..d587bcdda015 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -227,6 +227,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
const struct fw_img *img;
size_t bufsz;

+ if (!iwl_is_ready_rf(priv))
+ return -EAGAIN;
+
/* default is to dump the entire data segment */
if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
priv->dbgfs_sram_offset = 0x800000;
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index c5e339ee918b..1b9047139f65 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -138,13 +138,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {

/* 6x00 Series */
{IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
{IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
{IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
{IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
{IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},

@@ -152,12 +155,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
{IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
{IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
@@ -239,8 +246,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {

/* 6x35 Series */
{IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},

/* 105 Series */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index 3b844b79b14b..9ae2b49270cd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -209,6 +209,15 @@ struct iwl_queue {
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

+/*
+ * The FH will write back to the first TB only, so we need
+ * to copy some data into the buffer regardless of whether
+ * it should be mapped or not. This indicates how much to
+ * copy, even for HCMDs it must be big enough to fit the
+ * DRAM scratch from the TX cmd, at least 16 bytes.
+ */
+#define IWL_HCMD_MIN_COPY_SIZE 16
+
struct iwl_tx_queue {
struct iwl_queue q;
struct iwl_tfd *tfds;
@@ -352,7 +361,7 @@ int iwl_queue_space(const struct iwl_queue *q);
******************************************************/
int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
char **buf, bool display);
-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
+int iwl_dump_fh(struct iwl_trans *trans, char **buf);
void iwl_dump_csr(struct iwl_trans *trans);

/*****************************************************
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
index aa7aea168138..7bcaeaf4d42e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
@@ -315,6 +315,14 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
rxb->page_dma = dma_map_page(trans->dev, page, 0,
PAGE_SIZE << hw_params(trans).rx_page_order,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+ rxb->page = NULL;
+ spin_lock_irqsave(&rxq->lock, flags);
+ list_add(&rxb->list, &rxq->rx_used);
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, hw_params(trans).rx_page_order);
+ return;
+ }
/* dma address must be no more than 36 bits */
BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
/* and also 256 byte aligned! */
@@ -450,8 +458,19 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
dma_map_page(trans->dev, rxb->page, 0,
PAGE_SIZE << hw_params(trans).rx_page_order,
DMA_FROM_DEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
+ if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+ /*
+ * free the page(s) as well to not break
+ * the invariant that the items on the used
+ * list have no page(s)
+ */
+ __free_pages(rxb->page, hw_params(trans).rx_page_order);
+ rxb->page = NULL;
+ list_add_tail(&rxb->list, &rxq->rx_used);
+ } else {
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ }
} else
list_add_tail(&rxb->list, &rxq->rx_used);
spin_unlock_irqrestore(&rxq->lock, flags);
@@ -695,7 +714,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)

iwl_dump_nic_error_log(trans);
iwl_dump_csr(trans);
- iwl_dump_fh(trans, NULL, false);
+ iwl_dump_fh(trans, NULL);
iwl_dump_nic_event_log(trans, false, NULL, false);

iwl_op_mode_nic_error(trans->op_mode);
@@ -1264,12 +1283,20 @@ static irqreturn_t iwl_isr(int irq, void *data)
* back-to-back ISRs and sporadic interrupts from our NIC.
* If we have something to service, the tasklet will re-enable ints.
* If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */
+ inta_mask = iwl_read32(trans, CSR_INT_MASK);
iwl_write32(trans, CSR_INT_MASK, 0x00000000);

/* Discover which interrupts are active/pending */
inta = iwl_read32(trans, CSR_INT);

+ if (inta & (~inta_mask)) {
+ IWL_DEBUG_ISR(trans,
+ "We got a masked interrupt (0x%08x)...Ack and ignore\n",
+ inta & (~inta_mask));
+ iwl_write32(trans, CSR_INT, inta & (~inta_mask));
+ inta &= inta_mask;
+ }
+
/* Ignore interrupt if there's nothing in NIC to service.
* This may be due to IRQ shared with another device,
* or due to sporadic interrupts thrown from our NIC. */
@@ -1353,7 +1380,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
* If we have something to service, the tasklet will re-enable ints.
* If we *don't* have something, we'll re-enable before leaving here.
*/
- inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */
+ inta_mask = iwl_read32(trans, CSR_INT_MASK);
iwl_write32(trans, CSR_INT_MASK, 0x00000000);

diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index d7964b12ef11..91bad2f23842 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -677,10 +677,12 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
struct iwl_cmd_meta *out_meta;
dma_addr_t phys_addr;
u32 idx;
- u16 copy_size, cmd_size;
+ u16 copy_size, cmd_size, dma_size;
bool had_nocopy = false;
int i;
u8 *cmd_dest;
+ const u8 *cmddata[IWL_MAX_CMD_TFDS];
+ u16 cmdlen[IWL_MAX_CMD_TFDS];
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
@@ -699,15 +701,30 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+ cmddata[i] = cmd->data[i];
+ cmdlen[i] = cmd->len[i];
+
if (!cmd->len[i])
continue;
+
+ /* need at least IWL_HCMD_MIN_COPY_SIZE copied */
+ if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
+ int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
+
+ if (copy > cmdlen[i])
+ copy = cmdlen[i];
+ cmdlen[i] -= copy;
+ cmddata[i] += copy;
+ copy_size += copy;
+ }
+
if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
had_nocopy = true;
} else {
/* NOCOPY must not be followed by normal! */
if (WARN_ON(had_nocopy))
return -EINVAL;
- copy_size += cmd->len[i];
+ copy_size += cmdlen[i];
}
cmd_size += cmd->len[i];
}
@@ -750,13 +767,30 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
/* and copy the data that needs to be copied */

cmd_dest = out_cmd->payload;
+ copy_size = sizeof(out_cmd->hdr);
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
- if (!cmd->len[i])
+ int copy = 0;
+
+ if (!cmd->len)
continue;
- if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
- break;
- memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
- cmd_dest += cmd->len[i];
+
+ /* need at least IWL_HCMD_MIN_COPY_SIZE copied */
+ if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
+ copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
+
+ if (copy > cmd->len[i])
+ copy = cmd->len[i];
+ }
+
+ /* copy everything if not nocopy/dup */
+ if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
+ copy = cmd->len[i];
+
+ if (copy) {
+ memcpy(cmd_dest, cmd->data[i], copy);
+ cmd_dest += copy;
+ copy_size += copy;
+ }
}

IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
@@ -766,7 +800,14 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
q->write_ptr, idx, trans_pcie->cmd_queue);

- phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
+ /*
+ * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must
+ * still map at least that many bytes for the hardware to write back to.
+ * We have enough space, so that's not a problem.
+ */
+ dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE);
+
+ phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
idx = -ENOMEM;
@@ -774,7 +815,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
}

dma_unmap_addr_set(out_meta, mapping, phys_addr);
- dma_unmap_len_set(out_meta, len, copy_size);
+ dma_unmap_len_set(out_meta, len, dma_size);

iwlagn_txq_attach_buf_to_tfd(trans, txq,
phys_addr, copy_size, 1);
@@ -801,10 +842,10 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
}

iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
- cmd->len[i], 0);
+ cmdlen[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
- trace_bufs[trace_idx] = cmd->data[i];
- trace_lens[trace_idx] = cmd->len[i];
+ trace_bufs[trace_idx] = cmddata[i];
+ trace_lens[trace_idx] = cmdlen[i];
trace_idx++;
#endif
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 8741048eb858..1780d3f06073 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -1768,13 +1768,9 @@ static const char *get_fh_string(int cmd)
}
}

-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
+int iwl_dump_fh(struct iwl_trans *trans, char **buf)
{
int i;
-#ifdef CONFIG_IWLWIFI_DEBUG
- int pos = 0;
- size_t bufsz = 0;
-#endif
static const u32 fh_tbl[] = {
FH_RSCSR_CHNL0_STTS_WPTR_REG,
FH_RSCSR_CHNL0_RBDCB_BASE_REG,
@@ -1786,29 +1782,34 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
FH_TSSR_TX_STATUS_REG,
FH_TSSR_TX_ERROR_REG
};
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (display) {
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (buf) {
+ int pos = 0;
+ size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
*buf = kmalloc(bufsz, GFP_KERNEL);
if (!*buf)
return -ENOMEM;
pos += scnprintf(*buf + pos, bufsz - pos,
"FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
pos += scnprintf(*buf + pos, bufsz - pos,
" %34s: 0X%08x\n",
get_fh_string(fh_tbl[i]),
iwl_read_direct32(trans, fh_tbl[i]));
- }
+
return pos;
}
#endif
IWL_ERR(trans, "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
IWL_ERR(trans, " %34s: 0X%08x\n",
get_fh_string(fh_tbl[i]),
iwl_read_direct32(trans, fh_tbl[i]));
- }
+
return 0;
}

@@ -2152,11 +2153,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
size_t count, loff_t *ppos)
{
struct iwl_trans *trans = file->private_data;
- char *buf;
+ char *buf = NULL;
int pos = 0;
ssize_t ret = -EFAULT;

- ret = pos = iwl_dump_fh(trans, &buf, true);
+ ret = pos = iwl_dump_fh(trans, &buf);
if (buf) {
ret = simple_read_from_buffer(user_buf,
count, ppos, buf, pos);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
index e19a20a8e955..ecd1ac424047 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
@@ -15,6 +15,8 @@
 #ifndef RTL8187_H
 #define RTL8187_H

+#include <linux/cache.h>
+
 #include "rtl818x.h"
 #include "leds.h"

@@ -139,7 +141,10 @@ struct rtl8187_priv {
 u8 aifsn[4];
 u8 rfkill_mask;
 struct {
- __le64 buf;
+ union {
+ __le64 buf;
+ u8 dummy1[L1_CACHE_BYTES];
+ } ____cacheline_aligned;
 struct sk_buff_head queue;
 } b_tx_status; /* This queue is used by both -b and non-b devices */
 struct mutex io_mutex;
@@ -147,7 +152,8 @@ struct rtl8187_priv {
 u8 bits8;
 __le16 bits16;
 __le32 bits32;
- } *io_dmabuf;
+ u8 dummy2[L1_CACHE_BYTES];
+ } *io_dmabuf ____cacheline_aligned;
 bool rfkill_off;
 u16 seqno;
 };
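
/* Editor's note: a minimal sketch of the padding idea applied in the
 * rtl8187 hunks above -- not the driver's actual code. Buffers handed to
 * the USB/DMA engine must not share a cacheline with CPU-updated fields,
 * so the __le64 is padded out to a full cacheline. The struct and field
 * names here are illustrative.
 */
#include <linux/cache.h>

struct demo_priv {
	union {
		__le64 buf;                 /* device-visible data */
		u8 pad[L1_CACHE_BYTES];     /* claim the whole cacheline */
	} ____cacheline_aligned;
	int cpu_only_state;                 /* guaranteed to live elsewhere */
};
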
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 18d9eb3ad7af..b1757930aa64 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -37,6 +37,7 @@

 #include <linux/ip.h>
 #include <linux/module.h>
+#include <linux/udp.h>

 /*
 *NOTICE!!!: This file will be very big, we should
@@ -957,61 +958,51 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
 if (!ieee80211_is_data(fc))
 return false;

+ ip = (const struct iphdr *)(skb->data + mac_hdr_len +
+ SNAP_SIZE + PROTOC_TYPE_SIZE);
+ ether_type = be16_to_cpup((__be16 *)
+ (skb->data + mac_hdr_len + SNAP_SIZE));

- ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len +
- SNAP_SIZE + PROTOC_TYPE_SIZE);
- ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE);
- /* ether_type = ntohs(ether_type); */
-
- if (ETH_P_IP == ether_type) {
- if (IPPROTO_UDP == ip->protocol) {
- struct udphdr *udp = (struct udphdr *)((u8 *) ip +
- (ip->ihl << 2));
- if (((((u8 *) udp)[1] == 68) &&
- (((u8 *) udp)[3] == 67)) ||
- ((((u8 *) udp)[1] == 67) &&
- (((u8 *) udp)[3] == 68))) {
- /*
- * 68 : UDP BOOTP client
- * 67 : UDP BOOTP server
- */
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV),
- DBG_DMESG, "dhcp %s !!\n",
- is_tx ? "Tx" : "Rx");
-
- if (is_tx) {
- schedule_work(&rtlpriv->
- works.lps_leave_work);
- ppsc->last_delaylps_stamp_jiffies =
- jiffies;
- }
+ switch (ether_type) {
+ case ETH_P_IP: {
+ struct udphdr *udp;
+ u16 src;
+ u16 dst;

- return true;
- }
- }
- } else if (ETH_P_ARP == ether_type) {
- if (is_tx) {
- schedule_work(&rtlpriv->works.lps_leave_work);
- ppsc->last_delaylps_stamp_jiffies = jiffies;
- }
+ if (ip->protocol != IPPROTO_UDP)
+ return false;
+ udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
+ src = be16_to_cpu(udp->source);
+ dst = be16_to_cpu(udp->dest);

- return true;
- } else if (ETH_P_PAE == ether_type) {
+ /* If this case involves port 68 (UDP BOOTP client) connecting
+ * with port 67 (UDP BOOTP server), then return true so that
+ * the lowest speed is used.
+ */
+ if (!((src == 68 && dst == 67) || (src == 67 && dst == 68)))
+ return false;
+
+ RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ "dhcp %s !!\n", is_tx ? "Tx" : "Rx");
+ break;
+ }
+ case ETH_P_ARP:
+ break;
+ case ETH_P_PAE:
 RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
 "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");
-
- if (is_tx) {
- schedule_work(&rtlpriv->works.lps_leave_work);
- ppsc->last_delaylps_stamp_jiffies = jiffies;
- }
-
- return true;
- } else if (ETH_P_IPV6 == ether_type) {
- /* IPv6 */
- return true;
+ break;
+ case ETH_P_IPV6:
+ /* TODO: Is this right? */
+ return false;
+ default:
+ return false;
 }

-
- return false;
+ if (is_tx) {
+ schedule_work(&rtlpriv->works.lps_leave_work);
+ ppsc->last_delaylps_stamp_jiffies = jiffies;
+ }
+ return true;
 }

 /*********************************************************
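
/* Editor's note: the DHCP test introduced above, reduced to a standalone
 * helper under the assumption that the caller has already located the IP
 * header (the driver parses past the 802.11 header, SNAP and protocol
 * type first). The function name is illustrative.
 */
#include <linux/ip.h>
#include <linux/udp.h>

static bool demo_is_dhcp(const struct iphdr *ip)
{
	const struct udphdr *udp;
	u16 src, dst;

	if (ip->protocol != IPPROTO_UDP)
		return false;
	udp = (const struct udphdr *)((const u8 *)ip + (ip->ihl << 2));
	src = be16_to_cpu(udp->source);
	dst = be16_to_cpu(udp->dest);
	/* BOOTP/DHCP: client port 68 <-> server port 67, either direction */
	return (src == 68 && dst == 67) || (src == 67 && dst == 68);
}
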
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 5b9c3b5e8c92..7fe4f91e0e41 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -48,7 +48,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)

 /*<2> Enable Adapter */
 if (rtlpriv->cfg->ops->hw_init(hw))
- return 1;
+ return false;
 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);

 /*<3> Enable Interrupt */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 509f66195f2f..18ddf57fb863 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -902,14 +902,26 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
 bool is92c;
 int err;
 u8 tmp_u1b;
+ unsigned long flags;

 rtlpci->being_init_adapter = true;
+
+ /* Since this function can take a very long time (up to 350 ms)
+ * and can be called with irqs disabled, reenable the irqs
+ * to let the other devices continue being serviced.
+ *
+ * It is safe doing so since our own interrupts will only be enabled
+ * in a subsequent step.
+ */
+ local_save_flags(flags);
+ local_irq_enable();
+
 rtlpriv->intf_ops->disable_aspm(hw);
 rtstatus = _rtl92ce_init_mac(hw);
 if (!rtstatus) {
 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
 err = 1;
- return err;
+ goto exit;
 }

 err = rtl92c_download_fw(hw);
@@ -917,7 +929,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
 "Failed to download FW. Init HW without FW now..\n");
 err = 1;
- return err;
+ goto exit;
 }

 rtlhal->last_hmeboxnum = 0;
@@ -978,6 +990,8 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
 }
 rtl92c_dm_init(hw);
+exit:
+ local_irq_restore(flags);
 rtlpci->being_init_adapter = false;
 return err;
 }
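
/* Editor's note: the interrupt handling pattern used by the hw_init fix
 * above, in isolation. The caller may arrive with interrupts disabled;
 * they are enabled around the slow hardware bring-up and the caller's
 * state is restored on exit. Function name and body are illustrative.
 */
static int demo_slow_hw_init(void)
{
	unsigned long flags;
	int err = 0;

	local_save_flags(flags);  /* remember the caller's irq state */
	local_irq_enable();       /* let other devices be serviced */

	/* ... up to ~350 ms of hardware initialisation, which must not
	 * enable this device's own interrupts yet ... */

	local_irq_restore(flags); /* back to the caller's state */
	return err;
}
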
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 38c51ea483af..905fec4cad06 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -77,11 +77,7 @@
 #define RTL_SLOT_TIME_9 9
 #define RTL_SLOT_TIME_20 20

-/*related with tcp/ip. */
-/*if_ehther.h*/
-#define ETH_P_PAE 0x888E /*Port Access Entity (IEEE 802.1X) */
-#define ETH_P_IP 0x0800 /*Internet Protocol packet */
-#define ETH_P_ARP 0x0806 /*Address Resolution packet */
+/*related to tcp/ip. */
 #define SNAP_SIZE 6
 #define PROTOC_TYPE_SIZE 2

diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index d8ffbf84e3df..a3e5cdd1cf56 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -46,11 +46,25 @@
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>

+/*
+ * This is the maximum slots a skb can have. If a guest sends a skb
+ * which exceeds this limit it is considered malicious.
+ */
+#define MAX_SKB_SLOTS_DEFAULT 20
+static unsigned int max_skb_slots = MAX_SKB_SLOTS_DEFAULT;
+module_param(max_skb_slots, uint, 0444);
+
+typedef unsigned int pending_ring_idx_t;
+#define INVALID_PENDING_RING_IDX (~0U)
+
 struct pending_tx_info {
- struct xen_netif_tx_request req;
+ struct xen_netif_tx_request req; /* coalesced tx request */
 struct xenvif *vif;
+ pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
+ * if it is head of one or more tx
+ * reqs
+ */
 };
-typedef unsigned int pending_ring_idx_t;

 struct netbk_rx_meta {
 int id;
@@ -101,7 +115,11 @@ struct xen_netbk {
 atomic_t netfront_count;

 struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
- struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
+ /* Coalescing tx requests before copying makes number of grant
+ * copy ops greater or equal to number of slots required. In
+ * worst case a tx request consumes 2 gnttab_copy.
+ */
+ struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];

 u16 pending_ring[MAX_PENDING_REQS];

@@ -117,6 +135,16 @@ struct xen_netbk {
 static struct xen_netbk *xen_netbk;
 static int xen_netbk_group_nr;

+/*
+ * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
+ * one or more merged tx requests, otherwise it is the continuation of
+ * previous tx request.
+ */
+static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
+{
+ return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
+}
+
 void xen_netbk_add_xenvif(struct xenvif *vif)
 {
 int i;
@@ -249,6 +277,7 @@ static int max_required_rx_slots(struct xenvif *vif)
 {
 int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);

+ /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
 if (vif->can_sg || vif->gso || vif->gso_prefix)
 max += MAX_SKB_FRAGS + 1; /* extra_info + frags */

@@ -627,6 +656,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
 __skb_queue_tail(&rxq, skb);

 /* Filled the batch queue? */
+ /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
 if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
 break;
 }
@@ -870,47 +900,88 @@ static void netbk_fatal_tx_err(struct xenvif *vif)

 static int netbk_count_requests(struct xenvif *vif,
 struct xen_netif_tx_request *first,
+ RING_IDX first_idx,
 struct xen_netif_tx_request *txp,
 int work_to_do)
 {
 RING_IDX cons = vif->tx.req_cons;
- int frags = 0;
+ int slots = 0;
+ int drop_err = 0;

 if (!(first->flags & XEN_NETTXF_more_data))
 return 0;

 do {
- if (frags >= work_to_do) {
- netdev_err(vif->dev, "Need more frags\n");
+ if (slots >= work_to_do) {
+ netdev_err(vif->dev,
+ "Asked for %d slots but exceeds this limit\n",
+ work_to_do);
 netbk_fatal_tx_err(vif);
 return -ENODATA;
 }

- if (unlikely(frags >= MAX_SKB_FRAGS)) {
- netdev_err(vif->dev, "Too many frags\n");
+ /* This guest is really using too many slots and
+ * considered malicious.
+ */
+ if (unlikely(slots >= max_skb_slots)) {
+ netdev_err(vif->dev,
+ "Malicious frontend using %d slots, threshold %u\n",
+ slots, max_skb_slots);
 netbk_fatal_tx_err(vif);
 return -E2BIG;
 }

- memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
+ /* Xen network protocol had implicit dependency on
+ * MAX_SKB_FRAGS. XEN_NETIF_NR_SLOTS_MIN is set to the
+ * historical MAX_SKB_FRAGS value 18 to honor the same
+ * behavior as before. Any packet using more than 18
+ * slots but less than max_skb_slots slots is dropped
+ */
+ if (!drop_err && slots >= XEN_NETIF_NR_SLOTS_MIN) {
+ if (net_ratelimit())
+ netdev_dbg(vif->dev,
+ "Too many slots (%d) exceeding limit (%d), dropping packet\n",
+ slots, XEN_NETIF_NR_SLOTS_MIN);
+ drop_err = -E2BIG;
+ }
+
+ memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
 sizeof(*txp));
- if (txp->size > first->size) {
- netdev_err(vif->dev, "Frag is bigger than frame.\n");
- netbk_fatal_tx_err(vif);
- return -EIO;
+
+ /* If the guest submitted a frame >= 64 KiB then
+ * first->size overflowed and following slots will
+ * appear to be larger than the frame.
+ *
+ * This cannot be fatal error as there are buggy
+ * frontends that do this.
+ *
+ * Consume all slots and drop the packet.
+ */
+ if (!drop_err && txp->size > first->size) {
+ if (net_ratelimit())
+ netdev_dbg(vif->dev,
+ "Invalid tx request, slot size %u > remaining size %u\n",
+ txp->size, first->size);
+ drop_err = -EIO;
 }

 first->size -= txp->size;
- frags++;
+ slots++;

 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
- netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
+ netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
 txp->offset, txp->size);
 netbk_fatal_tx_err(vif);
 return -EINVAL;
 }
 } while ((txp++)->flags & XEN_NETTXF_more_data);
- return frags;
+
+ if (drop_err) {
+ netbk_tx_err(vif, first, first_idx + slots);
+ return drop_err;
+ }
+
+ return slots;
 }

 static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
@@ -934,48 +1005,114 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 struct skb_shared_info *shinfo = skb_shinfo(skb);
 skb_frag_t *frags = shinfo->frags;
 u16 pending_idx = *((u16 *)skb->data);
- int i, start;
+ u16 head_idx = 0;
+ int slot, start;
+ struct page *page;
+ pending_ring_idx_t index, start_idx = 0;
+ uint16_t dst_offset;
+ unsigned int nr_slots;
+ struct pending_tx_info *first = NULL;
+
+ /* At this point shinfo->nr_frags is in fact the number of
+ * slots, which can be as large as XEN_NETIF_NR_SLOTS_MIN.
+ */
+ nr_slots = shinfo->nr_frags;

 /* Skip first skb fragment if it is on same page as header fragment. */
 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

- for (i = start; i < shinfo->nr_frags; i++, txp++) {
- struct page *page;
- pending_ring_idx_t index;
+ /* Coalesce tx requests, at this point the packet passed in
+ * should be <= 64K. Any packets larger than 64K have been
+ * handled in netbk_count_requests().
+ */
+ for (shinfo->nr_frags = slot = start; slot < nr_slots;
+ shinfo->nr_frags++) {
 struct pending_tx_info *pending_tx_info =
 netbk->pending_tx_info;

- index = pending_index(netbk->pending_cons++);
- pending_idx = netbk->pending_ring[index];
- page = xen_netbk_alloc_page(netbk, pending_idx);
+ page = alloc_page(GFP_KERNEL|__GFP_COLD);
 if (!page)
 goto err;

- gop->source.u.ref = txp->gref;
- gop->source.domid = vif->domid;
- gop->source.offset = txp->offset;
-
- gop->dest.u.gmfn = virt_to_mfn(page_address(page));
- gop->dest.domid = DOMID_SELF;
- gop->dest.offset = txp->offset;
-
- gop->len = txp->size;
- gop->flags = GNTCOPY_source_gref;
+ dst_offset = 0;
+ first = NULL;
+ while (dst_offset < PAGE_SIZE && slot < nr_slots) {
+ gop->flags = GNTCOPY_source_gref;
+
+ gop->source.u.ref = txp->gref;
+ gop->source.domid = vif->domid;
+ gop->source.offset = txp->offset;
+
+ gop->dest.domid = DOMID_SELF;
+
+ gop->dest.offset = dst_offset;
+ gop->dest.u.gmfn = virt_to_mfn(page_address(page));
+
+ if (dst_offset + txp->size > PAGE_SIZE) {
+ /* This page can only merge a portion
+ * of tx request. Do not increment any
+ * pointer / counter here. The txp
+ * will be dealt with in future
+ * rounds, eventually hitting the
+ * `else` branch.
+ */
+ gop->len = PAGE_SIZE - dst_offset;
+ txp->offset += gop->len;
+ txp->size -= gop->len;
+ dst_offset += gop->len; /* quit loop */
+ } else {
+ /* This tx request can be merged in the page */
+ gop->len = txp->size;
+ dst_offset += gop->len;
+
+ index = pending_index(netbk->pending_cons++);
+
+ pending_idx = netbk->pending_ring[index];
+
+ memcpy(&pending_tx_info[pending_idx].req, txp,
+ sizeof(*txp));
+ xenvif_get(vif);
+
+ pending_tx_info[pending_idx].vif = vif;
+
+ /* Poison these fields, corresponding
+ * fields for head tx req will be set
+ * to correct values after the loop.
+ */
+ netbk->mmap_pages[pending_idx] = (void *)(~0UL);
+ pending_tx_info[pending_idx].head =
+ INVALID_PENDING_RING_IDX;
+
+ if (!first) {
+ first = &pending_tx_info[pending_idx];
+ start_idx = index;
+ head_idx = pending_idx;
+ }
+
+ txp++;
+ slot++;
+ }

- gop++;
+ gop++;
+ }

- memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
- xenvif_get(vif);
- pending_tx_info[pending_idx].vif = vif;
- frag_set_pending_idx(&frags[i], pending_idx);
+ first->req.offset = 0;
+ first->req.size = dst_offset;
+ first->head = start_idx;
+ set_page_ext(page, netbk, head_idx);
+ netbk->mmap_pages[head_idx] = page;
+ frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
 }

+ BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
+
 return gop;
 err:
 /* Unwind, freeing all pages and sending error responses. */
- while (i-- > start) {
- xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
- XEN_NETIF_RSP_ERROR);
+ while (shinfo->nr_frags-- > start) {
+ xen_netbk_idx_release(netbk,
+ frag_get_pending_idx(&frags[shinfo->nr_frags]),
+ XEN_NETIF_RSP_ERROR);
 }
 /* The head too, if necessary. */
 if (start)
@@ -991,8 +1128,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 struct gnttab_copy *gop = *gopp;
 u16 pending_idx = *((u16 *)skb->data);
 struct skb_shared_info *shinfo = skb_shinfo(skb);
+ struct pending_tx_info *tx_info;
 int nr_frags = shinfo->nr_frags;
 int i, err, start;
+ u16 peek; /* peek into next tx request */

 /* Check status of header. */
 err = gop->status;
@@ -1004,11 +1143,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,

 for (i = start; i < nr_frags; i++) {
 int j, newerr;
+ pending_ring_idx_t head;

 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+ tx_info = &netbk->pending_tx_info[pending_idx];
+ head = tx_info->head;

 /* Check error status: if okay then remember grant handle. */
- newerr = (++gop)->status;
+ do {
+ newerr = (++gop)->status;
+ if (newerr)
+ break;
+ peek = netbk->pending_ring[pending_index(++head)];
+ } while (!pending_tx_is_head(netbk, peek));
+
 if (likely(!newerr)) {
 /* Had a previous error? Invalidate this fragment. */
 if (unlikely(err))
@@ -1233,11 +1381,12 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 struct sk_buff *skb;
 int ret;

- while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
+ while ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
+ < MAX_PENDING_REQS) &&
 !list_empty(&netbk->net_schedule_list)) {
 struct xenvif *vif;
 struct xen_netif_tx_request txreq;
- struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
+ struct xen_netif_tx_request txfrags[max_skb_slots];
 struct page *page;
 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
 u16 pending_idx;
@@ -1298,7 +1447,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 continue;
 }

- ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
+ ret = netbk_count_requests(vif, &txreq, idx,
+ txfrags, work_to_do);
 if (unlikely(ret < 0))
 continue;

@@ -1325,7 +1475,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 pending_idx = netbk->pending_ring[index];

 data_len = (txreq.size > PKT_PROT_LEN &&
- ret < MAX_SKB_FRAGS) ?
+ ret < XEN_NETIF_NR_SLOTS_MIN) ?
 PKT_PROT_LEN : txreq.size;

 skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
@@ -1375,6 +1525,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 memcpy(&netbk->pending_tx_info[pending_idx].req,
 &txreq, sizeof(txreq));
 netbk->pending_tx_info[pending_idx].vif = vif;
+ netbk->pending_tx_info[pending_idx].head = index;
 *((u16 *)skb->data) = pending_idx;

 __skb_put(skb, data_len);
@@ -1505,7 +1656,10 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
 {
 struct xenvif *vif;
 struct pending_tx_info *pending_tx_info;
- pending_ring_idx_t index;
+ pending_ring_idx_t head;
+ u16 peek; /* peek into next tx request */
+
+ BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));

 /* Already complete? */
 if (netbk->mmap_pages[pending_idx] == NULL)
@@ -1514,19 +1668,40 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
 pending_tx_info = &netbk->pending_tx_info[pending_idx];

 vif = pending_tx_info->vif;
+ head = pending_tx_info->head;

- make_tx_response(vif, &pending_tx_info->req, status);
+ BUG_ON(!pending_tx_is_head(netbk, head));
+ BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);

- index = pending_index(netbk->pending_prod++);
- netbk->pending_ring[index] = pending_idx;
+ do {
+ pending_ring_idx_t index;
+ pending_ring_idx_t idx = pending_index(head);
+ u16 info_idx = netbk->pending_ring[idx];

- xenvif_put(vif);
+ pending_tx_info = &netbk->pending_tx_info[info_idx];
+ make_tx_response(vif, &pending_tx_info->req, status);
+
+ /* Setting any number other than
+ * INVALID_PENDING_RING_IDX indicates this slot is
+ * starting a new packet / ending a previous packet.
+ */
+ pending_tx_info->head = 0;
+
+ index = pending_index(netbk->pending_prod++);
+ netbk->pending_ring[index] = netbk->pending_ring[info_idx];
+
+ xenvif_put(vif);
+
+ peek = netbk->pending_ring[pending_index(++head)];
+
+ } while (!pending_tx_is_head(netbk, peek));

 netbk->mmap_pages[pending_idx]->mapping = 0;
 put_page(netbk->mmap_pages[pending_idx]);
 netbk->mmap_pages[pending_idx] = NULL;
 }

+
 static void make_tx_response(struct xenvif *vif,
 struct xen_netif_tx_request *txp,
 s8 st)
@@ -1579,8 +1754,9 @@ static inline int rx_work_todo(struct xen_netbk *netbk)
 static inline int tx_work_todo(struct xen_netbk *netbk)
 {

- if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
- !list_empty(&netbk->net_schedule_list))
+ if ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
+ < MAX_PENDING_REQS) &&
+ !list_empty(&netbk->net_schedule_list))
 return 1;

 return 0;
@@ -1663,6 +1839,13 @@ static int __init netback_init(void)
 if (!xen_domain())
 return -ENODEV;

+ if (max_skb_slots < XEN_NETIF_NR_SLOTS_MIN) {
+ printk(KERN_INFO
+ "xen-netback: max_skb_slots too small (%d), bump it to XEN_NETIF_NR_SLOTS_MIN (%d)\n",
+ max_skb_slots, XEN_NETIF_NR_SLOTS_MIN);
+ max_skb_slots = XEN_NETIF_NR_SLOTS_MIN;
+ }
+
 xen_netbk_group_nr = num_online_cpus();
 xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
 if (!xen_netbk)
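
/* Editor's note: a simplified model of the head-slot bookkeeping the
 * netback patch introduces, not the driver code itself. Coalesced tx
 * requests form runs in the pending ring; only the first entry of a run
 * carries a valid head index, so walking a run means advancing until the
 * next entry that is itself a head. All names here are illustrative.
 */
#define DEMO_INVALID_HEAD (~0U)

struct demo_slot {
	unsigned int head;   /* DEMO_INVALID_HEAD => continuation slot */
};

static int demo_is_head(const struct demo_slot *slots, unsigned int idx)
{
	return slots[idx].head != DEMO_INVALID_HEAD;
}

static unsigned int demo_run_len(const struct demo_slot *slots,
				 const unsigned int *ring,
				 unsigned int mask, unsigned int head)
{
	unsigned int n = 0;

	do {
		n++;		/* count this slot of the run */
		head++;
	} while (!demo_is_head(slots, ring[head & mask]));
	return n;
}
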
diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
index 90d2d4475cb4..3f71e9ea051a 100644
--- a/drivers/staging/line6/pcm.c
+++ b/drivers/staging/line6/pcm.c
@@ -378,8 +378,11 @@ static int snd_line6_pcm_free(struct snd_device *device)
 */
 static void pcm_disconnect_substream(struct snd_pcm_substream *substream)
 {
- if (substream->runtime && snd_pcm_running(substream))
+ if (substream->runtime && snd_pcm_running(substream)) {
+ snd_pcm_stream_lock_irq(substream);
 snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
+ snd_pcm_stream_unlock_irq(substream);
+ }
 }

 /*
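
/* Editor's note: the locking rule this one-liner fix encodes --
 * snd_pcm_stop() must run under the PCM stream lock. A sketch of the
 * safe-stop pattern for any driver-side disconnect path (helper name is
 * illustrative):
 */
static void demo_stop_substream(struct snd_pcm_substream *substream)
{
	if (substream->runtime && snd_pcm_running(substream)) {
		snd_pcm_stream_lock_irq(substream);
		snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
		snd_pcm_stream_unlock_irq(substream);
	}
}
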
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 84bd4593455e..907135c0d161 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -920,6 +920,8 @@ static struct usb_device_id id_table_combined [] = {
 /* Crucible Devices */
 { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
 { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
+ /* Cressi Devices */
+ { USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) },
 { }, /* Optional parameter entry */
 { } /* Terminating entry */
 };
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 1e2d369df86e..e599fbfcde5f 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1320,3 +1320,9 @@
 * Manufacturer: Smart GSM Team
 */
 #define FTDI_Z3X_PID 0x0011
+
+/*
+ * Product: Cressi PC Interface
+ * Manufacturer: Cressi
+ */
+#define FTDI_CRESSI_PID 0x87d0
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index abfb45b3940a..8b5c8e5d78d8 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1546,7 +1546,8 @@ static const struct usb_device_id option_ids[] = {
 /* Cinterion */
 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 33dcad6371f5..9161f06564eb 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -1422,8 +1422,10 @@ void rebind_evtchn_irq(int evtchn, int irq)
 /* Rebind an evtchn so that it gets delivered to a specific cpu */
 static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 {
+ struct shared_info *s = HYPERVISOR_shared_info;
 struct evtchn_bind_vcpu bind_vcpu;
 int evtchn = evtchn_from_irq(irq);
+ int masked;

 if (!VALID_EVTCHN(evtchn))
 return -1;
@@ -1440,6 +1442,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 bind_vcpu.vcpu = tcpu;

 /*
+ * Mask the event while changing the VCPU binding to prevent
+ * it being delivered on an unexpected VCPU.
+ */
+ masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
+
+ /*
 * If this fails, it usually just indicates that we're dealing with a
 * virq or IPI channel, which don't actually need to be rebound. Ignore
 * it, but don't do the xenlinux-level rebind in that case.
@@ -1447,6 +1455,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
 bind_evtchn_to_cpu(evtchn, tcpu);

+ if (!masked)
+ unmask_evtchn(evtchn);
+
 return 0;
 }

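
/* Editor's note: the mask-around-rebind pattern from the events.c hunk,
 * sketched. Masking uses a test-and-set so the event is only unmasked
 * afterwards if it was not already masked by someone else. The helper
 * name and the elided hypercall are illustrative; sync_test_and_set_bit()
 * and unmask_evtchn() are the calls the real code uses.
 */
static void demo_rebind_masked(unsigned long *evtchn_mask, int evtchn)
{
	int masked;

	/* mask the port so no event fires on the old VCPU mid-rebind */
	masked = sync_test_and_set_bit(evtchn, evtchn_mask);

	/* ... EVTCHNOP_bind_vcpu hypercall and CPU affinity update ... */

	if (!masked)
		unmask_evtchn(evtchn); /* only undo our own masking */
}
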
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index e7ebb5a2ed1c..9ace37521510 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2185,7 +2185,7 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
 unsigned long nr_segs, loff_t *poffset)
 {
 unsigned long nr_pages, i;
- size_t copied, len, cur_len;
+ size_t bytes, copied, len, cur_len;
 ssize_t total_written = 0;
 loff_t offset;
 struct iov_iter it;
@@ -2236,14 +2236,45 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,

 save_len = cur_len;
 for (i = 0; i < nr_pages; i++) {
- copied = min_t(const size_t, cur_len, PAGE_SIZE);
+ bytes = min_t(const size_t, cur_len, PAGE_SIZE);
 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
- 0, copied);
+ 0, bytes);
 cur_len -= copied;
 iov_iter_advance(&it, copied);
+ /*
+ * If we didn't copy as much as we expected, then that
+ * may mean we trod into an unmapped area. Stop copying
+ * at that point. On the next pass through the big
+ * loop, we'll likely end up getting a zero-length
+ * write and bailing out of it.
+ */
+ if (copied < bytes)
+ break;
 }
 cur_len = save_len - cur_len;

+ /*
+ * If we have no data to send, then that probably means that
+ * the copy above failed altogether. That's most likely because
+ * the address in the iovec was bogus. Set the rc to -EFAULT,
+ * free anything we allocated and bail out.
+ */
+ if (!cur_len) {
+ for (i = 0; i < nr_pages; i++)
+ put_page(wdata->pages[i]);
+ kfree(wdata);
+ rc = -EFAULT;
+ break;
+ }
+
+ /*
+ * i + 1 now represents the number of pages we actually used in
+ * the copy phase above. Bring nr_pages down to that, and free
+ * any pages that we didn't use.
+ */
+ for ( ; nr_pages > i + 1; nr_pages--)
+ put_page(wdata->pages[nr_pages - 1]);
+
 wdata->sync_mode = WB_SYNC_ALL;
 wdata->nr_pages = nr_pages;
 wdata->offset = (__u64)offset;
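
/* Editor's note: the copy-shortfall handling from cifs_iovec_write above,
 * sketched as a generic helper. A short copy from userspace usually means
 * an unmapped region was hit: stop early, and treat "nothing copied at
 * all" as -EFAULT. Names are illustrative.
 */
static size_t demo_fill_pages(struct page **pages, unsigned long nr_pages,
			      struct iov_iter *it, size_t len, int *rc)
{
	size_t total = 0;
	unsigned long i;

	for (i = 0; i < nr_pages && len; i++) {
		size_t bytes = min_t(size_t, len, PAGE_SIZE);
		size_t copied = iov_iter_copy_from_user(pages[i], it, 0, bytes);

		total += copied;
		len -= copied;
		iov_iter_advance(it, copied);
		if (copied < bytes)
			break;          /* trod into an unmapped area */
	}
	if (!total)
		*rc = -EFAULT;          /* the iovec address was bogus */
	return total;
}
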
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index a4217f02fab2..6406ac902de9 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -96,6 +96,14 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
 dput(dentry);
 }

+ /*
+ * If we know that the inode will need to be revalidated immediately,
+ * then don't create a new dentry for it. We'll end up doing an on
+ * the wire call either way and this spares us an invalidation.
+ */
+ if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
+ return NULL;
+
 dentry = d_alloc(parent, name);
 if (dentry == NULL)
 return NULL;
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 0961336513d5..103e56ceb38d 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -511,6 +511,13 @@ send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
 mutex_unlock(&server->srv_mutex);
 return rc;
 }
+
+ /*
+ * The response to this call was already factored into the sequence
+ * number when the call went out, so we must adjust it back downward
+ * after signing here.
+ */
+ --server->sequence_number;
 rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
 mutex_unlock(&server->srv_mutex);

diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index d918b55f009c..29224866f743 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -751,6 +751,8 @@ do { \
 if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \
 (einode)->xtime.tv_sec = \
 (signed)le32_to_cpu((raw_inode)->xtime); \
+ else \
+ (einode)->xtime.tv_sec = 0; \
 if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
 ext4_decode_extra_time(&(einode)->xtime, \
 raw_inode->xtime ## _extra); \
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index c883561e9b61..4296a6f800a0 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -670,6 +670,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
 struct ext4_extent_header *eh;
 struct buffer_head *bh;
 short int depth, i, ppos = 0, alloc = 0;
+ int ret;

 eh = ext_inode_hdr(inode);
 depth = ext_depth(inode);
@@ -699,12 +700,15 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
 path[ppos].p_ext = NULL;

 bh = sb_getblk(inode->i_sb, path[ppos].p_block);
- if (unlikely(!bh))
+ if (unlikely(!bh)) {
+ ret = -ENOMEM;
 goto err;
+ }
 if (!bh_uptodate_or_lock(bh)) {
 trace_ext4_ext_load_extent(inode, block,
 path[ppos].p_block);
- if (bh_submit_read(bh) < 0) {
+ ret = bh_submit_read(bh);
+ if (ret < 0) {
 put_bh(bh);
 goto err;
 }
@@ -717,13 +721,15 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
 put_bh(bh);
 EXT4_ERROR_INODE(inode,
 "ppos %d > depth %d", ppos, depth);
+ ret = -EIO;
 goto err;
 }
 path[ppos].p_bh = bh;
 path[ppos].p_hdr = eh;
 i--;

- if (need_to_validate && ext4_ext_check(inode, eh, i))
+ ret = need_to_validate ? ext4_ext_check(inode, eh, i) : 0;
+ if (ret < 0)
 goto err;
 }

@@ -745,7 +751,7 @@ err:
 ext4_ext_drop_refs(path);
 if (alloc)
 kfree(path);
- return ERR_PTR(-EIO);
+ return ERR_PTR(ret);
 }

 /*
@@ -900,7 +906,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 }
 bh = sb_getblk(inode->i_sb, newblock);
 if (!bh) {
- err = -EIO;
+ err = -ENOMEM;
 goto cleanup;
 }
 lock_buffer(bh);
@@ -972,7 +978,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 newblock = ablocks[--a];
 bh = sb_getblk(inode->i_sb, newblock);
 if (!bh) {
- err = -EIO;
+ err = -ENOMEM;
 goto cleanup;
 }
 lock_buffer(bh);
@@ -1083,11 +1089,8 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
 return err;

 bh = sb_getblk(inode->i_sb, newblock);
- if (!bh) {
- err = -EIO;
- ext4_std_error(inode->i_sb, err);
- return err;
- }
+ if (!bh)
+ return -ENOMEM;
 lock_buffer(bh);

 err = ext4_journal_get_create_access(handle, bh);
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index bb6c7d811313..a8d03a4d3f8b 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -260,8 +260,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 if (journal->j_flags & JBD2_BARRIER &&
 !jbd2_trans_will_send_data_barrier(journal, commit_tid))
 needs_barrier = true;
- jbd2_log_start_commit(journal, commit_tid);
- ret = jbd2_log_wait_commit(journal, commit_tid);
+ ret = jbd2_complete_transaction(journal, commit_tid);
 if (needs_barrier)
 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
 out:
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 830e1b2bf145..6dc6153dc462 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -145,6 +145,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
 struct super_block *sb = inode->i_sb;
 Indirect *p = chain;
 struct buffer_head *bh;
+ int ret = -EIO;

 *err = 0;
 /* i_data is not going away, no lock needed */
@@ -153,8 +154,10 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
 goto no_block;
 while (--depth) {
 bh = sb_getblk(sb, le32_to_cpu(p->key));
- if (unlikely(!bh))
+ if (unlikely(!bh)) {
+ ret = -ENOMEM;
 goto failure;
+ }

 if (!bh_uptodate_or_lock(bh)) {
 if (bh_submit_read(bh) < 0) {
@@ -176,7 +179,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
 return NULL;

 failure:
- *err = -EIO;
+ *err = ret;
 no_block:
 return p;
 }
@@ -470,7 +473,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
 */
 bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
 if (unlikely(!bh)) {
- err = -EIO;
+ err = -ENOMEM;
 goto failed;
 }

diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 98bff01eed6e..25264c4d390f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -149,8 +149,7 @@ void ext4_evict_inode(struct inode *inode)
 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
 tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

- jbd2_log_start_commit(journal, commit_tid);
- jbd2_log_wait_commit(journal, commit_tid);
+ jbd2_complete_transaction(journal, commit_tid);
 filemap_write_and_wait(&inode->i_data);
 }
 truncate_inode_pages(&inode->i_data, 0);
@@ -664,7 +663,7 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,

 bh = sb_getblk(inode->i_sb, map.m_pblk);
 if (!bh) {
- *errp = -EIO;
+ *errp = -ENOMEM;
 return NULL;
 }
 if (map.m_flags & EXT4_MAP_NEW) {
@@ -2801,9 +2800,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
 if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
 ext4_free_io_end(io_end);
 out:
+ inode_dio_done(inode);
 if (is_async)
 aio_complete(iocb, ret, 0);
- inode_dio_done(inode);
 return;
 }

@@ -3462,11 +3461,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);

 bh = sb_getblk(sb, block);
- if (!bh) {
- EXT4_ERROR_INODE_BLOCK(inode, block,
- "unable to read itable block");
- return -EIO;
- }
+ if (!bh)
+ return -ENOMEM;
 if (!buffer_uptodate(bh)) {
 lock_buffer(bh);

diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index ed6548d89165..444b7016d32d 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -41,6 +41,8 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
 * is not blocked in the elevator. */
 if (!*bh)
 *bh = sb_getblk(sb, mmp_block);
+ if (!*bh)
+ return -ENOMEM;
 if (*bh) {
 get_bh(*bh);
 lock_buffer(*bh);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index dcdeef169a69..6822ad70b55c 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -107,14 +107,13 @@ int ext4_end_io_nolock(ext4_io_end_t *io)
 inode->i_ino, offset, size, ret);
 }

- if (io->iocb)
- aio_complete(io->iocb, io->result, 0);
-
- if (io->flag & EXT4_IO_END_DIRECT)
- inode_dio_done(inode);
 /* Wake up anyone waiting on unwritten extent conversion */
 if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten))
 wake_up_all(ext4_ioend_wq(io->inode));
+ if (io->flag & EXT4_IO_END_DIRECT)
+ inode_dio_done(inode);
+ if (io->iocb)
+ aio_complete(io->iocb, io->result, 0);
 return ret;
 }

diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 86a8c88dc29a..50992c3025c2 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -315,7 +315,7 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,

 bh = sb_getblk(sb, blk);
 if (!bh)
- return ERR_PTR(-EIO);
+ return ERR_PTR(-ENOMEM);
 if ((err = ext4_journal_get_write_access(handle, bh))) {
 brelse(bh);
 bh = ERR_PTR(err);
@@ -377,7 +377,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
 start = ext4_group_first_block_no(sb, group);
 group -= flex_gd->groups[0].group;

- count2 = sb->s_blocksize * 8 - (block - start);
+ count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
 if (count2 > count)
 count2 = count;

@@ -392,7 +392,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,

 bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
 if (!bh)
- return -EIO;
+ return -ENOMEM;

 err = ext4_journal_get_write_access(handle, bh);
 if (err)
@@ -470,7 +470,7 @@ static int setup_new_flex_group_blocks(struct super_block *sb,

 gdb = sb_getblk(sb, block);
 if (!gdb) {
- err = -EIO;
+ err = -ENOMEM;
 goto out;
 }

@@ -991,7 +991,7 @@ static void update_backups(struct super_block *sb,

 bh = sb_getblk(sb, group * bpg + blk_off);
 if (!bh) {
- err = -EIO;
+ err = -ENOMEM;
 break;
 }
 ext4_debug("update metadata backup %#04lx\n",
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 8bbb14c5f774..0b0c03c878e5 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3370,16 +3370,22 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 for (i = 0; i < 4; i++)
 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
 sbi->s_def_hash_version = es->s_def_hash_version;
- i = le32_to_cpu(es->s_flags);
- if (i & EXT2_FLAGS_UNSIGNED_HASH)
- sbi->s_hash_unsigned = 3;
- else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
+ if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
+ i = le32_to_cpu(es->s_flags);
+ if (i & EXT2_FLAGS_UNSIGNED_HASH)
+ sbi->s_hash_unsigned = 3;
+ else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
 #ifdef __CHAR_UNSIGNED__
- es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
- sbi->s_hash_unsigned = 3;
+ if (!(sb->s_flags & MS_RDONLY))
+ es->s_flags |=
+ cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
+ sbi->s_hash_unsigned = 3;
 #else
- es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
+ if (!(sb->s_flags & MS_RDONLY))
+ es->s_flags |=
+ cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
 #endif
+ }
 }

 /* Handle clustersize */
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 01f2cf3409fb..5743e9db8027 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -840,16 +840,17 @@ inserted:

 new_bh = sb_getblk(sb, block);
 if (!new_bh) {
+ error = -ENOMEM;
 getblk_failed:
 ext4_free_blocks(handle, inode, NULL, block, 1,
 EXT4_FREE_BLOCKS_METADATA);
- error = -EIO;
 goto cleanup;
 }
 lock_buffer(new_bh);
 error = ext4_journal_get_create_access(handle, new_bh);
 if (error) {
 unlock_buffer(new_bh);
+ error = -EIO;
 goto getblk_failed;
 }
 memcpy(new_bh->b_data, s->base, new_bh->b_size);
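
/* Editor's note: the error-code convention the ext4 hunks above converge
 * on. sb_getblk() returns NULL only on allocation failure, so the correct
 * error is -ENOMEM; -EIO stays reserved for reads that actually fail.
 * A sketch of the resulting pattern (helper name is illustrative):
 */
static int demo_read_metadata_block(struct super_block *sb, sector_t blk,
				    struct buffer_head **out)
{
	struct buffer_head *bh = sb_getblk(sb, blk);

	if (!bh)
		return -ENOMEM;         /* allocation failure, not I/O */
	if (!bh_uptodate_or_lock(bh)) {
		if (bh_submit_read(bh) < 0) {
			put_bh(bh);
			return -EIO;    /* the read itself failed */
		}
	}
	*out = bh;
	return 0;
}
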
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 373b25145e01..f31c1365a013 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1103,6 +1103,8 @@ static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
 return -EIO;
 if (reclen > nbytes)
 break;
+ if (memchr(dirent->name, '/', dirent->namelen) != NULL)
+ return -EIO;

 over = filldir(dstbuf, dirent->name, dirent->namelen,
 file->f_pos, dirent->ino, dirent->type);
@@ -1346,6 +1348,7 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
 {
 struct inode *inode = entry->d_inode;
 struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_inode *fi = get_fuse_inode(inode);
 struct fuse_req *req;
 struct fuse_setattr_in inarg;
 struct fuse_attr_out outarg;
@@ -1376,8 +1379,10 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
 if (IS_ERR(req))
 return PTR_ERR(req);

- if (is_truncate)
+ if (is_truncate) {
 fuse_set_nowrite(inode);
+ set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+ }

 memset(&inarg, 0, sizeof(inarg));
 memset(&outarg, 0, sizeof(outarg));
@@ -1439,12 +1444,14 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
 invalidate_inode_pages2(inode->i_mapping);
 }

+ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
 return 0;

 error:
 if (is_truncate)
 fuse_release_nowrite(inode);

+ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
 return err;
 }

diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index df25454eadf1..e4f1f1ace347 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -515,7 +515,8 @@ static void fuse_read_update_size(struct inode *inode, loff_t size,
 struct fuse_inode *fi = get_fuse_inode(inode);

 spin_lock(&fc->lock);
- if (attr_ver == fi->attr_version && size < inode->i_size) {
+ if (attr_ver == fi->attr_version && size < inode->i_size &&
+ !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
 fi->attr_version = ++fc->attr_version;
 i_size_write(inode, size);
 }
@@ -877,12 +878,16 @@ static ssize_t fuse_perform_write(struct file *file,
 {
 struct inode *inode = mapping->host;
 struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_inode *fi = get_fuse_inode(inode);
 int err = 0;
 ssize_t res = 0;

 if (is_bad_inode(inode))
 return -EIO;

+ if (inode->i_size < pos + iov_iter_count(ii))
+ set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+
 do {
 struct fuse_req *req;
 ssize_t count;
@@ -917,6 +922,7 @@ static ssize_t fuse_perform_write(struct file *file,
 if (res > 0)
 fuse_write_update_size(inode, pos);

+ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
 fuse_invalidate_attr(inode);

 return res > 0 ? res : err;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index d1819269aab0..81b96c58569c 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -103,6 +103,15 @@ struct fuse_inode {

 /** List of writepage requestst (pending or sent) */
 struct list_head writepages;
+
+ /** Miscellaneous bits describing inode state */
+ unsigned long state;
+};
+
+/** FUSE inode state bits */
+enum {
+ /** An operation changing file size is in progress */
+ FUSE_I_SIZE_UNSTABLE,
 };

 struct fuse_conn;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index a59cf5e673d7..a5c8b343a156 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -92,6 +92,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
 fi->attr_version = 0;
 fi->writectr = 0;
 fi->orig_ino = 0;
+ fi->state = 0;
 INIT_LIST_HEAD(&fi->write_files);
 INIT_LIST_HEAD(&fi->queued_writes);
 INIT_LIST_HEAD(&fi->writepages);
@@ -199,7 +200,8 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
 loff_t oldsize;

 spin_lock(&fc->lock);
- if (attr_version != 0 && fi->attr_version > attr_version) {
+ if ((attr_version != 0 && fi->attr_version > attr_version) ||
+ test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
 spin_unlock(&fc->lock);
 return;
 }
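
/* Editor's note: how the new FUSE_I_SIZE_UNSTABLE bit ties the fuse hunks
 * together, condensed. While a size-changing operation is in flight the
 * bit is set, and both fuse_read_update_size() and
 * fuse_change_attributes() refuse to shrink or overwrite i_size. A sketch
 * of the writer side (function name and elided I/O are illustrative):
 */
static ssize_t demo_sized_write(struct inode *inode, struct fuse_inode *fi,
				loff_t pos, size_t count)
{
	ssize_t res;

	if (inode->i_size < pos + count)
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	res = 0; /* ... perform the write, updating i_size on success ... */

	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
	return res;
}
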
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index e5bfb1150cf5..f6974688e89f 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -662,6 +662,37 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
 }

 /*
+ * When this function returns the transaction corresponding to tid
+ * will be completed. If the transaction is currently running, start
+ * committing that transaction before waiting for it to complete. If
+ * the transaction id is stale, it is by definition already completed,
+ * so just return SUCCESS.
+ */
+int jbd2_complete_transaction(journal_t *journal, tid_t tid)
+{
+ int need_to_wait = 1;
+
+ read_lock(&journal->j_state_lock);
+ if (journal->j_running_transaction &&
+ journal->j_running_transaction->t_tid == tid) {
+ if (journal->j_commit_request != tid) {
+ /* transaction not yet started, so request it */
+ read_unlock(&journal->j_state_lock);
+ jbd2_log_start_commit(journal, tid);
+ goto wait_commit;
+ }
+ } else if (!(journal->j_committing_transaction &&
+ journal->j_committing_transaction->t_tid == tid))
+ need_to_wait = 0;
+ read_unlock(&journal->j_state_lock);
+ if (!need_to_wait)
+ return 0;
+wait_commit:
+ return jbd2_log_wait_commit(journal, tid);
+}
+EXPORT_SYMBOL(jbd2_complete_transaction);
+
+/*
 * Log buffer allocation routines:
 */

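
/* Editor's note: intended use of the new helper, as the ext4 fsync and
 * evict_inode hunks show. jbd2_complete_transaction() only requests a
 * commit when the tid is still running, so callers stop forcing commits
 * for transactions that already completed. Function name is illustrative.
 */
static int demo_wait_for_tid(journal_t *journal, tid_t commit_tid)
{
	/* replaces the jbd2_log_start_commit() + jbd2_log_wait_commit() pair */
	return jbd2_complete_transaction(journal, commit_tid);
}
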
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index aeed93a6bde0..9560fd7f5c7a 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -1033,15 +1033,6 @@ static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
 DPRINTK("ncp_rmdir: removing %s/%s\n",
 dentry->d_parent->d_name.name, dentry->d_name.name);

- /*
- * fail with EBUSY if there are still references to this
- * directory.
- */
- dentry_unhash(dentry);
- error = -EBUSY;
- if (!d_unhashed(dentry))
- goto out;
-
 len = sizeof(__name);
 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
 dentry->d_name.len, !ncp_preserve_case(dir));
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 3e7b2a0dc0c8..f45b83f38b1a 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -94,6 +94,7 @@ void nilfs_forget_buffer(struct buffer_head *bh)
 clear_buffer_nilfs_volatile(bh);
 clear_buffer_nilfs_checked(bh);
 clear_buffer_nilfs_redirected(bh);
+ clear_buffer_async_write(bh);
 clear_buffer_dirty(bh);
 if (nilfs_page_buffers_clean(page))
 __nilfs_clear_page_dirty(page);
@@ -390,6 +391,7 @@ void nilfs_clear_dirty_pages(struct address_space *mapping)
 bh = head = page_buffers(page);
 do {
 lock_buffer(bh);
+ clear_buffer_async_write(bh);
 clear_buffer_dirty(bh);
 clear_buffer_nilfs_volatile(bh);
 clear_buffer_nilfs_checked(bh);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index d4ca8925f017..e0a5a181fa46 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -662,7 +662,7 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,

 bh = head = page_buffers(page);
 do {
- if (!buffer_dirty(bh))
+ if (!buffer_dirty(bh) || buffer_async_write(bh))
 continue;
 get_bh(bh);
 list_add_tail(&bh->b_assoc_buffers, listp);
@@ -696,7 +696,8 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
 for (i = 0; i < pagevec_count(&pvec); i++) {
 bh = head = page_buffers(pvec.pages[i]);
 do {
- if (buffer_dirty(bh)) {
+ if (buffer_dirty(bh) &&
+ !buffer_async_write(bh)) {
 get_bh(bh);
 list_add_tail(&bh->b_assoc_buffers,
 listp);
@@ -1578,6 +1579,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)

 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
 b_assoc_buffers) {
+ set_buffer_async_write(bh);
 if (bh->b_page != bd_page) {
 if (bd_page) {
 lock_page(bd_page);
@@ -1591,6 +1593,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)

 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
 b_assoc_buffers) {
+ set_buffer_async_write(bh);
 if (bh == segbuf->sb_super_root) {
 if (bh->b_page != bd_page) {
 lock_page(bd_page);
@@ -1676,6 +1679,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
 list_for_each_entry(segbuf, logs, sb_list) {
 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
 b_assoc_buffers) {
+ clear_buffer_async_write(bh);
 if (bh->b_page != bd_page) {
 if (bd_page)
 end_page_writeback(bd_page);
@@ -1685,6 +1689,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)

 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
 b_assoc_buffers) {
+ clear_buffer_async_write(bh);
 if (bh == segbuf->sb_super_root) {
 if (bh->b_page != bd_page) {
 end_page_writeback(bd_page);
@@ -1754,6 +1759,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
 b_assoc_buffers) {
 set_buffer_uptodate(bh);
 clear_buffer_dirty(bh);
+ clear_buffer_async_write(bh);
 if (bh->b_page != bd_page) {
 if (bd_page)
 end_page_writeback(bd_page);
@@ -1775,6 +1781,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
 b_assoc_buffers) {
 set_buffer_uptodate(bh);
 clear_buffer_dirty(bh);
+ clear_buffer_async_write(bh);
 clear_buffer_delay(bh);
 clear_buffer_nilfs_volatile(bh);
 clear_buffer_nilfs_redirected(bh);
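
/* Editor's note: the invariant the nilfs2 hunks establish, sketched. A
 * buffer queued for log writeout is flagged BH_Async_Write so the dirty-
 * buffer scans skip it until the write completes or is aborted. Helper
 * name is illustrative.
 */
static void demo_collect_dirty_buffer(struct buffer_head *bh,
				      struct list_head *listp)
{
	if (!buffer_dirty(bh) || buffer_async_write(bh))
		return;                 /* clean, or already in flight */
	get_bh(bh);
	list_add_tail(&bh->b_assoc_buffers, listp);
}
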
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index d69a1d1d7e15..8b3068771dab 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -580,9 +580,17 @@ int dquot_scan_active(struct super_block *sb,
 dqstats_inc(DQST_LOOKUPS);
 dqput(old_dquot);
 old_dquot = dquot;
- ret = fn(dquot, priv);
- if (ret < 0)
- goto out;
+ /*
+ * ->release_dquot() can be racing with us. Our reference
+ * protects us from new calls to it so just wait for any
+ * outstanding call and recheck the DQ_ACTIVE_B after that.
+ */
+ wait_on_dquot(dquot);
+ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+ ret = fn(dquot, priv);
+ if (ret < 0)
+ goto out;
+ }
 spin_lock(&dq_list_lock);
 /* We are safe to continue now because our dquot could not
 * be moved out of the inuse list while we hold the reference */
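
/* Editor's note: the race-avoidance pattern from dquot_scan_active above.
 * Holding a reference blocks new ->release_dquot() calls, but one may
 * already be running, so wait for it and recheck DQ_ACTIVE_B before
 * calling back. Helper name is illustrative.
 */
static int demo_visit_active_dquot(struct dquot *dquot,
				   int (*fn)(struct dquot *, unsigned long),
				   unsigned long priv)
{
	wait_on_dquot(dquot);
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
		return 0;               /* released while we waited; skip */
	return fn(dquot, priv);
}
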
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index c542c73cfa3c..f9c90b552452 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -130,13 +130,14 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum)
 else if (inum > o->inum)
 p = p->rb_right;
 else {
- if (o->dnext) {
+ if (o->del) {
 spin_unlock(&c->orphan_lock);
 dbg_gen("deleted twice ino %lu",
 (unsigned long)inum);
 return;
 }
 if (o->cnext) {
+ o->del = 1;
 o->dnext = c->orph_dnext;
 c->orph_dnext = o;
 spin_unlock(&c->orphan_lock);
@@ -447,6 +448,7 @@ static void erase_deleted(struct ubifs_info *c)
 orphan = dnext;
 dnext = orphan->dnext;
 ubifs_assert(!orphan->new);
+ ubifs_assert(orphan->del);
 rb_erase(&orphan->rb, &c->orph_tree);
 list_del(&orphan->list);
 c->tot_orphans -= 1;
@@ -536,6 +538,7 @@ static int insert_dead_orphan(struct ubifs_info *c, ino_t inum)
 rb_link_node(&orphan->rb, parent, p);
 rb_insert_color(&orphan->rb, &c->orph_tree);
 list_add_tail(&orphan->list, &c->orph_list);
+ orphan->del = 1;
 orphan->dnext = c->orph_dnext;
 c->orph_dnext = orphan;
 dbg_mnt("ino %lu, new %d, tot %d", (unsigned long)inum,
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 4971cb23b6c8..3f962617e29b 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -905,6 +905,7 @@ struct ubifs_budget_req {
 * @dnext: next orphan to delete
 * @inum: inode number
 * @new: %1 => added since the last commit, otherwise %0
+ * @del: %1 => delete pending, otherwise %0
 */
 struct ubifs_orphan {
 struct rb_node rb;
@@ -914,6 +915,7 @@ struct ubifs_orphan {
 struct ubifs_orphan *dnext;
 ino_t inum;
 int new;
+ unsigned del:1;
 };

 /**
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 5a85b3415c1b..be71efc36bba 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -32,7 +32,6 @@ extern int cgroup_lock_is_held(void);
 extern bool cgroup_lock_live_group(struct cgroup *cgrp);
 extern void cgroup_unlock(void);
 extern void cgroup_fork(struct task_struct *p);
-extern void cgroup_fork_callbacks(struct task_struct *p);
 extern void cgroup_post_fork(struct task_struct *p);
 extern void cgroup_exit(struct task_struct *p, int run_callbacks);
 extern int cgroupstats_build(struct cgroupstats *stats,
@@ -514,16 +513,54 @@ static inline struct cgroup_subsys_state *cgroup_subsys_state(
 return cgrp->subsys[subsys_id];
 }

-/*
- * function to get the cgroup_subsys_state which allows for extra
- * rcu_dereference_check() conditions, such as locks used during the
- * cgroup_subsys::attach() methods.
+/**
+ * task_css_set_check - obtain a task's css_set with extra access conditions
+ * @task: the task to obtain css_set for
+ * @__c: extra condition expression to be passed to rcu_dereference_check()
+ *
+ * A task's css_set is RCU protected, initialized and exited while holding
+ * task_lock(), and can only be modified while holding both cgroup_mutex
+ * and task_lock() while the task is alive. This macro verifies that the
+ * caller is inside proper critical section and returns @task's css_set.
+ *
+ * The caller can also specify additional allowed conditions via @__c, such
+ * as locks used during the cgroup_subsys::attach() methods.
+ */
+#define task_css_set_check(task, __c) \
+ rcu_dereference_check((task)->cgroups, \
+ lockdep_is_held(&(task)->alloc_lock) || \
+ cgroup_lock_is_held() || (__c))
+
+/**
+ * task_subsys_state_check - obtain css for (task, subsys) w/ extra access conds
+ * @task: the target task
+ * @subsys_id: the target subsystem ID
+ * @__c: extra condition expression to be passed to rcu_dereference_check()
+ *
+ * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
+ * synchronization rules are the same as task_css_set_check().
 */
 #define task_subsys_state_check(task, subsys_id, __c) \
- rcu_dereference_check(task->cgroups->subsys[subsys_id], \
- lockdep_is_held(&task->alloc_lock) || \
- cgroup_lock_is_held() || (__c))
+ task_css_set_check((task), (__c))->subsys[(subsys_id)]

+/**
+ * task_css_set - obtain a task's css_set
+ * @task: the task to obtain css_set for
+ *
+ * See task_css_set_check().
+ */
+static inline struct css_set *task_css_set(struct task_struct *task)
+{
+ return task_css_set_check(task, false);
+}
+
+/**
+ * task_subsys_state - obtain css for (task, subsys)
+ * @task: the target task
+ * @subsys_id: the target subsystem ID
+ *
+ * See task_subsys_state_check().
+ */
 static inline struct cgroup_subsys_state *
 task_subsys_state(struct task_struct *task, int subsys_id)
 {
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index dd6444f67ed1..2ffbf9938a31 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1178,6 +1178,7 @@ int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
 int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
 int jbd2_journal_force_commit_nested(journal_t *journal);
 int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
+int jbd2_complete_transaction(journal_t *journal, tid_t tid);
 int jbd2_log_do_checkpoint(journal_t *journal);
 int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid);

diff --git a/include/linux/nbd.h b/include/linux/nbd.h
|
|
index d146ca10c0f5..e6fe17431b95 100644
|
|
--- a/include/linux/nbd.h
|
|
+++ b/include/linux/nbd.h
|
|
@@ -68,6 +68,7 @@ struct nbd_device {
|
|
u64 bytesize;
|
|
pid_t pid; /* pid of nbd-client, if attached */
|
|
int xmit_timeout;
|
|
+ int disconnect; /* a disconnect has been requested by user */
|
|
};
|
|
|
|
#endif
|
|
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
|
|
index 42b919c36da1..3a7b87e1fd89 100644
|
|
--- a/include/linux/skbuff.h
|
|
+++ b/include/linux/skbuff.h
|
|
@@ -2159,6 +2159,8 @@ extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
|
|
extern struct sk_buff *skb_segment(struct sk_buff *skb,
|
|
netdev_features_t features);
|
|
|
|
+unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
|
|
+
|
|
static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
|
|
int len, void *buffer)
|
|
{
|
|
@@ -2580,5 +2582,22 @@ static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
|
|
|
|
return true;
|
|
}
|
|
+
|
|
+/**
|
|
+ * skb_gso_network_seglen - Return length of individual segments of a gso packet
|
|
+ *
|
|
+ * @skb: GSO skb
|
|
+ *
|
|
+ * skb_gso_network_seglen is used to determine the real size of the
|
|
+ * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
|
|
+ *
|
|
+ * The MAC/L2 header is not accounted for.
|
|
+ */
|
|
+static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
|
|
+{
|
|
+ unsigned int hdr_len = skb_transport_header(skb) -
|
|
+ skb_network_header(skb);
|
|
+ return hdr_len + skb_gso_transport_seglen(skb);
|
|
+}
|
|
#endif /* __KERNEL__ */
|
|
#endif /* _LINUX_SKBUFF_H */
|
|
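
skb_gso_network_seglen() makes "does this fit the MTU" checks meaningful for GSO packets, whose skb->len covers the whole super-packet rather than what hits the wire. A hedged sketch of such a check, mirroring the ip_forward change later in this patch (exceeds_mtu is an illustrative name):

	static bool exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
	{
		if (!skb_is_gso(skb))
			return skb->len > mtu;	/* plain packet: compare as usual */
		/* GSO: compare the L3 size of the segments that will be emitted */
		return skb_gso_network_seglen(skb) > mtu;
	}
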
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index cb94668f6e9f..a36c87a6623d 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -13,6 +13,24 @@
#include "../grant_table.h"

/*
+ * Older implementation of Xen network frontend / backend has an
+ * implicit dependency on the MAX_SKB_FRAGS as the maximum number of
+ * ring slots a skb can use. Netfront / netback may not work as
+ * expected when frontend and backend have different MAX_SKB_FRAGS.
+ *
+ * A better approach is to add mechanism for netfront / netback to
+ * negotiate this value. However we cannot fix all possible
+ * frontends, so we need to define a value which states the minimum
+ * slots backend must support.
+ *
+ * The minimum value derives from older Linux kernel's MAX_SKB_FRAGS
+ * (18), which is proved to work with most frontends. Any new backend
+ * which doesn't negotiate with frontend should expect frontend to
+ * send a valid packet using slots up to this value.
+ */
+#define XEN_NETIF_NR_SLOTS_MIN 18
+
+/*
* Notifications after enqueuing any type of message should be conditional on
* the appropriate req_event or rsp_event field in the shared ring.
* If the client sends notification for rx requests then it should specify
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index 75271b9a8f61..7d28aff605c7 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -188,6 +188,11 @@ struct __name##_back_ring { \
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
(((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))

+/* Ill-behaved frontend determination: Can there be this many requests? */
+#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
+ (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
+
+
#define RING_PUSH_REQUESTS(_r) do { \
wmb(); /* back sees requests /before/ updated producer index */ \
(_r)->sring->req_prod = (_r)->req_prod_pvt; \
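
RING_REQUEST_PROD_OVERFLOW() lets a backend sanity-check the producer index a (possibly hostile) frontend publishes before trusting it. A hedged sketch of how a backend loop might apply it; the surrounding names are illustrative, not from this patch:

	RING_IDX prod = back_ring.sring->req_prod;

	if (RING_REQUEST_PROD_OVERFLOW(&back_ring, prod)) {
		/* Frontend claims more outstanding requests than the ring
		 * can hold: treat it as broken or malicious and stop it. */
		fatal_error_on_vif(vif);	/* illustrative error handler */
		return;
	}
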
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 519c252f0fc8..7c8f4f7d0c71 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4497,41 +4497,19 @@ void cgroup_fork(struct task_struct *child)
}

/**
- * cgroup_fork_callbacks - run fork callbacks
- * @child: the new task
- *
- * Called on a new task very soon before adding it to the
- * tasklist. No need to take any locks since no-one can
- * be operating on this task.
- */
-void cgroup_fork_callbacks(struct task_struct *child)
-{
- if (need_forkexit_callback) {
- int i;
- /*
- * forkexit callbacks are only supported for builtin
- * subsystems, and the builtin section of the subsys array is
- * immutable, so we don't need to lock the subsys array here.
- */
- for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
- struct cgroup_subsys *ss = subsys[i];
- if (ss->fork)
- ss->fork(child);
- }
- }
-}
-
-/**
* cgroup_post_fork - called on a new task after adding it to the task list
* @child: the task in question
*
- * Adds the task to the list running through its css_set if necessary.
- * Has to be after the task is visible on the task list in case we race
- * with the first call to cgroup_iter_start() - to guarantee that the
- * new task ends up on its list.
+ * Adds the task to the list running through its css_set if necessary and
+ * call the subsystem fork() callbacks. Has to be after the task is
+ * visible on the task list in case we race with the first call to
+ * cgroup_iter_start() - to guarantee that the new task ends up on its
+ * list.
*/
void cgroup_post_fork(struct task_struct *child)
{
+ int i;
+
/*
* use_task_css_set_links is set to 1 before we walk the tasklist
* under the tasklist_lock and we read it here after we added the child
@@ -4551,7 +4529,21 @@ void cgroup_post_fork(struct task_struct *child)
task_unlock(child);
write_unlock(&css_set_lock);
}
+
+ /*
+ * Call ss->fork(). This must happen after @child is linked on
+ * css_set; otherwise, @child might change state between ->fork()
+ * and addition to css_set.
+ */
+ if (need_forkexit_callback) {
+ for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
+ struct cgroup_subsys *ss = subsys[i];
+ if (ss->fork)
+ ss->fork(child);
+ }
+ }
}
+
/**
* cgroup_exit - detach cgroup from exiting task
* @tsk: pointer to task_struct of exiting process
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index f86e93920b62..a902e2adc40a 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -186,23 +186,15 @@ static void freezer_fork(struct task_struct *task)
{
struct freezer *freezer;

- /*
- * No lock is needed, since the task isn't on tasklist yet,
- * so it can't be moved to another cgroup, which means the
- * freezer won't be removed and will be valid during this
- * function call. Nevertheless, apply RCU read-side critical
- * section to suppress RCU lockdep false positives.
- */
rcu_read_lock();
freezer = task_freezer(task);
- rcu_read_unlock();

/*
* The root cgroup is non-freezable, so we can skip the
* following check.
*/
if (!freezer->css.cgroup->parent)
- return;
+ goto out;

spin_lock_irq(&freezer->lock);
BUG_ON(freezer->state == CGROUP_FROZEN);
@@ -210,7 +202,10 @@ static void freezer_fork(struct task_struct *task)
/* Locking avoids race with FREEZING -> THAWED transitions. */
if (freezer->state == CGROUP_FREEZING)
freeze_task(task);
+
spin_unlock_irq(&freezer->lock);
+out:
+ rcu_read_unlock();
}

/*
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 8e82398c2843..eba82e2d34e9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7149,14 +7149,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
static void __perf_event_exit_context(void *__info)
{
struct perf_event_context *ctx = __info;
- struct perf_event *event, *tmp;
+ struct perf_event *event;

perf_pmu_rotate_stop(ctx->pmu);

- list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
- __perf_remove_from_context(event);
- list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
+ rcu_read_lock();
+ list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
__perf_remove_from_context(event);
+ rcu_read_unlock();
}

static void perf_event_exit_cpu_context(int cpu)
@@ -7180,11 +7180,11 @@ static void perf_event_exit_cpu(int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

+ perf_event_exit_cpu_context(cpu);
+
mutex_lock(&swhash->hlist_mutex);
swevent_hlist_release(swhash);
mutex_unlock(&swhash->hlist_mutex);
-
- perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
diff --git a/kernel/fork.c b/kernel/fork.c
index 81633337aee1..afac42b8889c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1124,7 +1124,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
{
int retval;
struct task_struct *p;
- int cgroup_callbacks_done = 0;

if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
return ERR_PTR(-EINVAL);
@@ -1383,12 +1382,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->group_leader = p;
INIT_LIST_HEAD(&p->thread_group);

- /* Now that the task is set up, run cgroup callbacks if
- * necessary. We need to run them before the task is visible
- * on the tasklist. */
- cgroup_fork_callbacks(p);
- cgroup_callbacks_done = 1;
-
/* Need tasklist lock for parent etc handling! */
write_lock_irq(&tasklist_lock);

@@ -1493,7 +1486,7 @@ bad_fork_cleanup_cgroup:
#endif
if (clone_flags & CLONE_THREAD)
threadgroup_change_end(current);
- cgroup_exit(p, cgroup_callbacks_done);
+ cgroup_exit(p, 0);
delayacct_tsk_free(p);
module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 575d092fa746..13ea688f2761 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1469,12 +1469,19 @@ static void destroy_worker(struct worker *worker)
if (worker->flags & WORKER_IDLE)
gcwq->nr_idle--;

+ /*
+ * Once WORKER_DIE is set, the kworker may destroy itself at any
+ * point. Pin to ensure the task stays until we're done with it.
+ */
+ get_task_struct(worker->task);
+
list_del_init(&worker->entry);
worker->flags |= WORKER_DIE;

spin_unlock_irq(&gcwq->lock);

kthread_stop(worker->task);
+ put_task_struct(worker->task);
kfree(worker);

spin_lock_irq(&gcwq->lock);
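
The destroy_worker() change is an instance of a general pattern: take a reference on a task_struct before any step that could let the task free itself, and drop it once you are done touching it. A hedged sketch of the pattern in isolation (stop_my_kthread is an illustrative name, not from this patch):

	static void stop_my_kthread(struct task_struct *t)
	{
		get_task_struct(t);	/* pin: t could otherwise exit and be freed */
		kthread_stop(t);	/* may sleep; t stays valid thanks to the pin */
		put_task_struct(t);	/* drop our reference; t may now be freed */
	}
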
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9ad7d1ef6ac1..09d87b709179 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -515,19 +515,20 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages)

zone->present_pages += onlined_pages;
zone->zone_pgdat->node_present_pages += onlined_pages;
- if (need_zonelists_rebuild)
- build_all_zonelists(zone);
- else
- zone_pcp_update(zone);
+ if (onlined_pages) {
+ node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
+ if (need_zonelists_rebuild)
+ build_all_zonelists(zone);
+ else
+ zone_pcp_update(zone);
+ }

mutex_unlock(&zonelists_mutex);

init_per_zone_wmark_min();

- if (onlined_pages) {
+ if (onlined_pages)
kswapd_run(zone_to_nid(zone));
- node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
- }

vm_total_pages = nr_free_pagecache_pages();


diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 8e3aa4dc5bec..d8f031a762ae 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1721,6 +1721,8 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
dout("osdc_start_request failed map, "
" will retry %lld\n", req->r_tid);
rc = 0;
+ } else {
+ __unregister_request(osdc, req);
}
goto out_unlock;
}
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index c0c21b1ce1ec..6af54f2d0da0 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -718,6 +718,13 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
attach_rules(&ops->rules_list, dev);
break;

+ case NETDEV_CHANGENAME:
+ list_for_each_entry(ops, &net->rules_ops, list) {
+ detach_rules(&ops->rules_list, dev);
+ attach_rules(&ops->rules_list, dev);
+ }
+ break;
+
case NETDEV_UNREGISTER:
list_for_each_entry(ops, &net->rules_ops, list)
detach_rules(&ops->rules_list, dev);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e99aedd9c496..7a597d4feaec 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -45,6 +45,8 @@
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
@@ -3281,3 +3283,26 @@ void __skb_warn_lro_forwarding(const struct sk_buff *skb)
" while LRO is enabled\n", skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);
+
+/**
+ * skb_gso_transport_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_transport_seglen is used to determine the real size of the
+ * individual segments, including Layer4 headers (TCP/UDP).
+ *
+ * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
+ */
+unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+{
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ unsigned int hdr_len;
+
+ if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+ hdr_len = tcp_hdrlen(skb);
+ else
+ hdr_len = sizeof(struct udphdr);
+ return hdr_len + shinfo->gso_size;
+}
+EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
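
To make the arithmetic concrete with assumed example values (not from the patch): a GSO TCPv4 skb carrying full-sized Ethernet frames has gso_size = 1448 and tcp_hdrlen() = 20, so skb_gso_transport_seglen() returns 1448 + 20 = 1468; skb_gso_network_seglen() then adds the 20-byte IPv4 header for 1488, which is the on-the-wire L3 size of each resulting segment and therefore the right quantity to compare against a 1500-byte route MTU.
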
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 29a07b6c7168..e0d9f02fec11 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -39,6 +39,68 @@
#include <net/route.h>
#include <net/xfrm.h>

+static bool ip_may_fragment(const struct sk_buff *skb)
+{
+ return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
+ !skb->local_df;
+}
+
+static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+{
+ if (skb->len <= mtu || skb->local_df)
+ return false;
+
+ if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+ return false;
+
+ return true;
+}
+
+static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
+{
+ unsigned int mtu;
+
+ if (skb->local_df || !skb_is_gso(skb))
+ return false;
+
+ mtu = dst_mtu(skb_dst(skb));
+
+ /* if seglen > mtu, do software segmentation for IP fragmentation on
+ * output. DF bit cannot be set since ip_forward would have sent
+ * icmp error.
+ */
+ return skb_gso_network_seglen(skb) > mtu;
+}
+
+/* called if GSO skb needs to be fragmented on forward */
+static int ip_forward_finish_gso(struct sk_buff *skb)
+{
+ struct sk_buff *segs;
+ int ret = 0;
+
+ segs = skb_gso_segment(skb, 0);
+ if (IS_ERR(segs)) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ consume_skb(skb);
+
+ do {
+ struct sk_buff *nskb = segs->next;
+ int err;
+
+ segs->next = NULL;
+ err = dst_output(segs);
+
+ if (err && ret == 0)
+ ret = err;
+ segs = nskb;
+ } while (segs);
+
+ return ret;
+}
+
static int ip_forward_finish(struct sk_buff *skb)
{
struct ip_options * opt = &(IPCB(skb)->opt);
@@ -48,6 +110,9 @@ static int ip_forward_finish(struct sk_buff *skb)
if (unlikely(opt->optlen))
ip_forward_options(skb);

+ if (ip_gso_exceeds_dst_mtu(skb))
+ return ip_forward_finish_gso(skb);
+
return dst_output(skb);
}

@@ -87,8 +152,7 @@ int ip_forward(struct sk_buff *skb)
if (opt->is_strictroute && opt->nexthop != rt->rt_gateway)
goto sr_failed;

- if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
- (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
+ if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, dst_mtu(&rt->dst))) {
IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(dst_mtu(&rt->dst)));
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 91cd5f1657b7..8cd6854c2cae 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -382,6 +382,17 @@ static inline int ip6_forward_finish(struct sk_buff *skb)
return dst_output(skb);
}

+static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
+{
+ if (skb->len <= mtu || skb->local_df)
+ return false;
+
+ if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+ return false;
+
+ return true;
+}
+
int ip6_forward(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
@@ -503,7 +514,7 @@ int ip6_forward(struct sk_buff *skb)
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;

- if (skb->len > mtu && !skb_is_gso(skb)) {
+ if (ip6_pkt_too_big(skb, mtu)) {
/* Again, force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index e9e50cadd52f..0ed156b537d2 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -70,6 +70,7 @@
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/slab.h>
+#include <linux/compat.h>

#include <net/ip.h>
#include <net/icmp.h>
@@ -1376,11 +1377,19 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
/*
* New (hopefully final) interface for the API.
* We use the sctp_getaddrs_old structure so that use-space library
- * can avoid any unnecessary allocations. The only defferent part
+ * can avoid any unnecessary allocations. The only different part
* is that we store the actual length of the address buffer into the
- * addrs_num structure member. That way we can re-use the existing
+ * addrs_num structure member. That way we can re-use the existing
* code.
*/
+#ifdef CONFIG_COMPAT
+struct compat_sctp_getaddrs_old {
+ sctp_assoc_t assoc_id;
+ s32 addr_num;
+ compat_uptr_t addrs; /* struct sockaddr * */
+};
+#endif
+
SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
char __user *optval,
int __user *optlen)
@@ -1389,16 +1398,30 @@ SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
sctp_assoc_t assoc_id = 0;
int err = 0;

- if (len < sizeof(param))
- return -EINVAL;
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ struct compat_sctp_getaddrs_old param32;

- if (copy_from_user(&param, optval, sizeof(param)))
- return -EFAULT;
+ if (len < sizeof(param32))
+ return -EINVAL;
+ if (copy_from_user(&param32, optval, sizeof(param32)))
+ return -EFAULT;

- err = __sctp_setsockopt_connectx(sk,
- (struct sockaddr __user *)param.addrs,
- param.addr_num, &assoc_id);
+ param.assoc_id = param32.assoc_id;
+ param.addr_num = param32.addr_num;
+ param.addrs = compat_ptr(param32.addrs);
+ } else
+#endif
+ {
+ if (len < sizeof(param))
+ return -EINVAL;
+ if (copy_from_user(&param, optval, sizeof(param)))
+ return -EFAULT;
+ }

+ err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
+ param.addrs, param.addr_num,
+ &assoc_id);
if (err == 0 || err == -EINPROGRESS) {
if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
return -EFAULT;
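
The sctp change follows the standard CONFIG_COMPAT recipe: declare a layout-matched 32-bit variant of the struct with compat_uptr_t for pointer members, copy that in from a 32-bit task, then widen the pointer with compat_ptr(). A hedged, generic sketch of the recipe with illustrative struct and function names:

	struct my_args { s32 value; void __user *buf; };

	#ifdef CONFIG_COMPAT
	struct compat_my_args {
		s32		value;
		compat_uptr_t	buf;	/* 32-bit user pointer */
	};
	#endif

	static int get_my_args(struct my_args *args, void __user *uarg)
	{
	#ifdef CONFIG_COMPAT
		if (is_compat_task()) {
			struct compat_my_args c;

			if (copy_from_user(&c, uarg, sizeof(c)))
				return -EFAULT;
			args->value = c.value;
			args->buf = compat_ptr(c.buf);	/* widen to native */
			return 0;
		}
	#endif
		return copy_from_user(args, uarg, sizeof(*args)) ? -EFAULT : 0;
	}
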
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 72d89e129caa..c3930172c06c 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -302,13 +302,20 @@ EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
/*
* Make an RPC task runnable.
*
- * Note: If the task is ASYNC, this must be called with
- * the spinlock held to protect the wait queue operation.
+ * Note: If the task is ASYNC, and is being made runnable after sitting on an
+ * rpc_wait_queue, this must be called with the queue spinlock held to protect
+ * the wait queue operation.
+ * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
+ * which is needed to ensure that __rpc_execute() doesn't loop (due to the
+ * lockless RPC_IS_QUEUED() test) before we've had a chance to test
+ * the RPC_TASK_RUNNING flag.
*/
static void rpc_make_runnable(struct rpc_task *task)
{
+ bool need_wakeup = !rpc_test_and_set_running(task);
+
rpc_clear_queued(task);
- if (rpc_test_and_set_running(task))
+ if (!need_wakeup)
return;
if (RPC_IS_ASYNC(task)) {
INIT_WORK(&task->u.tk_work, rpc_async_schedule);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 31f981d700a3..31275e52c667 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -501,6 +501,7 @@ static int xs_nospace(struct rpc_task *task)
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+ struct sock *sk = transport->inet;
int ret = -EAGAIN;

dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
@@ -518,7 +519,7 @@ static int xs_nospace(struct rpc_task *task)
* window size
*/
set_bit(SOCK_NOSPACE, &transport->sock->flags);
- transport->inet->sk_write_pending++;
+ sk->sk_write_pending++;
/* ...and wait for more buffer space */
xprt_wait_for_buffer_space(task, xs_nospace_callback);
}
@@ -528,6 +529,9 @@ static int xs_nospace(struct rpc_task *task)
}

spin_unlock_bh(&xprt->transport_lock);
+
+ /* Race breaker in case memory is freed before above code is called */
+ sk->sk_write_space(sk);
return ret;
}

diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 1249e17b61ff..23e9cba8d4d2 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -3214,10 +3214,10 @@ static int filename_write_helper(void *key, void *data, void *ptr)
if (rc)
return rc;

- buf[0] = ft->stype;
- buf[1] = ft->ttype;
- buf[2] = ft->tclass;
- buf[3] = otype->otype;
+ buf[0] = cpu_to_le32(ft->stype);
+ buf[1] = cpu_to_le32(ft->ttype);
+ buf[2] = cpu_to_le32(ft->tclass);
+ buf[3] = cpu_to_le32(otype->otype);

rc = put_entry(buf, sizeof(u32), 4, fp);
if (rc)
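
The policydb fix only matters on big-endian machines: the on-disk policy format stores fields little-endian, so every u32 must pass through cpu_to_le32() before being written out. A hedged illustration with an assumed value:

	__le32 buf[4];
	u32 stype = 0x00000001;

	buf[0] = cpu_to_le32(stype);
	/* Little-endian CPU: no-op; the bytes written are 01 00 00 00.
	 * Big-endian CPU: byte-swapped, so the disk image is still
	 * 01 00 00 00 rather than the wrong 00 00 00 01. */
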
diff --git a/sound/arm/pxa2xx-pcm-lib.c b/sound/arm/pxa2xx-pcm-lib.c
index 76e0d5695075..823359ed95e1 100644
--- a/sound/arm/pxa2xx-pcm-lib.c
+++ b/sound/arm/pxa2xx-pcm-lib.c
@@ -166,7 +166,9 @@ void pxa2xx_pcm_dma_irq(int dma_ch, void *dev_id)
} else {
printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n",
rtd->params->name, dma_ch, dcsr);
+ snd_pcm_stream_lock(substream);
snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stream_unlock(substream);
}
}
EXPORT_SYMBOL(pxa2xx_pcm_dma_irq);
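
This and the remaining sound/* hunks below all apply the same rule: snd_pcm_stop() must be called with the PCM stream lock held. The shape of the fix, abstracted into a sketch (which lock variant applies depends on whether the caller can run with interrupts disabled):

	/* Plain process or already-IRQ-safe context: */
	snd_pcm_stream_lock(substream);
	snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
	snd_pcm_stream_unlock(substream);

	/* Context where interrupts may need to be saved/restored: */
	unsigned long flags;

	snd_pcm_stream_lock_irqsave(substream, flags);
	snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
	snd_pcm_stream_unlock_irqrestore(substream, flags);
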
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index e8de831f98bc..05093bd3c390 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -769,7 +769,10 @@ static void snd_card_asihpi_timer_function(unsigned long data)
s->number);
ds->drained_count++;
if (ds->drained_count > 20) {
+ unsigned long flags;
+ snd_pcm_stream_lock_irqsave(s, flags);
snd_pcm_stop(s, SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stream_unlock_irqrestore(s, flags);
continue;
}
} else {
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index 590682f115ef..0c48216a74ba 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -688,7 +688,9 @@ static void snd_atiixp_xrun_dma(struct atiixp *chip, struct atiixp_dma *dma)
if (! dma->substream || ! dma->running)
return;
snd_printdd("atiixp: XRUN detected (DMA %d)\n", dma->ops->type);
+ snd_pcm_stream_lock(dma->substream);
snd_pcm_stop(dma->substream, SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stream_unlock(dma->substream);
}

/*
diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
index 524d35f31232..94d6e9bf245b 100644
--- a/sound/pci/atiixp_modem.c
+++ b/sound/pci/atiixp_modem.c
@@ -638,7 +638,9 @@ static void snd_atiixp_xrun_dma(struct atiixp_modem *chip,
if (! dma->substream || ! dma->running)
return;
snd_printdd("atiixp-modem: XRUN detected (DMA %d)\n", dma->ops->type);
+ snd_pcm_stream_lock(dma->substream);
snd_pcm_stop(dma->substream, SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stream_unlock(dma->substream);
}

/*
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 8e92fb88ed09..f0b8d8e38f71 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -37,7 +37,7 @@
static const u16 sgtl5000_regs[SGTL5000_MAX_REG_OFFSET] = {
[SGTL5000_CHIP_CLK_CTRL] = 0x0008,
[SGTL5000_CHIP_I2S_CTRL] = 0x0010,
- [SGTL5000_CHIP_SSS_CTRL] = 0x0008,
+ [SGTL5000_CHIP_SSS_CTRL] = 0x0010,
[SGTL5000_CHIP_DAC_VOL] = 0x3c3c,
[SGTL5000_CHIP_PAD_STRENGTH] = 0x015f,
[SGTL5000_CHIP_ANA_HP_CTRL] = 0x1818,
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index 7db6fa515028..e05da9171ddd 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -147,42 +147,42 @@ static const unsigned int sta32x_limiter_drc_release_tlv[] = {
13, 16, TLV_DB_SCALE_ITEM(-1500, 300, 0),
};

-static const struct soc_enum sta32x_drc_ac_enum =
- SOC_ENUM_SINGLE(STA32X_CONFD, STA32X_CONFD_DRC_SHIFT,
- 2, sta32x_drc_ac);
-static const struct soc_enum sta32x_auto_eq_enum =
- SOC_ENUM_SINGLE(STA32X_AUTO1, STA32X_AUTO1_AMEQ_SHIFT,
- 3, sta32x_auto_eq_mode);
-static const struct soc_enum sta32x_auto_gc_enum =
- SOC_ENUM_SINGLE(STA32X_AUTO1, STA32X_AUTO1_AMGC_SHIFT,
- 4, sta32x_auto_gc_mode);
-static const struct soc_enum sta32x_auto_xo_enum =
- SOC_ENUM_SINGLE(STA32X_AUTO2, STA32X_AUTO2_XO_SHIFT,
- 16, sta32x_auto_xo_mode);
-static const struct soc_enum sta32x_preset_eq_enum =
- SOC_ENUM_SINGLE(STA32X_AUTO3, STA32X_AUTO3_PEQ_SHIFT,
- 32, sta32x_preset_eq_mode);
-static const struct soc_enum sta32x_limiter_ch1_enum =
- SOC_ENUM_SINGLE(STA32X_C1CFG, STA32X_CxCFG_LS_SHIFT,
- 3, sta32x_limiter_select);
-static const struct soc_enum sta32x_limiter_ch2_enum =
- SOC_ENUM_SINGLE(STA32X_C2CFG, STA32X_CxCFG_LS_SHIFT,
- 3, sta32x_limiter_select);
-static const struct soc_enum sta32x_limiter_ch3_enum =
- SOC_ENUM_SINGLE(STA32X_C3CFG, STA32X_CxCFG_LS_SHIFT,
- 3, sta32x_limiter_select);
-static const struct soc_enum sta32x_limiter1_attack_rate_enum =
- SOC_ENUM_SINGLE(STA32X_L1AR, STA32X_LxA_SHIFT,
- 16, sta32x_limiter_attack_rate);
-static const struct soc_enum sta32x_limiter2_attack_rate_enum =
- SOC_ENUM_SINGLE(STA32X_L2AR, STA32X_LxA_SHIFT,
- 16, sta32x_limiter_attack_rate);
-static const struct soc_enum sta32x_limiter1_release_rate_enum =
- SOC_ENUM_SINGLE(STA32X_L1AR, STA32X_LxR_SHIFT,
- 16, sta32x_limiter_release_rate);
-static const struct soc_enum sta32x_limiter2_release_rate_enum =
- SOC_ENUM_SINGLE(STA32X_L2AR, STA32X_LxR_SHIFT,
- 16, sta32x_limiter_release_rate);
+static SOC_ENUM_SINGLE_DECL(sta32x_drc_ac_enum,
+ STA32X_CONFD, STA32X_CONFD_DRC_SHIFT,
+ sta32x_drc_ac);
+static SOC_ENUM_SINGLE_DECL(sta32x_auto_eq_enum,
+ STA32X_AUTO1, STA32X_AUTO1_AMEQ_SHIFT,
+ sta32x_auto_eq_mode);
+static SOC_ENUM_SINGLE_DECL(sta32x_auto_gc_enum,
+ STA32X_AUTO1, STA32X_AUTO1_AMGC_SHIFT,
+ sta32x_auto_gc_mode);
+static SOC_ENUM_SINGLE_DECL(sta32x_auto_xo_enum,
+ STA32X_AUTO2, STA32X_AUTO2_XO_SHIFT,
+ sta32x_auto_xo_mode);
+static SOC_ENUM_SINGLE_DECL(sta32x_preset_eq_enum,
+ STA32X_AUTO3, STA32X_AUTO3_PEQ_SHIFT,
+ sta32x_preset_eq_mode);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch1_enum,
+ STA32X_C1CFG, STA32X_CxCFG_LS_SHIFT,
+ sta32x_limiter_select);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch2_enum,
+ STA32X_C2CFG, STA32X_CxCFG_LS_SHIFT,
+ sta32x_limiter_select);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch3_enum,
+ STA32X_C3CFG, STA32X_CxCFG_LS_SHIFT,
+ sta32x_limiter_select);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter1_attack_rate_enum,
+ STA32X_L1AR, STA32X_LxA_SHIFT,
+ sta32x_limiter_attack_rate);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter2_attack_rate_enum,
+ STA32X_L2AR, STA32X_LxA_SHIFT,
+ sta32x_limiter_attack_rate);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter1_release_rate_enum,
+ STA32X_L1AR, STA32X_LxR_SHIFT,
+ sta32x_limiter_release_rate);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter2_release_rate_enum,
+ STA32X_L2AR, STA32X_LxR_SHIFT,
+ sta32x_limiter_release_rate);

/* byte array controls for setting biquad, mixer, scaling coefficients;
* for biquads all five coefficients need to be set in one go,
@@ -394,7 +394,7 @@ SOC_SINGLE_TLV("Treble Tone Control", STA32X_TONE, STA32X_TONE_TTC_SHIFT, 15, 0,
SOC_ENUM("Limiter1 Attack Rate (dB/ms)", sta32x_limiter1_attack_rate_enum),
SOC_ENUM("Limiter2 Attack Rate (dB/ms)", sta32x_limiter2_attack_rate_enum),
SOC_ENUM("Limiter1 Release Rate (dB/ms)", sta32x_limiter1_release_rate_enum),
-SOC_ENUM("Limiter2 Release Rate (dB/ms)", sta32x_limiter1_release_rate_enum),
+SOC_ENUM("Limiter2 Release Rate (dB/ms)", sta32x_limiter2_release_rate_enum),

/* depending on mode, the attack/release thresholds have
* two different enum definitions; provide both
diff --git a/sound/soc/codecs/wm8770.c b/sound/soc/codecs/wm8770.c
index a5127b4ff9e1..9e82674ed0ca 100644
--- a/sound/soc/codecs/wm8770.c
+++ b/sound/soc/codecs/wm8770.c
@@ -162,8 +162,8 @@ static const char *ain_text[] = {
"AIN5", "AIN6", "AIN7", "AIN8"
};

-static const struct soc_enum ain_enum =
- SOC_ENUM_DOUBLE(WM8770_ADCMUX, 0, 4, 8, ain_text);
+static SOC_ENUM_DOUBLE_DECL(ain_enum,
+ WM8770_ADCMUX, 0, 4, ain_text);

static const struct snd_kcontrol_new ain_mux =
SOC_DAPM_ENUM("Capture Mux", ain_enum);
diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
index 1332692ef81b..77552b5048cc 100644
--- a/sound/soc/codecs/wm8958-dsp2.c
+++ b/sound/soc/codecs/wm8958-dsp2.c
@@ -153,7 +153,7 @@ static int wm8958_dsp2_fw(struct snd_soc_codec *codec, const char *name,

data32 &= 0xffffff;

- wm8994_bulk_write(codec->control_data,
+ wm8994_bulk_write(wm8994->wm8994,
data32 & 0xffffff,
block_len / 2,
(void *)(data + 8));
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c
index 4f81ed456325..30afae7baddf 100644
--- a/sound/soc/imx/imx-ssi.c
+++ b/sound/soc/imx/imx-ssi.c
@@ -497,6 +497,8 @@ static void imx_ssi_ac97_reset(struct snd_ac97 *ac97)

if (imx_ssi->ac97_reset)
imx_ssi->ac97_reset(ac97);
+ /* First read sometimes fails, do a dummy read */
+ imx_ssi_ac97_read(ac97, 0);
}

static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97)
@@ -505,6 +507,9 @@ static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97)

if (imx_ssi->ac97_warm_reset)
imx_ssi->ac97_warm_reset(ac97);
+
+ /* First read sometimes fails, do a dummy read */
+ imx_ssi_ac97_read(ac97, 0);
}

struct snd_ac97_bus_ops soc_ac97_ops = {
diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c
index 716da861c629..17f9348966b3 100644
--- a/sound/soc/s6000/s6000-pcm.c
+++ b/sound/soc/s6000/s6000-pcm.c
@@ -128,7 +128,9 @@ static irqreturn_t s6000_pcm_irq(int irq, void *data)
substream->runtime &&
snd_pcm_running(substream)) {
dev_dbg(pcm->dev, "xrun\n");
+ snd_pcm_stream_lock(substream);
snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stream_unlock(substream);
ret = IRQ_HANDLED;
}

diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index 4da9ca9c81bf..7c4f311af0f1 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -639,17 +639,25 @@ int __devinit usb6fire_pcm_init(struct sfire_chip *chip)
void usb6fire_pcm_abort(struct sfire_chip *chip)
{
struct pcm_runtime *rt = chip->pcm;
+ unsigned long flags;
int i;

if (rt) {
rt->panic = true;

- if (rt->playback.instance)
+ if (rt->playback.instance) {
+ snd_pcm_stream_lock_irqsave(rt->playback.instance, flags);
snd_pcm_stop(rt->playback.instance,
SNDRV_PCM_STATE_XRUN);
- if (rt->capture.instance)
+ snd_pcm_stream_unlock_irqrestore(rt->playback.instance, flags);
+ }
+
+ if (rt->capture.instance) {
+ snd_pcm_stream_lock_irqsave(rt->capture.instance, flags);
snd_pcm_stop(rt->capture.instance,
SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stream_unlock_irqrestore(rt->capture.instance, flags);
+ }

for (i = 0; i < PCM_N_URBS; i++) {
usb_poison_urb(&rt->in_urbs[i].instance);
diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c
index 8b81cb54026f..d97d35da53f8 100644
--- a/sound/usb/misc/ua101.c
+++ b/sound/usb/misc/ua101.c
@@ -613,14 +613,24 @@ static int start_usb_playback(struct ua101 *ua)

static void abort_alsa_capture(struct ua101 *ua)
{
- if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states))
+ unsigned long flags;
+
+ if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states)) {
+ snd_pcm_stream_lock_irqsave(ua->capture.substream, flags);
snd_pcm_stop(ua->capture.substream, SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stream_unlock_irqrestore(ua->capture.substream, flags);
+ }
}

static void abort_alsa_playback(struct ua101 *ua)
{
- if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states))
+ unsigned long flags;
+
+ if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states)) {
+ snd_pcm_stream_lock_irqsave(ua->playback.substream, flags);
snd_pcm_stop(ua->playback.substream, SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stream_unlock_irqrestore(ua->playback.substream, flags);
+ }
}

static int set_stream_hw(struct ua101 *ua, struct snd_pcm_substream *substream,
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index f1324c423835..0e4e909d111e 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -304,6 +304,11 @@ static struct usbmix_name_map hercules_usb51_map[] = {
{ 0 } /* terminator */
};

+static const struct usbmix_name_map kef_x300a_map[] = {
+ { 10, NULL }, /* firmware locks up (?) when we try to access this FU */
+ { 0 }
+};
+
/*
* Control map entries
*/
@@ -371,6 +376,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
.map = scratch_live_map,
.ignore_ctl_error = 1,
},
+ {
+ .id = USB_ID(0x27ac, 0x1000),
+ .map = kef_x300a_map,
+ },
{ 0 } /* terminator */
};

diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
index 711299ccf261..bdc4ddb7a060 100644
--- a/sound/usb/usx2y/usbusx2yaudio.c
+++ b/sound/usb/usx2y/usbusx2yaudio.c
@@ -273,7 +273,11 @@ static void usX2Y_clients_stop(struct usX2Ydev *usX2Y)
struct snd_usX2Y_substream *subs = usX2Y->subs[s];
if (subs) {
if (atomic_read(&subs->state) >= state_PRERUNNING) {
+ unsigned long flags;
+
+ snd_pcm_stream_lock_irqsave(subs->pcm_substream, flags);
snd_pcm_stop(subs->pcm_substream, SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stream_unlock_irqrestore(subs->pcm_substream, flags);
}
for (u = 0; u < NRURBS; u++) {
struct urb *urb = subs->urb[u];
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 5b3a0ef4e232..808d94669d0d 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -413,7 +413,7 @@ int parse_events_add_cache(struct list_head *list, int *idx,
for (i = 0; (i < 2) && (op_result[i]); i++) {
char *str = op_result[i];

- snprintf(name + n, MAX_NAME_LEN - n, "-%s\n", str);
+ n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str);

if (cache_op == -1) {
cache_op = parse_aliases(str, hw_cache_op,
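
The parse-events fix is the classic "build a string with repeated snprintf" pattern: each call must advance the write offset by its return value, or later iterations overwrite earlier ones (and the stray "\n" corrupted the event name). A small self-contained illustration, ordinary userspace C rather than kernel code:

	#include <stdio.h>

	int main(void)
	{
		char name[64];
		const char *parts[] = { "l1-dcache", "load", "misses" };
		int n;
		unsigned int i;

		n = snprintf(name, sizeof(name), "%s", parts[0]);
		for (i = 1; i < 3; i++)
			n += snprintf(name + n, sizeof(name) - n, "-%s", parts[i]);

		printf("%s\n", name);	/* prints: l1-dcache-load-misses */
		return 0;
	}
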
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index e9fff9830bf0..53d34f626c15 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -101,6 +101,10 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
while ((gfn << PAGE_SHIFT) & (page_size - 1))
page_size >>= 1;

+ /* Make sure hva is aligned to the page size we want to map */
+ while (gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
+ page_size >>= 1;
+
/*
* Pin all pages we are about to map in memory. This is
* important because we unmap and unpin in 4kb steps later.