sun8i legacy kernel upstream patches up to .110, manually fixed

Igor Pecovnik 2016-02-14 12:44:00 +01:00
parent 20fd1b0c33
commit abc80fc988
73 changed files with 112222 additions and 23 deletions


@@ -0,0 +1,527 @@
diff --git a/Makefile b/Makefile
index 3efde3d..90c3a6f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 40
+SUBLEVEL = 41
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index aa0f913..25e9734 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -741,7 +741,10 @@ static inline void arch_leave_lazy_mmu_mode(void)
PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}
-void arch_flush_lazy_mmu_mode(void);
+static inline void arch_flush_lazy_mmu_mode(void)
+{
+ PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
+}
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 8e8b9a4..faf2c04 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -91,6 +91,7 @@ struct pv_lazy_ops {
/* Set deferred update mode, used for batching operations. */
void (*enter)(void);
void (*leave)(void);
+ void (*flush)(void);
};
struct pv_time_ops {
@@ -680,6 +681,7 @@ void paravirt_end_context_switch(struct task_struct *next);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
+void paravirt_flush_lazy_mmu(void);
void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index ab13760..128323e 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void)
leave_lazy(PARAVIRT_LAZY_MMU);
}
+void paravirt_flush_lazy_mmu(void)
+{
+ preempt_disable();
+
+ if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+ arch_leave_lazy_mmu_mode();
+ arch_enter_lazy_mmu_mode();
+ }
+
+ preempt_enable();
+}
+
void paravirt_start_context_switch(struct task_struct *prev)
{
BUG_ON(preemptible());
@@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
return percpu_read(paravirt_lazy_mode);
}
-void arch_flush_lazy_mmu_mode(void)
-{
- preempt_disable();
-
- if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
- arch_leave_lazy_mmu_mode();
- arch_enter_lazy_mmu_mode();
- }
-
- preempt_enable();
-}
-
struct pv_info pv_info = {
.name = "bare hardware",
.paravirt_enabled = 0,
@@ -477,6 +477,7 @@ struct pv_mmu_ops pv_mmu_ops = {
.lazy_mode = {
.enter = paravirt_nop,
.leave = paravirt_nop,
+ .flush = paravirt_nop,
},
.set_fixmap = native_set_fixmap,
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 642d880..53272bd 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1333,6 +1333,7 @@ __init void lguest_init(void)
pv_mmu_ops.read_cr3 = lguest_read_cr3;
pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
+ pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
pv_mmu_ops.pte_update = lguest_pte_update;
pv_mmu_ops.pte_update_defer = lguest_pte_update;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index e922e01..4a0a2e8 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -377,10 +377,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
if (pgd_none(*pgd_ref))
return -1;
- if (pgd_none(*pgd))
+ if (pgd_none(*pgd)) {
set_pgd(pgd, *pgd_ref);
- else
+ arch_flush_lazy_mmu_mode();
+ } else {
BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+ }
/*
* Below here mismatches are bugs because these lower tables
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 5cb8e27..cf7fe36 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2076,6 +2076,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.lazy_mode = {
.enter = paravirt_enter_lazy_mmu,
.leave = xen_leave_lazy_mmu,
+ .flush = paravirt_flush_lazy_mmu,
},
.set_fixmap = xen_set_fixmap,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a135c61..99a7855 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -72,7 +72,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
* entries. For aliasing ppgtt support we just steal them at the end for
* now. */
- first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES;
+ first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 3234224..b8e6463 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -61,6 +61,10 @@ static int udl_get_modes(struct drm_connector *connector)
int ret;
edid = (struct edid *)udl_get_edid(udl);
+ if (!edid) {
+ drm_mode_connector_update_edid_property(connector, NULL);
+ return 0;
+ }
connector->display_info.raw_edid = (char *)edid;
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index f2f482b..76afcb4 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1123,6 +1123,33 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
}
#endif
+static inline unsigned long get_vm_size(struct vm_area_struct *vma)
+{
+ return vma->vm_end - vma->vm_start;
+}
+
+static inline resource_size_t get_vm_offset(struct vm_area_struct *vma)
+{
+ return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT;
+}
+
+/*
+ * Set a new vm offset.
+ *
+ * Verify that the incoming offset really works as a page offset,
+ * and that the offset and size fit in a resource_size_t.
+ */
+static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off)
+{
+ pgoff_t pgoff = off >> PAGE_SHIFT;
+ if (off != (resource_size_t) pgoff << PAGE_SHIFT)
+ return -EINVAL;
+ if (off + get_vm_size(vma) - 1 < off)
+ return -EINVAL;
+ vma->vm_pgoff = pgoff;
+ return 0;
+}
+
/*
* set up a mapping for shared memory segments
*/
@@ -1132,20 +1159,33 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
struct map_info *map = mtd->priv;
- unsigned long start;
- unsigned long off;
- u32 len;
-
- if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
- off = vma->vm_pgoff << PAGE_SHIFT;
+ resource_size_t start, off;
+ unsigned long len, vma_len;
+
+ /* This is broken because it assumes the MTD device is map-based
+ and that mtd->priv is a valid struct map_info. It should be
+ replaced with something that uses the mtd_get_unmapped_area()
+ operation properly. */
+ if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
+ off = get_vm_offset(vma);
start = map->phys;
len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
start &= PAGE_MASK;
- if ((vma->vm_end - vma->vm_start + off) > len)
+ vma_len = get_vm_size(vma);
+
+ /* Overflow in off+len? */
+ if (vma_len + off < off)
+ return -EINVAL;
+ /* Does it fit in the mapping? */
+ if (vma_len + off > len)
return -EINVAL;
off += start;
- vma->vm_pgoff = off >> PAGE_SHIFT;
+ /* Did that overflow? */
+ if (off < start)
+ return -EINVAL;
+ if (set_vm_offset(vma, off) < 0)
+ return -EINVAL;
vma->vm_flags |= VM_IO | VM_RESERVED;
#ifdef pgprot_noncached
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index df49ce2..978af21 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -3483,6 +3483,30 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
}
}
+static void rtl_speed_down(struct rtl8169_private *tp)
+{
+ u32 adv;
+ int lpa;
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+ lpa = rtl_readphy(tp, MII_LPA);
+
+ if (lpa & (LPA_10HALF | LPA_10FULL))
+ adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
+ else if (lpa & (LPA_100HALF | LPA_100FULL))
+ adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
+ else
+ adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+ (tp->mii.supports_gmii ?
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full : 0);
+
+ rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
+ adv);
+}
+
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -3508,9 +3532,7 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
return false;
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, MII_BMCR, 0x0000);
-
+ rtl_speed_down(tp);
rtl_wol_suspend_quirk(tp);
return true;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 101b28e..58e6183 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -235,6 +235,17 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
linkrate = phy->linkrate;
memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
+ /* Handle vacant phy - rest of dr data is not valid so skip it */
+ if (phy->phy_state == PHY_VACANT) {
+ memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
+ phy->attached_dev_type = NO_DEVICE;
+ if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
+ phy->phy_id = phy_id;
+ goto skip;
+ } else
+ goto out;
+ }
+
phy->attached_dev_type = to_dev_type(dr);
if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
goto out;
@@ -272,6 +283,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
phy->phy->maximum_linkrate = dr->pmax_linkrate;
phy->phy->negotiated_linkrate = phy->linkrate;
+ skip:
if (new_phy)
if (sas_phy_add(phy->phy)) {
sas_phy_free(phy->phy);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index f30e124..aa3c106 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -392,6 +392,7 @@ static inline int core_alua_state_standby(
case REPORT_LUNS:
case RECEIVE_DIAGNOSTIC:
case SEND_DIAGNOSTIC:
+ return 0;
case MAINTENANCE_IN:
switch (cdb[1]) {
case MI_REPORT_TARGET_PGS:
@@ -434,6 +435,7 @@ static inline int core_alua_state_unavailable(
switch (cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
+ return 0;
case MAINTENANCE_IN:
switch (cdb[1]) {
case MI_REPORT_TARGET_PGS:
@@ -474,6 +476,7 @@ static inline int core_alua_state_transition(
switch (cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
+ return 0;
case MAINTENANCE_IN:
switch (cdb[1]) {
case MI_REPORT_TARGET_PGS:
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index f771e9f..9cc574c 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1576,14 +1576,24 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
}
break;
case Opt_blank_pass:
- vol->password = NULL;
- break;
- case Opt_pass:
/* passwords have to be handled differently
* to allow the character used for deliminator
* to be passed within them
*/
+ /*
+ * Check if this is a case where the password
+ * starts with a delimiter
+ */
+ tmp_end = strchr(data, '=');
+ tmp_end++;
+ if (!(tmp_end < end && tmp_end[1] == delim)) {
+ /* No it is not. Set the password to NULL */
+ vol->password = NULL;
+ break;
+ }
+ /* Yes it is. Drop down to Opt_pass below.*/
+ case Opt_pass:
/* Obtain the value string */
value = strchr(data, '=');
value++;
diff --git a/fs/inode.c b/fs/inode.c
index 9f4f5fe..8de457e 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -705,7 +705,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)
* inode to the back of the list so we don't spin on it.
*/
if (!spin_trylock(&inode->i_lock)) {
- list_move_tail(&inode->i_lru, &sb->s_inode_lru);
+ list_move(&inode->i_lru, &sb->s_inode_lru);
continue;
}
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index c685e31..c3ae144 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -176,10 +176,36 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
u64 this_clock, remote_clock;
u64 *ptr, old_val, val;
+#if BITS_PER_LONG != 64
+again:
+ /*
+ * Careful here: The local and the remote clock values need to
+ * be read out atomic as we need to compare the values and
+ * then update either the local or the remote side. So the
+ * cmpxchg64 below only protects one readout.
+ *
+ * We must reread via sched_clock_local() in the retry case on
+ * 32bit as an NMI could use sched_clock_local() via the
+ * tracer and hit between the readout of
+ * the low32bit and the high 32bit portion.
+ */
+ this_clock = sched_clock_local(my_scd);
+ /*
+ * We must enforce atomic readout on 32bit, otherwise the
+ * update on the remote cpu can hit inbetween the readout of
+ * the low32bit and the high 32bit portion.
+ */
+ remote_clock = cmpxchg64(&scd->clock, 0, 0);
+#else
+ /*
+ * On 64bit the read of [my]scd->clock is atomic versus the
+ * update, so we can avoid the above 32bit dance.
+ */
sched_clock_local(my_scd);
again:
this_clock = my_scd->clock;
remote_clock = scd->clock;
+#endif
/*
* Use the opportunity that we have both locks
diff --git a/kernel/sys.c b/kernel/sys.c
index b0003db..6a74b83 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -320,7 +320,6 @@ void kernel_restart_prepare(char *cmd)
system_state = SYSTEM_RESTART;
usermodehelper_disable();
device_shutdown();
- syscore_shutdown();
}
/**
@@ -366,6 +365,7 @@ void kernel_restart(char *cmd)
{
kernel_restart_prepare(cmd);
disable_nonboot_cpus();
+ syscore_shutdown();
if (!cmd)
printk(KERN_EMERG "Restarting system.\n");
else
@@ -391,6 +391,7 @@ static void kernel_shutdown_prepare(enum system_states state)
void kernel_halt(void)
{
kernel_shutdown_prepare(SYSTEM_HALT);
+ disable_nonboot_cpus();
syscore_shutdown();
printk(KERN_EMERG "System halted.\n");
kmsg_dump(KMSG_DUMP_HALT);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index e539dfa..a3c1dd9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -642,7 +642,6 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
free_page(tmp);
}
- free_page((unsigned long)stat->pages);
stat->pages = NULL;
stat->start = NULL;
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index c91fb2f..77758542 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -1082,6 +1082,8 @@ static const struct snd_soc_dapm_route wm8903_intercon[] = {
{ "ROP", NULL, "Right Speaker PGA" },
{ "RON", NULL, "Right Speaker PGA" },
+ { "Charge Pump", NULL, "CLK_DSP" },
+
{ "Left Headphone Output PGA", NULL, "Charge Pump" },
{ "Right Headphone Output PGA", NULL, "Charge Pump" },
{ "Left Line Output PGA", NULL, "Charge Pump" },
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 38a607a..fb95069 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -396,7 +396,7 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
else
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0, cpu_to_le16(wIndex),
+ 0, wIndex,
&tmp, sizeof(tmp), 1000);
up_read(&mixer->chip->shutdown_rwsem);
@@ -427,7 +427,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
else
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
- cpu_to_le16(wValue), cpu_to_le16(wIndex),
+ wValue, wIndex,
NULL, 0, 1000);
up_read(&mixer->chip->shutdown_rwsem);
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index c46171a..b7fa802 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -486,7 +486,7 @@ static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev)
{
int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- cpu_to_le16(1), 0, NULL, 0, 1000);
+ 1, 0, NULL, 0, 1000);
if (ret < 0)
return ret;
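
The mtdchar_mmap hunk above replaces unchecked offset math with explicit wrap-around checks (via set_vm_offset() and the vma_len tests) before touching vma->vm_pgoff. A standalone userspace sketch of that guard, with a hypothetical helper name and test values rather than the kernel's types:

```c
#include <stdint.h>
#include <stdio.h>

/* Reject an (offset, size) pair that would wrap around or run past the
 * end of the backing region -- the same two tests the patch adds.
 * The helper name and region_len parameter are illustrative. */
static int range_fits(uint64_t off, uint64_t size, uint64_t region_len)
{
    if (size && off + size - 1 < off)                  /* off + size wraps past 2^64 */
        return 0;
    if (size > region_len || off > region_len - size)  /* runs past the end */
        return 0;
    return 1;
}

int main(void)
{
    printf("%d\n", range_fits(0xFFFFFFFFFFFFF000ULL, 0x2000, UINT64_MAX)); /* 0: wraps */
    printf("%d\n", range_fits(0x1000, 0x2000, 0x10000));                   /* 1: fits  */
    return 0;
}
```

Checking `off + size - 1 < off` catches the wrap without needing a wider integer type, which is why the patch can keep the arithmetic in resource_size_t.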

3 file diffs suppressed because they are too large


@@ -0,0 +1,909 @@
diff --git a/Makefile b/Makefile
index c156161..0ec4a35 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 44
+SUBLEVEL = 45
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index d81f994..762f7a6 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -45,6 +45,10 @@
#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
#define PPC_INST_MTSPR_DSCR 0x7c1103a6
#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6
+#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
+#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff
#define PPC_INST_SLBFEE 0x7c0007a7
#define PPC_INST_STRING 0x7c00042a
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index ae0843f..3bb7197 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -960,7 +960,10 @@ static int emulate_instruction(struct pt_regs *regs)
#ifdef CONFIG_PPC64
/* Emulate the mfspr rD, DSCR. */
- if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) &&
+ if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
+ PPC_INST_MFSPR_DSCR_USER) ||
+ ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
+ PPC_INST_MFSPR_DSCR)) &&
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mfdscr, regs);
rd = (instword >> 21) & 0x1f;
@@ -968,7 +971,10 @@ static int emulate_instruction(struct pt_regs *regs)
return 0;
}
/* Emulate the mtspr DSCR, rD. */
- if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) &&
+ if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
+ PPC_INST_MTSPR_DSCR_USER) ||
+ ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
+ PPC_INST_MTSPR_DSCR)) &&
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mtdscr, regs);
rd = (instword >> 21) & 0x1f;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 6e8f677..6130719 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -201,7 +201,7 @@ int __node_distance(int a, int b)
int distance = LOCAL_DISTANCE;
if (!form1_affinity)
- return distance;
+ return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
for (i = 0; i < distance_ref_points_depth; i++) {
if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 520b426..fd1a099 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -310,7 +310,7 @@ void intel_pmu_lbr_read(void)
* - in case there is no HW filter
* - in case the HW filter has errata or limitations
*/
-static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
+static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
u64 br_type = event->attr.branch_sample_type;
int mask = 0;
@@ -318,8 +318,11 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
if (br_type & PERF_SAMPLE_BRANCH_USER)
mask |= X86_BR_USER;
- if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
+ if (br_type & PERF_SAMPLE_BRANCH_KERNEL) {
+ if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
mask |= X86_BR_KERNEL;
+ }
/* we ignore BRANCH_HV here */
@@ -339,6 +342,8 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
* be used by fixup code for some CPU
*/
event->hw.branch_reg.reg = mask;
+
+ return 0;
}
/*
@@ -386,7 +391,9 @@ int intel_pmu_setup_lbr_filter(struct perf_event *event)
/*
* setup SW LBR filter
*/
- intel_pmu_setup_sw_lbr_filter(event);
+ ret = intel_pmu_setup_sw_lbr_filter(event);
+ if (ret)
+ return ret;
/*
* setup HW LBR filter, if any
@@ -442,8 +449,18 @@ static int branch_type(unsigned long from, unsigned long to)
return X86_BR_NONE;
addr = buf;
- } else
- addr = (void *)from;
+ } else {
+ /*
+ * The LBR logs any address in the IP, even if the IP just
+ * faulted. This means userspace can control the from address.
+ * Ensure we don't blindy read any address by validating it is
+ * a known text address.
+ */
+ if (kernel_text_address(from))
+ addr = (void *)from;
+ else
+ return X86_BR_NONE;
+ }
/*
* decoder needs to know the ABI especially
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index fae7090..71d37f5 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -45,11 +45,15 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
int i;
unsigned long puds = 0, pmds = 0, ptes = 0, tables;
unsigned long start = 0, good_end;
+ unsigned long pgd_extra = 0;
phys_addr_t base;
for (i = 0; i < nr_range; i++) {
unsigned long range, extra;
+ if ((mr[i].end >> PGDIR_SHIFT) - (mr[i].start >> PGDIR_SHIFT))
+ pgd_extra++;
+
range = mr[i].end - mr[i].start;
puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
@@ -74,6 +78,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
+ tables += (pgd_extra * PAGE_SIZE);
#ifdef CONFIG_X86_32
/* for fixmap */
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 53ddbc7..0bf5bd1 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -77,7 +77,7 @@ static unsigned int longhaul_index;
static int scale_voltage;
static int disable_acpi_c3;
static int revid_errata;
-
+static int enable;
/* Clock ratios multiplied by 10 */
static int mults[32];
@@ -965,6 +965,10 @@ static int __init longhaul_init(void)
if (!x86_match_cpu(longhaul_id))
return -ENODEV;
+ if (!enable) {
+ printk(KERN_ERR PFX "Option \"enable\" not set. Aborting.\n");
+ return -ENODEV;
+ }
#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
printk(KERN_ERR PFX "More than 1 CPU detected, "
@@ -1021,6 +1025,10 @@ MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
* such. */
module_param(revid_errata, int, 0644);
MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
+/* By default driver is disabled to prevent incompatible
+ * system freeze. */
+module_param(enable, int, 0644);
+MODULE_PARM_DESC(enable, "Enable driver");
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors.");
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 020a7d7..69bea56 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -370,6 +370,7 @@ void intel_dvo_init(struct drm_device *dev)
const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
struct i2c_adapter *i2c;
int gpio;
+ bool dvoinit;
/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
@@ -389,7 +390,17 @@ void intel_dvo_init(struct drm_device *dev)
i2c = &dev_priv->gmbus[gpio].adapter;
intel_dvo->dev = *dvo;
- if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
+
+ /* GMBUS NAK handling seems to be unstable, hence let the
+ * transmitter detection run in bit banging mode for now.
+ */
+ intel_gmbus_force_bit(i2c, true);
+
+ dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
+
+ intel_gmbus_force_bit(i2c, false);
+
+ if (!dvoinit)
continue;
intel_encoder->type = INTEL_OUTPUT_DVO;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index a8b28c4..1ad5906 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -793,6 +793,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Fujitsu Esprimo Q900",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
+ },
+ },
{ } /* terminating entry */
};
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 5ce9bf5..43672b6 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1389,10 +1389,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
DRM_DEBUG("atom firmware requested %08x %dkb\n",
- firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
- firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
+ le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
+ le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
- usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+ usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index c62132c..e458acb 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -445,6 +445,16 @@ void evergreen_hpd_init(struct radeon_device *rdev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* don't try to enable hpd on eDP or LVDS avoid breaking the
+ * aux dp channel on imac and help (but not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ * also avoid interrupt storms during dpms.
+ */
+ continue;
+ }
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
@@ -1146,17 +1156,16 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
radeon_wait_for_vblank(rdev, i);
- tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
radeon_wait_for_vblank(rdev, i);
- tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
@@ -1168,6 +1177,15 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
break;
udelay(1);
}
+
+ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ save->crtc_enabled[i] = false;
+ /* ***** */
} else {
save->crtc_enabled[i] = false;
}
@@ -1185,6 +1203,22 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
}
/* wait for the MC to settle */
udelay(100);
+
+ /* lock double buffered regs */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
+ tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (!(tmp & 1)) {
+ tmp |= 1;
+ WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ }
+ }
}
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1206,6 +1240,33 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+ /* unlock regs and wait for update */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
+ if ((tmp & 0x3) != 0) {
+ tmp &= ~0x3;
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
+ tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (tmp & 1) {
+ tmp &= ~1;
+ WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+ break;
+ udelay(1);
+ }
+ }
+ }
+
/* unblackout the MC */
tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
tmp &= ~BLACKOUT_MODE_MASK;
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 34a0e85..e534e5d 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -225,6 +225,8 @@
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
+#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
+#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
#define EVERGREEN_DC_GPIO_HPD_A 0x64b4
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 7dffc57..d706da8 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -668,7 +668,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x990F) ||
(rdev->pdev->device == 0x9910) ||
(rdev->pdev->device == 0x9917) ||
- (rdev->pdev->device == 0x9999)) {
+ (rdev->pdev->device == 0x9999) ||
+ (rdev->pdev->device == 0x999C)) {
rdev->config.cayman.max_simds_per_se = 6;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9903) ||
@@ -677,7 +678,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x990D) ||
(rdev->pdev->device == 0x990E) ||
(rdev->pdev->device == 0x9913) ||
- (rdev->pdev->device == 0x9918)) {
+ (rdev->pdev->device == 0x9918) ||
+ (rdev->pdev->device == 0x999D)) {
rdev->config.cayman.max_simds_per_se = 4;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9919) ||
@@ -911,6 +913,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
WREG32(GB_BACKEND_MAP, gb_backend_map);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ if (ASIC_IS_DCE6(rdev))
+ WREG32(DMIF_ADDR_CALC, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
/* primary versions */
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 2aa7046..d90b8b7 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -42,6 +42,10 @@
#define CAYMAN_MAX_TCC_MASK 0xFF
#define DMIF_ADDR_CONFIG 0xBD4
+
+/* DCE6 only */
+#define DMIF_ADDR_CALC 0xC00
+
#define SRBM_GFX_CNTL 0x0E44
#define RINGID(x) (((x) & 0x3) << 0)
#define VMID(x) (((x) & 0x7) << 0)
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5e30e12..38d87e1 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1998,6 +1998,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
num_modes = power_info->info.ucNumOfPowerModeEntries;
if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+ if (num_modes == 0)
+ return state_index;
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
if (!rdev->pm.power_state)
return state_index;
@@ -2396,6 +2398,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+ if (power_info->pplib.ucNumStates == 0)
+ return state_index;
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
power_info->pplib.ucNumStates, GFP_KERNEL);
if (!rdev->pm.power_state)
@@ -2478,6 +2482,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
u16 data_offset;
u8 frev, crev;
+ u8 *power_state_offset;
if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset))
@@ -2494,15 +2499,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
non_clock_info_array = (struct _NonClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+ if (state_array->ucNumEntries == 0)
+ return state_index;
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
state_array->ucNumEntries, GFP_KERNEL);
if (!rdev->pm.power_state)
return state_index;
+ power_state_offset = (u8 *)state_array->states;
for (i = 0; i < state_array->ucNumEntries; i++) {
mode_index = 0;
- power_state = (union pplib_power_state *)&state_array->states[i];
- /* XXX this might be an inagua bug... */
- non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
+ power_state = (union pplib_power_state *)power_state_offset;
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
@@ -2514,9 +2521,6 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
if (power_state->v2.ucNumDPMLevels) {
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
clock_array_index = power_state->v2.clockInfoIndex[j];
- /* XXX this might be an inagua bug... */
- if (clock_array_index >= clock_info_array->ucNumEntries)
- continue;
clock_info = (union pplib_clock_info *)
&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
valid = radeon_atombios_parse_pplib_clock_info(rdev,
@@ -2538,6 +2542,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
non_clock_info);
state_index++;
}
+ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
}
/* if multiple clock modes, mark the lowest as no display */
for (i = 0; i < state_index; i++) {
@@ -2584,7 +2589,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
default:
break;
}
- } else {
+ }
+
+ if (state_index == 0) {
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
if (rdev->pm.power_state) {
rdev->pm.power_state[0].clock_info =
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index b8459bd..bf6ca2d 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -872,7 +872,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct radeon_device *rdev = dev->dev_private;
seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
- seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+ /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
+ if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
+ seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
+ else
+ seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
if (rdev->asic->pm.get_memory_clock)
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 1197f21..5508ad7 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1799,6 +1799,7 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.backend_map = gb_backend_map;
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CALC, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
/* primary versions */
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 2c2bc63..45e240d 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -55,6 +55,8 @@
#define DMIF_ADDR_CONFIG 0xBD4
+#define DMIF_ADDR_CALC 0xC00
+
#define SRBM_STATUS 0xE50
#define CC_SYS_RB_BACKEND_DISABLE 0xe80
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index b68d28a..33a1760 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1327,7 +1327,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
static int __devinit ibmveth_probe(struct vio_dev *dev,
const struct vio_device_id *id)
{
- int rc, i;
+ int rc, i, mac_len;
struct net_device *netdev;
struct ibmveth_adapter *adapter;
unsigned char *mac_addr_p;
@@ -1337,11 +1337,19 @@ static int __devinit ibmveth_probe(struct vio_dev *dev,
dev->unit_address);
mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
- NULL);
+ &mac_len);
if (!mac_addr_p) {
dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
return -EINVAL;
}
+ /* Workaround for old/broken pHyp */
+ if (mac_len == 8)
+ mac_addr_p += 2;
+ else if (mac_len != 6) {
+ dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
+ mac_len);
+ return -EINVAL;
+ }
mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
VETH_MCAST_FILTER_SIZE, NULL);
@@ -1366,17 +1374,6 @@ static int __devinit ibmveth_probe(struct vio_dev *dev,
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
- /*
- * Some older boxes running PHYP non-natively have an OF that returns
- * a 8-byte local-mac-address field (and the first 2 bytes have to be
- * ignored) while newer boxes' OF return a 6-byte field. Note that
- * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
- * The RPA doc specifies that the first byte must be 10b, so we'll
- * just look for it to solve this 8 vs. 6 byte field issue
- */
- if ((*mac_addr_p & 0x3) != 0x02)
- mac_addr_p += 2;
-
adapter->mac_addr = 0;
memcpy(&adapter->mac_addr, mac_addr_p, 6);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 978af21..dd037dd 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5168,6 +5168,14 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
goto err_stop_0;
}
+ /* 8168evl does not automatically pad to minimum length. */
+ if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
+ skb->len < ETH_ZLEN)) {
+ if (skb_padto(skb, ETH_ZLEN))
+ goto err_update_stats;
+ skb_put(skb, ETH_ZLEN - skb->len);
+ }
+
if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
goto err_stop_0;
@@ -5239,6 +5247,7 @@ err_dma_1:
rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
dev_kfree_skb(skb);
+err_update_stats:
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a3c9374..843b3e8 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2459,14 +2459,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* TD list.
*/
if (list_empty(&ep_ring->td_list)) {
- xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
- "with no TDs queued?\n",
- TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
- ep_index);
- xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
- (le32_to_cpu(event->flags) &
- TRB_TYPE_BITMASK)>>10);
- xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+ /*
+ * A stopped endpoint may generate an extra completion
+ * event if the device was suspended. Don't print
+ * warnings.
+ */
+ if (!(trb_comp_code == COMP_STOP ||
+ trb_comp_code == COMP_STOP_INVAL)) {
+ xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
+ TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
+ ep_index);
+ xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+ (le32_to_cpu(event->flags) &
+ TRB_TYPE_BITMASK)>>10);
+ xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+ }
if (ep->skip) {
ep->skip = false;
xhci_dbg(xhci, "td_list is empty while skip "
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 1feb68e..b1cdb0a 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -61,15 +61,6 @@ static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry)
/* This is an autofs submount, we can't expire it */
if (autofs_type_indirect(sbi->type))
goto done;
-
- /*
- * Otherwise it's an offset mount and we need to check
- * if we can umount its mount, if there is one.
- */
- if (!d_mountpoint(path.dentry)) {
- status = 0;
- goto done;
- }
}
/* Update the expiry counter if fs is busy */
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 9202b22..86a8c88 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1630,6 +1630,10 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
return 0;
ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &offset);
+ if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
+ ext4_warning(sb, "resize would cause inodes_count overflow");
+ return -EINVAL;
+ }
ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);
n_desc_blocks = (n_group + EXT4_DESC_PER_BLOCK(sb)) /
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index d9928c1..1a13caa 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -231,6 +231,7 @@
{0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -238,11 +239,13 @@
{0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
@@ -594,6 +597,8 @@
{0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x999C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x999D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 31fdc48..0caf1f8 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -608,9 +608,9 @@ void audit_trim_trees(void)
}
spin_unlock(&hash_lock);
trim_marked(tree);
- put_tree(tree);
drop_collected_mounts(root_mnt);
skip_it:
+ put_tree(tree);
mutex_lock(&audit_filter_mutex);
}
list_del(&cursor);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b29ebd3..75c11bf 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4855,36 +4855,32 @@ void trace_init_global_iter(struct trace_iterator *iter)
iter->cpu_file = TRACE_PIPE_ALL_CPU;
}
-static void
-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
- static arch_spinlock_t ftrace_dump_lock =
- (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
+ static atomic_t dump_running;
unsigned int old_userobj;
- static int dump_ran;
unsigned long flags;
int cnt = 0, cpu;
- /* only one dump */
- local_irq_save(flags);
- arch_spin_lock(&ftrace_dump_lock);
- if (dump_ran)
- goto out;
-
- dump_ran = 1;
+ /* Only allow one dump user at a time. */
+ if (atomic_inc_return(&dump_running) != 1) {
+ atomic_dec(&dump_running);
+ return;
+ }
+ /*
+ * Always turn off tracing when we dump.
+ * We don't need to show trace output of what happens
+ * between multiple crashes.
+ *
+ * If the user does a sysrq-z, then they can re-enable
+ * tracing with echo 1 > tracing_on.
+ */
tracing_off();
- /* Did function tracer already get disabled? */
- if (ftrace_is_dead()) {
- printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
- printk("# MAY BE MISSING FUNCTION EVENTS\n");
- }
-
- if (disable_tracing)
- ftrace_kill();
+ local_irq_save(flags);
trace_init_global_iter(&iter);
@@ -4917,6 +4913,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
printk(KERN_TRACE "Dumping ftrace buffer:\n");
+ /* Did function tracer already get disabled? */
+ if (ftrace_is_dead()) {
+ printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+ printk("# MAY BE MISSING FUNCTION EVENTS\n");
+ }
+
/*
* We need to stop all tracing on all CPUS to read the
* the next buffer. This is a bit expensive, but is
@@ -4956,26 +4958,14 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
printk(KERN_TRACE "---------------------------------\n");
out_enable:
- /* Re-enable tracing if requested */
- if (!disable_tracing) {
- trace_flags |= old_userobj;
+ trace_flags |= old_userobj;
- for_each_tracing_cpu(cpu) {
- atomic_dec(&iter.tr->data[cpu]->disabled);
- }
- tracing_on();
+ for_each_tracing_cpu(cpu) {
+ atomic_dec(&iter.tr->data[cpu]->disabled);
}
-
- out:
- arch_spin_unlock(&ftrace_dump_lock);
+ atomic_dec(&dump_running);
local_irq_restore(flags);
}
-
-/* By default: disable tracing after the dump */
-void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
-{
- __ftrace_dump(true, oops_dump_mode);
-}
EXPORT_SYMBOL_GPL(ftrace_dump);
__init static int tracer_alloc_buffers(void)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 288541f..09fd98a 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -461,8 +461,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST 100000000
-static void
-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;
/* Wrap the real function entry probe to avoid possible hanging */
@@ -472,8 +470,11 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
ftrace_graph_stop();
printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
- if (ftrace_dump_on_oops)
- __ftrace_dump(false, DUMP_ALL);
+ if (ftrace_dump_on_oops) {
+ ftrace_dump(DUMP_ALL);
+ /* ftrace_dump() disables tracing */
+ tracing_on();
+ }
return 0;
}
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index 1aa5cac..55add93 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -37,14 +37,10 @@ static int get_callid(const char *dptr, unsigned int dataoff,
if (ret > 0)
break;
if (!ret)
- return 0;
+ return -EINVAL;
dataoff += *matchoff;
}
- /* Empty callid is useless */
- if (!*matchlen)
- return -EINVAL;
-
/* Too large is useless */
if (*matchlen > IP_VS_PEDATA_MAXLEN)
return -EINVAL;
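
The ftrace_dump() rework in the trace.c hunks above replaces an arch spinlock plus a one-shot dump_ran flag with an atomic counter, so concurrent dumpers simply back off and later dumps stay possible. A rough userspace sketch of that pattern, with C11 atomics standing in for the kernel's atomic_t and purely illustrative names:

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int dump_running;

/* Only the caller that raises the counter from 0 to 1 performs the dump;
 * any racing caller undoes its increment and returns immediately. */
static void dump_once(const char *who)
{
    if (atomic_fetch_add(&dump_running, 1) != 0) {
        atomic_fetch_sub(&dump_running, 1);
        printf("%s: dump already running, skipping\n", who);
        return;
    }
    printf("%s: dumping buffers\n", who);
    atomic_fetch_sub(&dump_running, 1);   /* allow later dumps again */
}

int main(void)
{
    dump_once("cpu0");   /* takes the dump path */
    dump_once("cpu1");   /* would be skipped only if it raced with cpu0 */
    return 0;
}
```

Unlike the old dump_ran flag, the counter drops back to zero on exit, so a later oops can still produce a dump.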

File diff suppressed because it is too large


@@ -0,0 +1,143 @@
diff --git a/Makefile b/Makefile
index 3d88eb8..a85d4eb 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 46
+SUBLEVEL = 47
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e458acb..6a8776e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2443,8 +2443,8 @@ int evergreen_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
} else {
/* size in MB on evergreen/cayman/tn */
- rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
- rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
}
rdev->mc.visible_vram_size = rdev->mc.aper_size;
r700_vram_gtt_location(rdev, &rdev->mc);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index f493c64..49b55ed 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -744,7 +744,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
return r;
}
DRM_INFO("radeon: %uM of VRAM memory ready\n",
- (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
+ (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
rdev->mc.gtt_size >> PAGE_SHIFT);
if (r) {
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 5508ad7..2dbd585 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2464,8 +2464,8 @@ static int si_mc_init(struct radeon_device *rdev)
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* size in MB on si */
- rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
- rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
si_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index a72bf25..56b178a 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -1410,14 +1410,18 @@ static int __devinit abituguru_probe(struct platform_device *pdev)
pr_info("found Abit uGuru\n");
/* Register sysfs hooks */
- for (i = 0; i < sysfs_attr_i; i++)
- if (device_create_file(&pdev->dev,
- &data->sysfs_attr[i].dev_attr))
+ for (i = 0; i < sysfs_attr_i; i++) {
+ res = device_create_file(&pdev->dev,
+ &data->sysfs_attr[i].dev_attr);
+ if (res)
goto abituguru_probe_error;
- for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++)
- if (device_create_file(&pdev->dev,
- &abituguru_sysfs_attr[i].dev_attr))
+ }
+ for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) {
+ res = device_create_file(&pdev->dev,
+ &abituguru_sysfs_attr[i].dev_attr);
+ if (res)
goto abituguru_probe_error;
+ }
data->hwmon_dev = hwmon_device_register(&pdev->dev);
if (!IS_ERR(data->hwmon_dev))
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 6193349..3c2812f 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -349,7 +349,8 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
/* Enable the adapter */
dw_writel(dev, 1, DW_IC_ENABLE);
- /* Enable interrupts */
+ /* Clear and enable interrupts */
+ i2c_dw_clear_int(dev);
dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK);
}
diff --git a/drivers/media/dvb/mantis/mantis_dvb.c b/drivers/media/dvb/mantis/mantis_dvb.c
index e5180e4..5d15c6b 100644
--- a/drivers/media/dvb/mantis/mantis_dvb.c
+++ b/drivers/media/dvb/mantis/mantis_dvb.c
@@ -248,8 +248,10 @@ int __devinit mantis_dvb_init(struct mantis_pci *mantis)
err5:
tasklet_kill(&mantis->tasklet);
dvb_net_release(&mantis->dvbnet);
- dvb_unregister_frontend(mantis->fe);
- dvb_frontend_detach(mantis->fe);
+ if (mantis->fe) {
+ dvb_unregister_frontend(mantis->fe);
+ dvb_frontend_detach(mantis->fe);
+ }
err4:
mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_mem);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 14f8e1f..3a65f43 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1653,7 +1653,11 @@ static noinline int copy_to_sk(struct btrfs_root *root,
item_off = btrfs_item_ptr_offset(leaf, i);
item_len = btrfs_item_size_nr(leaf, i);
- if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
+ btrfs_item_key_to_cpu(leaf, key, i);
+ if (!key_in_sk(key, sk))
+ continue;
+
+ if (sizeof(sh) + item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
item_len = 0;
if (sizeof(sh) + item_len + *sk_offset >
@@ -1662,10 +1666,6 @@ static noinline int copy_to_sk(struct btrfs_root *root,
goto overflow;
}
- btrfs_item_key_to_cpu(leaf, key, i);
- if (!key_in_sk(key, sk))
- continue;
-
sh.objectid = key->objectid;
sh.offset = key->offset;
sh.type = key->type;
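
The evergreen.c and si.c hunks above change `* 1024 * 1024` to `* 1024ULL * 1024ULL` when converting the VRAM size register from megabytes to bytes. A small sketch of the overflow this avoids; the register value here is a made-up stand-in for RREG32(CONFIG_MEMSIZE):

```c
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
    uint32_t vram_mb = 8192;                       /* e.g. an 8 GB card, size in MB */

    uint64_t wrong = vram_mb * 1024 * 1024;        /* 32-bit multiply, wraps to 0 */
    uint64_t right = vram_mb * 1024ULL * 1024ULL;  /* promoted to 64 bits first */

    printf("wrong: %" PRIu64 " bytes\n", wrong);   /* 0 */
    printf("right: %" PRIu64 " bytes\n", right);   /* 8589934592 */
    return 0;
}
```

With plain int constants the multiply is done in 32 bits, so anything from 4 GB upward wraps before the result is widened; promoting one operand to unsigned long long first keeps the full value.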

3 file diffs suppressed because they are too large


@@ -0,0 +1,953 @@
diff --git a/Makefile b/Makefile
index 6dd1ffb..5adfadf 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 50
+SUBLEVEL = 51
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index 2a81d32..e51e5cd 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -90,4 +90,6 @@ uint64_t __ashrdi3(uint64_t, unsigned int);
EXPORT_SYMBOL(__ashrdi3);
uint64_t __ashldi3(uint64_t, unsigned int);
EXPORT_SYMBOL(__ashldi3);
+int __ffsdi2(uint64_t);
+EXPORT_SYMBOL(__ffsdi2);
#endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b27b452..3663e0b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -555,8 +555,6 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
if (index != XCR_XFEATURE_ENABLED_MASK)
return 1;
xcr0 = xcr;
- if (kvm_x86_ops->get_cpl(vcpu) != 0)
- return 1;
if (!(xcr0 & XSTATE_FP))
return 1;
if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
@@ -570,7 +568,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
- if (__kvm_set_xcr(vcpu, index, xcr)) {
+ if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
+ __kvm_set_xcr(vcpu, index, xcr)) {
kvm_inject_gp(vcpu, 0);
return 1;
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 7f1ea56..4c63665 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1453,6 +1453,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
/* XXX the notifier code should handle this better */
if (!cn->notifier_head.head) {
srcu_cleanup_notifier_head(&cn->notifier_head);
+ list_del(&cn->node);
kfree(cn);
}
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index f030d9e..3f505d5 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -133,7 +133,7 @@ static int cyttsp_exit_bl_mode(struct cyttsp *ts)
memcpy(bl_cmd, bl_command, sizeof(bl_command));
if (ts->pdata->bl_keys)
memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS],
- ts->pdata->bl_keys, sizeof(bl_command));
+ ts->pdata->bl_keys, CY_NUM_BL_KEYS);
error = ttsp_write_block_data(ts, CY_REG_BASE,
sizeof(bl_cmd), bl_cmd);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 2e1f806..b6ed7e9 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -704,6 +704,12 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
struct arp_pkt *arp = arp_pkt(skb);
struct slave *tx_slave = NULL;
+ /* Don't modify or load balance ARPs that do not originate locally
+ * (e.g.,arrive via a bridge).
+ */
+ if (!bond_slave_has_mac(bond, arp->mac_src))
+ return NULL;
+
if (arp->op_code == htons(ARPOP_REPLY)) {
/* the arp must be sent on the selected
* rx channel
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 4581aa5..51f1766 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -18,6 +18,7 @@
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/if_bonding.h>
+#include <linux/etherdevice.h>
#include <linux/cpumask.h>
#include <linux/in6.h>
#include <linux/netpoll.h>
@@ -450,6 +451,18 @@ static inline void bond_destroy_proc_dir(struct bond_net *bn)
}
#endif
+static inline struct slave *bond_slave_has_mac(struct bonding *bond,
+ const u8 *mac)
+{
+ int i = 0;
+ struct slave *tmp;
+
+ bond_for_each_slave(bond, tmp, i)
+ if (!compare_ether_addr_64bits(mac, tmp->dev->dev_addr))
+ return tmp;
+
+ return NULL;
+}
/* exported from bond_main.c */
extern int bond_net_id;
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index ca2748a..8de54e5 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -520,6 +520,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
return 0;
no_clock:
+ iounmap(etsects->regs);
no_ioremap:
release_resource(etsects->rsrc);
no_resource:
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index b3287c0..4ce981c 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1097,6 +1097,7 @@ static void cp_clean_rings (struct cp_private *cp)
cp->dev->stats.tx_dropped++;
}
}
+ netdev_reset_queue(cp->dev);
memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index cf20388..2a59e7a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5126,7 +5126,20 @@ err_out:
return -EIO;
}
-static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
+static bool rtl_skb_pad(struct sk_buff *skb)
+{
+ if (skb_padto(skb, ETH_ZLEN))
+ return false;
+ skb_put(skb, ETH_ZLEN - skb->len);
+ return true;
+}
+
+static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
+{
+ return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
+}
+
+static inline bool rtl8169_tso_csum(struct rtl8169_private *tp,
struct sk_buff *skb, u32 *opts)
{
const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
@@ -5139,13 +5152,20 @@ static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
const struct iphdr *ip = ip_hdr(skb);
+ if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
+ return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb);
+
if (ip->protocol == IPPROTO_TCP)
opts[offset] |= info->checksum.tcp;
else if (ip->protocol == IPPROTO_UDP)
opts[offset] |= info->checksum.udp;
else
WARN_ON_ONCE(1);
+ } else {
+ if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
+ return rtl_skb_pad(skb);
}
+ return true;
}
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
@@ -5166,17 +5186,15 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
goto err_stop_0;
}
- /* 8168evl does not automatically pad to minimum length. */
- if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
- skb->len < ETH_ZLEN)) {
- if (skb_padto(skb, ETH_ZLEN))
- goto err_update_stats;
- skb_put(skb, ETH_ZLEN - skb->len);
- }
-
if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
goto err_stop_0;
+ opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
+ opts[0] = DescOwn;
+
+ if (!rtl8169_tso_csum(tp, skb, opts))
+ goto err_update_stats;
+
len = skb_headlen(skb);
mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(d, mapping))) {
@@ -5188,11 +5206,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
tp->tx_skb[entry].len = len;
txd->addr = cpu_to_le64(mapping);
- opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
- opts[0] = DescOwn;
-
- rtl8169_tso_csum(tp, skb, opts);
-
frags = rtl8169_xmit_frags(tp, skb, opts);
if (frags < 0)
goto err_dma_1;
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index a0e8f80..bf6a818 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -52,6 +52,8 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
port_index = rr_priv(team)->sent_packets++ % team->port_count;
port = team_get_port_by_index_rcu(team, port_index);
+ if (unlikely(!port))
+ goto drop;
port = __get_first_port_up(team, port);
if (unlikely(!port))
goto drop;
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 1ab0560..a7c4324 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -831,11 +831,11 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
return 0;
sess->time2retain_timer_flags |= ISCSI_TF_STOP;
- spin_unlock_bh(&se_tpg->session_lock);
+ spin_unlock(&se_tpg->session_lock);
del_timer_sync(&sess->time2retain_timer);
- spin_lock_bh(&se_tpg->session_lock);
+ spin_lock(&se_tpg->session_lock);
sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
pr_debug("Stopped Time2Retain Timer for SID: %u\n",
sess->sid);
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 3377437..a39a08c 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -179,7 +179,8 @@ static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
- { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
+ { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) },
+ { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
};
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
index b353e7e..4a2423e 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.h
+++ b/drivers/usb/serial/ti_usb_3410_5052.h
@@ -52,7 +52,9 @@
/* Abbott Diabetics vendor and product ids */
#define ABBOTT_VENDOR_ID 0x1a61
-#define ABBOTT_PRODUCT_ID 0x3410
+#define ABBOTT_STEREO_PLUG_ID 0x3410
+#define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID
+#define ABBOTT_STRIP_PORT_ID 0x3420
/* Commands */
#define TI_GET_VERSION 0x01
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 6f292dd..f255d37 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -577,7 +577,6 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
int add = (arg & IN_MASK_ADD);
int ret;
- /* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
fsn_mark = fsnotify_find_inode_mark(group, inode);
@@ -628,7 +627,6 @@ static int inotify_new_watch(struct fsnotify_group *group,
struct idr *idr = &group->inotify_data.idr;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
- /* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
@@ -757,6 +755,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
int ret, fput_needed;
unsigned flags = 0;
+ /* don't allow invalid bits: we don't want flags set */
+ if (unlikely(!(mask & ALL_INOTIFY_BITS)))
+ return -EINVAL;
+
filp = fget_light(fd, &fput_needed);
if (unlikely(!filp))
return -EBADF;
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 2ae1371..1c33dd7 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -105,9 +105,14 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
* @head: the head for your list.
* @member: the name of the hlist_nulls_node within the struct.
*
+ * The barrier() is needed to make sure compiler doesn't cache first element [1],
+ * as this loop can be restarted [2]
+ * [1] Documentation/atomic_ops.txt around line 114
+ * [2] Documentation/RCU/rculist_nulls.txt around line 146
*/
#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
- for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
+ for (({barrier();}), \
+ pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
(!is_a_nulls(pos)) && \
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 8f15b1d..9b54ebe 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -336,6 +336,9 @@ extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
struct timespec;
+/* The __sys_...msg variants allow MSG_CMSG_COMPAT */
+extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags);
+extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
unsigned int flags, struct timespec *timeout);
extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
diff --git a/net/compat.c b/net/compat.c
index ae6d67a..014e1c7 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -743,19 +743,25 @@ static unsigned char nas[21] = {
asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags)
{
- return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+ return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
}
asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned vlen, unsigned int flags)
{
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT);
}
asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
{
- return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+ return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
}
asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned flags)
@@ -777,6 +783,9 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
int datagrams;
struct timespec ktspec;
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+
if (COMPAT_USE_64BIT_TIME)
return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT,
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index b57532d..a16509c 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -722,6 +722,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
tiph = &tunnel->parms.iph;
}
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
if ((dst = tiph->daddr) == 0) {
/* NBMA tunnel */
@@ -865,7 +866,6 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
skb_reset_transport_header(skb);
skb_push(skb, gre_hlen);
skb_reset_network_header(skb);
- memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
IPSKB_REROUTED);
skb_dst_drop(skb);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index ae1413e..d2f6348 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -448,6 +448,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
if (tos & 1)
tos = old_iph->tos;
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
if (!dst) {
/* NBMA tunnel */
if ((rt = skb_rtable(skb)) == NULL) {
@@ -530,7 +531,6 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
skb->transport_header = skb->network_header;
skb_push(skb, sizeof(struct iphdr));
skb_reset_network_header(skb);
- memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
IPSKB_REROUTED);
skb_dst_drop(skb);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index dcb19f5..0b91c30 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3055,8 +3055,11 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
for (i = 0; i < shi->nr_frags; ++i) {
const struct skb_frag_struct *f = &shi->frags[i];
- struct page *page = skb_frag_page(f);
- sg_set_page(&sg, page, skb_frag_size(f), f->page_offset);
+ unsigned int offset = f->page_offset;
+ struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
+
+ sg_set_page(&sg, page, skb_frag_size(f),
+ offset_in_page(offset));
if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
return 1;
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 762c78f..55d96c3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3038,8 +3038,8 @@ static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
* tcp_xmit_retransmit_queue().
*/
static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
- int prior_sacked, bool is_dupack,
- int flag)
+ int prior_sacked, int prior_packets,
+ bool is_dupack, int flag)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -3105,7 +3105,8 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
tcp_add_reno_sack(sk);
} else
do_lost = tcp_try_undo_partial(sk, pkts_acked);
- newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
+ newly_acked_sacked = prior_packets - tp->packets_out +
+ tp->sacked_out - prior_sacked;
break;
case TCP_CA_Loss:
if (flag & FLAG_DATA_ACKED)
@@ -3127,7 +3128,8 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
if (is_dupack)
tcp_add_reno_sack(sk);
}
- newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
+ newly_acked_sacked = prior_packets - tp->packets_out +
+ tp->sacked_out - prior_sacked;
if (icsk->icsk_ca_state <= TCP_CA_Disorder)
tcp_try_undo_dsack(sk);
@@ -3740,9 +3742,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
bool is_dupack = false;
u32 prior_in_flight;
u32 prior_fackets;
- int prior_packets;
+ int prior_packets = tp->packets_out;
int prior_sacked = tp->sacked_out;
int pkts_acked = 0;
+ int previous_packets_out = 0;
int frto_cwnd = 0;
/* If the ack is older than previous acks
@@ -3819,14 +3822,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
sk->sk_err_soft = 0;
icsk->icsk_probes_out = 0;
tp->rcv_tstamp = tcp_time_stamp;
- prior_packets = tp->packets_out;
if (!prior_packets)
goto no_queue;
/* See if we can take anything off of the retransmit queue. */
+ previous_packets_out = tp->packets_out;
flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
- pkts_acked = prior_packets - tp->packets_out;
+ pkts_acked = previous_packets_out - tp->packets_out;
if (tp->frto_counter)
frto_cwnd = tcp_process_frto(sk, flag);
@@ -3841,7 +3844,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
tcp_cong_avoid(sk, ack, prior_in_flight);
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
- is_dupack, flag);
+ prior_packets, is_dupack, flag);
} else {
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
tcp_cong_avoid(sk, ack, prior_in_flight);
@@ -3856,7 +3859,7 @@ no_queue:
/* If data was DSACKed, see if we can undo a cwnd reduction. */
if (flag & FLAG_DSACKING_ACK)
tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
- is_dupack, flag);
+ prior_packets, is_dupack, flag);
/* If this ack opens up a zero window, clear backoff. It was
* being used to time the probes, and is probably far higher than
* it needs to be for normal retransmission.
@@ -3876,7 +3879,7 @@ old_ack:
if (TCP_SKB_CB(skb)->sacked) {
flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
- is_dupack, flag);
+ prior_packets, is_dupack, flag);
}
SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 9db21e3..12999a3 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -835,11 +835,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
&md5);
tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
- if (tcp_packets_in_flight(tp) == 0) {
+ if (tcp_packets_in_flight(tp) == 0)
tcp_ca_event(sk, CA_EVENT_TX_START);
- skb->ooo_okay = 1;
- } else
- skb->ooo_okay = 0;
+
+ /* if no packet is in qdisc/device queue, then allow XPS to select
+ * another queue.
+ */
+ skb->ooo_okay = sk_wmem_alloc_get(sk) == 0;
skb_push(skb, tcp_header_size);
skb_reset_transport_header(skb);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 2c496d6..f4fe3c0 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2432,8 +2432,10 @@ static void init_loopback(struct net_device *dev)
sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
/* Failure cases are ignored */
- if (!IS_ERR(sp_rt))
+ if (!IS_ERR(sp_rt)) {
+ sp_ifa->rt = sp_rt;
ip6_ins_rt(sp_rt);
+ }
}
read_unlock_bh(&idev->lock);
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ce661ba..bf290ce 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1236,7 +1236,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
if (WARN_ON(np->cork.opt))
return -EINVAL;
- np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
+ np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
if (unlikely(np->cork.opt == NULL))
return -ENOBUFS;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 9728a75..c6dee80 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -350,19 +350,19 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
skb_put(skb, 2);
/* Copy user data into skb */
- error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
+ error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov,
+ total_len);
if (error < 0) {
kfree_skb(skb);
goto error_put_sess_tun;
}
- skb_put(skb, total_len);
l2tp_xmit_skb(session, skb, session->hdr_len);
sock_put(ps->tunnel_sock);
sock_put(sk);
- return error;
+ return total_len;
error_put_sess_tun:
sock_put(ps->tunnel_sock);
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index d8d4243..6bb1d42 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -245,6 +245,71 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry,
}
}
+/**
+ * netlbl_domhsh_validate - Validate a new domain mapping entry
+ * @entry: the entry to validate
+ *
+ * This function validates the new domain mapping entry to ensure that it is
+ * a valid entry. Returns zero on success, negative values on failure.
+ *
+ */
+static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
+{
+ struct netlbl_af4list *iter4;
+ struct netlbl_domaddr4_map *map4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct netlbl_af6list *iter6;
+ struct netlbl_domaddr6_map *map6;
+#endif /* IPv6 */
+
+ if (entry == NULL)
+ return -EINVAL;
+
+ switch (entry->type) {
+ case NETLBL_NLTYPE_UNLABELED:
+ if (entry->type_def.cipsov4 != NULL ||
+ entry->type_def.addrsel != NULL)
+ return -EINVAL;
+ break;
+ case NETLBL_NLTYPE_CIPSOV4:
+ if (entry->type_def.cipsov4 == NULL)
+ return -EINVAL;
+ break;
+ case NETLBL_NLTYPE_ADDRSELECT:
+ netlbl_af4list_foreach(iter4, &entry->type_def.addrsel->list4) {
+ map4 = netlbl_domhsh_addr4_entry(iter4);
+ switch (map4->type) {
+ case NETLBL_NLTYPE_UNLABELED:
+ if (map4->type_def.cipsov4 != NULL)
+ return -EINVAL;
+ break;
+ case NETLBL_NLTYPE_CIPSOV4:
+ if (map4->type_def.cipsov4 == NULL)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ netlbl_af6list_foreach(iter6, &entry->type_def.addrsel->list6) {
+ map6 = netlbl_domhsh_addr6_entry(iter6);
+ switch (map6->type) {
+ case NETLBL_NLTYPE_UNLABELED:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+#endif /* IPv6 */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/*
* Domain Hash Table Functions
*/
@@ -311,6 +376,10 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
struct netlbl_af6list *tmp6;
#endif /* IPv6 */
+ ret_val = netlbl_domhsh_validate(entry);
+ if (ret_val != 0)
+ return ret_val;
+
/* XXX - we can remove this RCU read lock as the spinlock protects the
* entire function, but before we do we need to fixup the
* netlbl_af[4,6]list RCU functions to do "the right thing" with
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index cfcd783..8ed5d93 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2848,12 +2848,11 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
return -EOPNOTSUPP;
uaddr->sa_family = AF_PACKET;
+ memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
if (dev)
- strncpy(uaddr->sa_data, dev->name, 14);
- else
- memset(uaddr->sa_data, 0, 14);
+ strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
rcu_read_unlock();
*uaddr_len = sizeof(*uaddr);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9fd05ed..4bc6e0b 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3929,6 +3929,12 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
/* Release our hold on the endpoint. */
sp = sctp_sk(sk);
+ /* This could happen during socket init, thus we bail out
+ * early, since the rest of the below is not setup either.
+ */
+ if (sp->ep == NULL)
+ return;
+
if (sp->do_auto_asconf) {
sp->do_auto_asconf = 0;
list_del(&sp->auto_asconf_list);
diff --git a/net/socket.c b/net/socket.c
index dab3176..47ce3ea 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1899,9 +1899,9 @@ struct used_address {
unsigned int name_len;
};
-static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
- struct msghdr *msg_sys, unsigned flags,
- struct used_address *used_address)
+static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
+ struct msghdr *msg_sys, unsigned flags,
+ struct used_address *used_address)
{
struct compat_msghdr __user *msg_compat =
(struct compat_msghdr __user *)msg;
@@ -2017,22 +2017,30 @@ out:
* BSD sendmsg interface
*/
-SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
+long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
{
int fput_needed, err;
struct msghdr msg_sys;
- struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ struct socket *sock;
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
- err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
+ err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
fput_light(sock->file, fput_needed);
out:
return err;
}
+SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
+{
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+ return __sys_sendmsg(fd, msg, flags);
+}
+
/*
* Linux sendmmsg interface
*/
@@ -2063,15 +2071,16 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
while (datagrams < vlen) {
if (MSG_CMSG_COMPAT & flags) {
- err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
- &msg_sys, flags, &used_address);
+ err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
+ &msg_sys, flags, &used_address);
if (err < 0)
break;
err = __put_user(err, &compat_entry->msg_len);
++compat_entry;
} else {
- err = __sys_sendmsg(sock, (struct msghdr __user *)entry,
- &msg_sys, flags, &used_address);
+ err = ___sys_sendmsg(sock,
+ (struct msghdr __user *)entry,
+ &msg_sys, flags, &used_address);
if (err < 0)
break;
err = put_user(err, &entry->msg_len);
@@ -2095,11 +2104,13 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
unsigned int, vlen, unsigned int, flags)
{
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
return __sys_sendmmsg(fd, mmsg, vlen, flags);
}
-static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
- struct msghdr *msg_sys, unsigned flags, int nosec)
+static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+ struct msghdr *msg_sys, unsigned flags, int nosec)
{
struct compat_msghdr __user *msg_compat =
(struct compat_msghdr __user *)msg;
@@ -2192,23 +2203,31 @@ out:
* BSD recvmsg interface
*/
-SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
- unsigned int, flags)
+long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags)
{
int fput_needed, err;
struct msghdr msg_sys;
- struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ struct socket *sock;
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
- err = __sys_recvmsg(sock, msg, &msg_sys, flags, 0);
+ err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0);
fput_light(sock->file, fput_needed);
out:
return err;
}
+SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
+ unsigned int, flags)
+{
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+ return __sys_recvmsg(fd, msg, flags);
+}
+
/*
* Linux recvmmsg interface
*/
@@ -2246,17 +2265,18 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
* No need to ask LSM for more than the first datagram.
*/
if (MSG_CMSG_COMPAT & flags) {
- err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
- &msg_sys, flags & ~MSG_WAITFORONE,
- datagrams);
+ err = ___sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
+ &msg_sys, flags & ~MSG_WAITFORONE,
+ datagrams);
if (err < 0)
break;
err = __put_user(err, &compat_entry->msg_len);
++compat_entry;
} else {
- err = __sys_recvmsg(sock, (struct msghdr __user *)entry,
- &msg_sys, flags & ~MSG_WAITFORONE,
- datagrams);
+ err = ___sys_recvmsg(sock,
+ (struct msghdr __user *)entry,
+ &msg_sys, flags & ~MSG_WAITFORONE,
+ datagrams);
if (err < 0)
break;
err = put_user(err, &entry->msg_len);
@@ -2323,6 +2343,9 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
int datagrams;
struct timespec timeout_sys;
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+
if (!timeout)
return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL);
diff --git a/sound/usb/card.c b/sound/usb/card.c
index b41730d..658ea11 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -149,14 +149,32 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
return -EINVAL;
}
+ alts = &iface->altsetting[0];
+ altsd = get_iface_desc(alts);
+
+ /*
+ * Android with both accessory and audio interfaces enabled gets the
+ * interface numbers wrong.
+ */
+ if ((chip->usb_id == USB_ID(0x18d1, 0x2d04) ||
+ chip->usb_id == USB_ID(0x18d1, 0x2d05)) &&
+ interface == 0 &&
+ altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
+ altsd->bInterfaceSubClass == USB_SUBCLASS_VENDOR_SPEC) {
+ interface = 2;
+ iface = usb_ifnum_to_if(dev, interface);
+ if (!iface)
+ return -EINVAL;
+ alts = &iface->altsetting[0];
+ altsd = get_iface_desc(alts);
+ }
+
if (usb_interface_claimed(iface)) {
snd_printdd(KERN_INFO "%d:%d:%d: skipping, already claimed\n",
dev->devnum, ctrlif, interface);
return -EINVAL;
}
- alts = &iface->altsetting[0];
- altsd = get_iface_desc(alts);
if ((altsd->bInterfaceClass == USB_CLASS_AUDIO ||
altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC) &&
altsd->bInterfaceSubClass == USB_SUBCLASS_MIDISTREAMING) {
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index e075a67..5ca4652 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -821,6 +821,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
case USB_ID(0x046d, 0x0808):
case USB_ID(0x046d, 0x0809):
+ case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
case USB_ID(0x046d, 0x0991):

View file

@@ -0,0 +1,791 @@
diff --git a/Makefile b/Makefile
index 5adfadf..810572b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 51
+SUBLEVEL = 52
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 42dec04..0a5e8a5 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -305,9 +305,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
}
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+extern void flush_kernel_dcache_page(struct page *);
#define flush_dcache_mmap_lock(mapping) \
spin_lock_irq(&(mapping)->tree_lock)
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 40ca11e..8f0d285 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -299,6 +299,39 @@ void flush_dcache_page(struct page *page)
EXPORT_SYMBOL(flush_dcache_page);
/*
+ * Ensure cache coherency for the kernel mapping of this page. We can
+ * assume that the page is pinned via kmap.
+ *
+ * If the page only exists in the page cache and there are no user
+ * space mappings, this is a no-op since the page was already marked
+ * dirty at creation. Otherwise, we need to flush the dirty kernel
+ * cache lines directly.
+ */
+void flush_kernel_dcache_page(struct page *page)
+{
+ if (cache_is_vivt() || cache_is_vipt_aliasing()) {
+ struct address_space *mapping;
+
+ mapping = page_mapping(page);
+
+ if (!mapping || mapping_mapped(mapping)) {
+ void *addr;
+
+ addr = page_address(page);
+ /*
+ * kmap_atomic() doesn't set the page virtual
+ * address for highmem pages, and
+ * kunmap_atomic() takes care of cache
+ * flushing already.
+ */
+ if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
+ __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+ }
+ }
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
+/*
* Flush an anonymous page so that users of get_user_pages()
* can safely access the data. The expected sequence is:
*
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index d51225f..eb5293a 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -57,6 +57,12 @@ void flush_dcache_page(struct page *page)
}
EXPORT_SYMBOL(flush_dcache_page);
+void flush_kernel_dcache_page(struct page *page)
+{
+ __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *dst, const void *src,
unsigned long len)
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 147614e..6a8a382 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -384,21 +384,37 @@ static int dlci_del(struct dlci_add *dlci)
struct frad_local *flp;
struct net_device *master, *slave;
int err;
+ bool found = false;
+
+ rtnl_lock();
/* validate slave device */
master = __dev_get_by_name(&init_net, dlci->devname);
- if (!master)
- return -ENODEV;
+ if (!master) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ list_for_each_entry(dlp, &dlci_devs, list) {
+ if (dlp->master == master) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ err = -ENODEV;
+ goto out;
+ }
if (netif_running(master)) {
- return -EBUSY;
+ err = -EBUSY;
+ goto out;
}
dlp = netdev_priv(master);
slave = dlp->slave;
flp = netdev_priv(slave);
- rtnl_lock();
err = (*flp->deassoc)(slave, master);
if (!err) {
list_del(&dlp->list);
@@ -407,8 +423,8 @@ static int dlci_del(struct dlci_add *dlci)
dev_put(slave);
}
+out:
rtnl_unlock();
-
return err;
}
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 7d47514..2e99f79 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1034,22 +1034,37 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
{
u8 fcr = ioread8(priv->membase + UART_FCR);
+ struct uart_port *port = &priv->port;
+ struct tty_struct *tty = tty_port_tty_get(&port->state->port);
+ char *error_msg[5] = {};
+ int i = 0;
/* Reset FIFO */
fcr |= UART_FCR_CLEAR_RCVR;
iowrite8(fcr, priv->membase + UART_FCR);
if (lsr & PCH_UART_LSR_ERR)
- dev_err(&priv->pdev->dev, "Error data in FIFO\n");
+ error_msg[i++] = "Error data in FIFO\n";
- if (lsr & UART_LSR_FE)
- dev_err(&priv->pdev->dev, "Framing Error\n");
+ if (lsr & UART_LSR_FE) {
+ port->icount.frame++;
+ error_msg[i++] = " Framing Error\n";
+ }
- if (lsr & UART_LSR_PE)
- dev_err(&priv->pdev->dev, "Parity Error\n");
+ if (lsr & UART_LSR_PE) {
+ port->icount.parity++;
+ error_msg[i++] = " Parity Error\n";
+ }
- if (lsr & UART_LSR_OE)
- dev_err(&priv->pdev->dev, "Overrun Error\n");
+ if (lsr & UART_LSR_OE) {
+ port->icount.overrun++;
+ error_msg[i++] = " Overrun Error\n";
+ }
+
+ if (tty == NULL) {
+ for (i = 0; error_msg[i] != NULL; i++)
+ dev_err(&priv->pdev->dev, error_msg[i]);
+ }
}
static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
diff --git a/fs/exec.c b/fs/exec.c
index 2b7f5ff..0ea0b4c 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1163,13 +1163,6 @@ void setup_new_exec(struct linux_binprm * bprm)
set_dumpable(current->mm, suid_dumpable);
}
- /*
- * Flush performance counters when crossing a
- * security domain:
- */
- if (!get_dumpable(current->mm))
- perf_event_exit_task(current);
-
/* An exec changes our domain. We are no longer part of the thread
group */
@@ -1233,6 +1226,15 @@ void install_exec_creds(struct linux_binprm *bprm)
commit_creds(bprm->cred);
bprm->cred = NULL;
+
+ /*
+ * Disable monitoring for regular users
+ * when executing setuid binaries. Must
+ * wait until new credentials are committed
+ * by commit_creds() above
+ */
+ if (get_dumpable(current->mm) != SUID_DUMP_USER)
+ perf_event_exit_task(current);
/*
* cred_guard_mutex must be held at least to this point to prevent
* ptrace_attach() from altering our determination of the task's
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 8640a12..25c472b 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -357,31 +357,50 @@ static unsigned int vfs_dent_type(uint8_t type)
static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
{
int err, over = 0;
+ loff_t pos = file->f_pos;
struct qstr nm;
union ubifs_key key;
struct ubifs_dent_node *dent;
struct inode *dir = file->f_path.dentry->d_inode;
struct ubifs_info *c = dir->i_sb->s_fs_info;
- dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos);
+ dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, pos);
- if (file->f_pos > UBIFS_S_KEY_HASH_MASK || file->f_pos == 2)
+ if (pos > UBIFS_S_KEY_HASH_MASK || pos == 2)
/*
* The directory was seek'ed to a senseless position or there
* are no more entries.
*/
return 0;
+ if (file->f_version == 0) {
+ /*
+ * The file was seek'ed, which means that @file->private_data
+ * is now invalid. This may also be just the first
+ * 'ubifs_readdir()' invocation, in which case
+ * @file->private_data is NULL, and the below code is
+ * basically a no-op.
+ */
+ kfree(file->private_data);
+ file->private_data = NULL;
+ }
+
+ /*
+ * 'generic_file_llseek()' unconditionally sets @file->f_version to
+ * zero, and we use this for detecting whether the file was seek'ed.
+ */
+ file->f_version = 1;
+
/* File positions 0 and 1 correspond to "." and ".." */
- if (file->f_pos == 0) {
+ if (pos == 0) {
ubifs_assert(!file->private_data);
over = filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR);
if (over)
return 0;
- file->f_pos = 1;
+ file->f_pos = pos = 1;
}
- if (file->f_pos == 1) {
+ if (pos == 1) {
ubifs_assert(!file->private_data);
over = filldir(dirent, "..", 2, 1,
parent_ino(file->f_path.dentry), DT_DIR);
@@ -397,7 +416,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
goto out;
}
- file->f_pos = key_hash_flash(c, &dent->key);
+ file->f_pos = pos = key_hash_flash(c, &dent->key);
file->private_data = dent;
}
@@ -405,17 +424,16 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
if (!dent) {
/*
* The directory was seek'ed to and is now readdir'ed.
- * Find the entry corresponding to @file->f_pos or the
- * closest one.
+ * Find the entry corresponding to @pos or the closest one.
*/
- dent_key_init_hash(c, &key, dir->i_ino, file->f_pos);
+ dent_key_init_hash(c, &key, dir->i_ino, pos);
nm.name = NULL;
dent = ubifs_tnc_next_ent(c, &key, &nm);
if (IS_ERR(dent)) {
err = PTR_ERR(dent);
goto out;
}
- file->f_pos = key_hash_flash(c, &dent->key);
+ file->f_pos = pos = key_hash_flash(c, &dent->key);
file->private_data = dent;
}
@@ -427,7 +445,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
ubifs_inode(dir)->creat_sqnum);
nm.len = le16_to_cpu(dent->nlen);
- over = filldir(dirent, dent->name, nm.len, file->f_pos,
+ over = filldir(dirent, dent->name, nm.len, pos,
le64_to_cpu(dent->inum),
vfs_dent_type(dent->type));
if (over)
@@ -443,9 +461,17 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
}
kfree(file->private_data);
- file->f_pos = key_hash_flash(c, &dent->key);
+ file->f_pos = pos = key_hash_flash(c, &dent->key);
file->private_data = dent;
cond_resched();
+
+ if (file->f_version == 0)
+ /*
+ * The file was seek'ed meanwhile, lets return and start
+ * reading direntries from the new position on the next
+ * invocation.
+ */
+ return 0;
}
out:
@@ -456,15 +482,13 @@ out:
kfree(file->private_data);
file->private_data = NULL;
+ /* 2 is a special value indicating that there are no more direntries */
file->f_pos = 2;
return 0;
}
-/* If a directory is seeked, we have to free saved readdir() state */
static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int origin)
{
- kfree(file->private_data);
- file->private_data = NULL;
return generic_file_llseek(file, offset, origin);
}
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8e9a069..89fb74e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -950,8 +950,7 @@ struct perf_event {
/* mmap bits */
struct mutex mmap_mutex;
atomic_t mmap_count;
- int mmap_locked;
- struct user_struct *mmap_user;
+
struct ring_buffer *rb;
struct list_head rb_entry;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 839a24f..7ceb270 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -193,9 +193,6 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);
-static void ring_buffer_attach(struct perf_event *event,
- struct ring_buffer *rb);
-
void __weak perf_event_print_debug(void) { }
extern __weak const char *perf_pmu_name(void)
@@ -2849,6 +2846,7 @@ static void free_event_rcu(struct rcu_head *head)
}
static void ring_buffer_put(struct ring_buffer *rb);
+static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
static void free_event(struct perf_event *event)
{
@@ -2873,15 +2871,30 @@ static void free_event(struct perf_event *event)
if (has_branch_stack(event)) {
static_key_slow_dec_deferred(&perf_sched_events);
/* is system-wide event */
- if (!(event->attach_state & PERF_ATTACH_TASK))
+ if (!(event->attach_state & PERF_ATTACH_TASK)) {
atomic_dec(&per_cpu(perf_branch_stack_events,
event->cpu));
+ }
}
}
if (event->rb) {
- ring_buffer_put(event->rb);
- event->rb = NULL;
+ struct ring_buffer *rb;
+
+ /*
+ * Can happen when we close an event with re-directed output.
+ *
+ * Since we have a 0 refcount, perf_mmap_close() will skip
+ * over us; possibly making our ring_buffer_put() the last.
+ */
+ mutex_lock(&event->mmap_mutex);
+ rb = event->rb;
+ if (rb) {
+ rcu_assign_pointer(event->rb, NULL);
+ ring_buffer_detach(event, rb);
+ ring_buffer_put(rb); /* could be last */
+ }
+ mutex_unlock(&event->mmap_mutex);
}
if (is_cgroup_event(event))
@@ -3119,30 +3132,13 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
unsigned int events = POLL_HUP;
/*
- * Race between perf_event_set_output() and perf_poll(): perf_poll()
- * grabs the rb reference but perf_event_set_output() overrides it.
- * Here is the timeline for two threads T1, T2:
- * t0: T1, rb = rcu_dereference(event->rb)
- * t1: T2, old_rb = event->rb
- * t2: T2, event->rb = new rb
- * t3: T2, ring_buffer_detach(old_rb)
- * t4: T1, ring_buffer_attach(rb1)
- * t5: T1, poll_wait(event->waitq)
- *
- * To avoid this problem, we grab mmap_mutex in perf_poll()
- * thereby ensuring that the assignment of the new ring buffer
- * and the detachment of the old buffer appear atomic to perf_poll()
+ * Pin the event->rb by taking event->mmap_mutex; otherwise
+ * perf_event_set_output() can swizzle our rb and make us miss wakeups.
*/
mutex_lock(&event->mmap_mutex);
-
- rcu_read_lock();
- rb = rcu_dereference(event->rb);
- if (rb) {
- ring_buffer_attach(event, rb);
+ rb = event->rb;
+ if (rb)
events = atomic_xchg(&rb->poll, 0);
- }
- rcu_read_unlock();
-
mutex_unlock(&event->mmap_mutex);
poll_wait(file, &event->waitq, wait);
@@ -3459,16 +3455,12 @@ static void ring_buffer_attach(struct perf_event *event,
return;
spin_lock_irqsave(&rb->event_lock, flags);
- if (!list_empty(&event->rb_entry))
- goto unlock;
-
- list_add(&event->rb_entry, &rb->event_list);
-unlock:
+ if (list_empty(&event->rb_entry))
+ list_add(&event->rb_entry, &rb->event_list);
spin_unlock_irqrestore(&rb->event_lock, flags);
}
-static void ring_buffer_detach(struct perf_event *event,
- struct ring_buffer *rb)
+static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
{
unsigned long flags;
@@ -3487,13 +3479,10 @@ static void ring_buffer_wakeup(struct perf_event *event)
rcu_read_lock();
rb = rcu_dereference(event->rb);
- if (!rb)
- goto unlock;
-
- list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
- wake_up_all(&event->waitq);
-
-unlock:
+ if (rb) {
+ list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
+ wake_up_all(&event->waitq);
+ }
rcu_read_unlock();
}
@@ -3522,18 +3511,10 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
static void ring_buffer_put(struct ring_buffer *rb)
{
- struct perf_event *event, *n;
- unsigned long flags;
-
if (!atomic_dec_and_test(&rb->refcount))
return;
- spin_lock_irqsave(&rb->event_lock, flags);
- list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
- list_del_init(&event->rb_entry);
- wake_up_all(&event->waitq);
- }
- spin_unlock_irqrestore(&rb->event_lock, flags);
+ WARN_ON_ONCE(!list_empty(&rb->event_list));
call_rcu(&rb->rcu_head, rb_free_rcu);
}
@@ -3543,26 +3524,100 @@ static void perf_mmap_open(struct vm_area_struct *vma)
struct perf_event *event = vma->vm_file->private_data;
atomic_inc(&event->mmap_count);
+ atomic_inc(&event->rb->mmap_count);
}
+/*
+ * A buffer can be mmap()ed multiple times; either directly through the same
+ * event, or through other events by use of perf_event_set_output().
+ *
+ * In order to undo the VM accounting done by perf_mmap() we need to destroy
+ * the buffer here, where we still have a VM context. This means we need
+ * to detach all events redirecting to us.
+ */
static void perf_mmap_close(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;
- if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
- unsigned long size = perf_data_size(event->rb);
- struct user_struct *user = event->mmap_user;
- struct ring_buffer *rb = event->rb;
+ struct ring_buffer *rb = event->rb;
+ struct user_struct *mmap_user = rb->mmap_user;
+ int mmap_locked = rb->mmap_locked;
+ unsigned long size = perf_data_size(rb);
+
+ atomic_dec(&rb->mmap_count);
+
+ if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
+ return;
+
+ /* Detach current event from the buffer. */
+ rcu_assign_pointer(event->rb, NULL);
+ ring_buffer_detach(event, rb);
+ mutex_unlock(&event->mmap_mutex);
+
+ /* If there's still other mmap()s of this buffer, we're done. */
+ if (atomic_read(&rb->mmap_count)) {
+ ring_buffer_put(rb); /* can't be last */
+ return;
+ }
+
+ /*
+ * No other mmap()s, detach from all other events that might redirect
+ * into the now unreachable buffer. Somewhat complicated by the
+ * fact that rb::event_lock otherwise nests inside mmap_mutex.
+ */
+again:
+ rcu_read_lock();
+ list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+ if (!atomic_long_inc_not_zero(&event->refcount)) {
+ /*
+ * This event is en-route to free_event() which will
+ * detach it and remove it from the list.
+ */
+ continue;
+ }
+ rcu_read_unlock();
- atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
- vma->vm_mm->pinned_vm -= event->mmap_locked;
- rcu_assign_pointer(event->rb, NULL);
- ring_buffer_detach(event, rb);
+ mutex_lock(&event->mmap_mutex);
+ /*
+ * Check we didn't race with perf_event_set_output() which can
+ * swizzle the rb from under us while we were waiting to
+ * acquire mmap_mutex.
+ *
+ * If we find a different rb; ignore this event, a next
+ * iteration will no longer find it on the list. We have to
+ * still restart the iteration to make sure we're not now
+ * iterating the wrong list.
+ */
+ if (event->rb == rb) {
+ rcu_assign_pointer(event->rb, NULL);
+ ring_buffer_detach(event, rb);
+ ring_buffer_put(rb); /* can't be last, we still have one */
+ }
mutex_unlock(&event->mmap_mutex);
+ put_event(event);
- ring_buffer_put(rb);
- free_uid(user);
+ /*
+ * Restart the iteration; either we're on the wrong list or
+ * destroyed its integrity by doing a deletion.
+ */
+ goto again;
}
+ rcu_read_unlock();
+
+ /*
+ * It could be there's still a few 0-ref events on the list; they'll
+ * get cleaned up by free_event() -- they'll also still have their
+ * ref on the rb and will free it whenever they are done with it.
+ *
+ * Aside from that, this buffer is 'fully' detached and unmapped,
+ * undo the VM accounting.
+ */
+
+ atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
+ vma->vm_mm->pinned_vm -= mmap_locked;
+ free_uid(mmap_user);
+
+ ring_buffer_put(rb); /* could be last */
}
static const struct vm_operations_struct perf_mmap_vmops = {
@@ -3612,12 +3667,24 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
WARN_ON_ONCE(event->ctx->parent_ctx);
+again:
mutex_lock(&event->mmap_mutex);
if (event->rb) {
- if (event->rb->nr_pages == nr_pages)
- atomic_inc(&event->rb->refcount);
- else
+ if (event->rb->nr_pages != nr_pages) {
ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
+ /*
+ * Raced against perf_mmap_close() through
+ * perf_event_set_output(). Try again, hope for better
+ * luck.
+ */
+ mutex_unlock(&event->mmap_mutex);
+ goto again;
+ }
+
goto unlock;
}
@@ -3658,12 +3725,16 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
ret = -ENOMEM;
goto unlock;
}
- rcu_assign_pointer(event->rb, rb);
+
+ atomic_set(&rb->mmap_count, 1);
+ rb->mmap_locked = extra;
+ rb->mmap_user = get_current_user();
atomic_long_add(user_extra, &user->locked_vm);
- event->mmap_locked = extra;
- event->mmap_user = get_current_user();
- vma->vm_mm->pinned_vm += event->mmap_locked;
+ vma->vm_mm->pinned_vm += extra;
+
+ ring_buffer_attach(event, rb);
+ rcu_assign_pointer(event->rb, rb);
perf_event_update_userpage(event);
@@ -3672,7 +3743,11 @@ unlock:
atomic_inc(&event->mmap_count);
mutex_unlock(&event->mmap_mutex);
- vma->vm_flags |= VM_RESERVED;
+ /*
+ * Since pinned accounting is per vm we cannot allow fork() to copy our
+ * vma.
+ */
+ vma->vm_flags |= VM_DONTCOPY | VM_RESERVED;
vma->vm_ops = &perf_mmap_vmops;
return ret;
@@ -6161,6 +6236,8 @@ set:
if (atomic_read(&event->mmap_count))
goto unlock;
+ old_rb = event->rb;
+
if (output_event) {
/* get the rb we want to redirect to */
rb = ring_buffer_get(output_event);
@@ -6168,16 +6245,28 @@ set:
goto unlock;
}
- old_rb = event->rb;
- rcu_assign_pointer(event->rb, rb);
if (old_rb)
ring_buffer_detach(event, old_rb);
+
+ if (rb)
+ ring_buffer_attach(event, rb);
+
+ rcu_assign_pointer(event->rb, rb);
+
+ if (old_rb) {
+ ring_buffer_put(old_rb);
+ /*
+ * Since we detached before setting the new rb, so that we
+ * could attach the new rb, we could have missed a wakeup.
+ * Provide it now.
+ */
+ wake_up_all(&event->waitq);
+ }
+
ret = 0;
unlock:
mutex_unlock(&event->mmap_mutex);
- if (old_rb)
- ring_buffer_put(old_rb);
out:
return ret;
}
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index bb38c4d..fc8bfcf 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -147,7 +147,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
return;
}
- for_each_online_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
unsigned int nr;
nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
@@ -233,7 +233,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
if (cpu >= 0) {
toggle_bp_task_slot(bp, cpu, enable, type, weight);
} else {
- for_each_online_cpu(cpu)
+ for_each_possible_cpu(cpu)
toggle_bp_task_slot(bp, cpu, enable, type, weight);
}
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index b0b107f..b400e64 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -30,6 +30,10 @@ struct ring_buffer {
spinlock_t event_lock;
struct list_head event_list;
+ atomic_t mmap_count;
+ unsigned long mmap_locked;
+ struct user_struct *mmap_user;
+
struct perf_event_mmap_page *user_page;
void *data_pages[0];
};
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index fa07aed..932420d 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1880,6 +1880,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
conn, code, ident, dlen);
+ if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
+ return NULL;
+
len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
count = min_t(unsigned int, conn->mtu, len);

View file

@@ -0,0 +1,215 @@
diff --git a/MAINTAINERS b/MAINTAINERS
index c744d9c..5725829 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6390,6 +6390,7 @@ STABLE BRANCH
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
L: stable@vger.kernel.org
S: Supported
+F: Documentation/stable_kernel_rules.txt
STAGING SUBSYSTEM
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/Makefile b/Makefile
index 810572b..104049d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 52
+SUBLEVEL = 53
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/block/genhd.c b/block/genhd.c
index 60108d9..d815a0f 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -518,7 +518,7 @@ static void register_disk(struct gendisk *disk)
ddev->parent = disk->driverfs_dev;
- dev_set_name(ddev, disk->disk_name);
+ dev_set_name(ddev, "%s", disk->disk_name);
/* delay uevents, until we scanned partition table */
dev_set_uevent_suppress(ddev, 1);
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 056571b..b4c046c 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -512,7 +512,8 @@ static struct crypto_template *__crypto_lookup_template(const char *name)
struct crypto_template *crypto_lookup_template(const char *name)
{
- return try_then_request_module(__crypto_lookup_template(name), name);
+ return try_then_request_module(__crypto_lookup_template(name), "%s",
+ name);
}
EXPORT_SYMBOL_GPL(crypto_lookup_template);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 3c4c225..386d40e 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -666,7 +666,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
mutex_unlock(&nbd->tx_lock);
- thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name);
+ thread = kthread_create(nbd_thread, nbd, "%s",
+ nbd->disk->disk_name);
if (IS_ERR(thread)) {
mutex_lock(&nbd->tx_lock);
return PTR_ERR(thread);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index d620b44..8a3aff7 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2882,7 +2882,7 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
if (lba < 0)
return -EINVAL;
- cgc->buffer = kmalloc(blocksize, GFP_KERNEL);
+ cgc->buffer = kzalloc(blocksize, GFP_KERNEL);
if (cgc->buffer == NULL)
return -ENOMEM;
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 9eca9f1..4c449b2 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -330,7 +330,7 @@ static void uevent_notify(struct charger_manager *cm, const char *event)
strncpy(env_str, event, UEVENT_BUF_SIZE);
kobject_uevent(&cm->dev->kobj, KOBJ_CHANGE);
- dev_info(cm->dev, event);
+ dev_info(cm->dev, "%s", event);
}
/**
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index d4ed9eb..caac1b2 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -465,7 +465,7 @@ static int osd_probe(struct device *dev)
oud->class_dev.class = &osd_uld_class;
oud->class_dev.parent = dev;
oud->class_dev.release = __remove;
- error = dev_set_name(&oud->class_dev, disk->disk_name);
+ error = dev_set_name(&oud->class_dev, "%s", disk->disk_name);
if (error) {
OSD_ERR("dev_set_name failed => %d\n", error);
goto err_put_cdev;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2bbb845..3141c1a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -140,7 +140,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
char *buffer_data;
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
- const char *temp = "temporary ";
+ static const char temp[] = "temporary ";
int len;
if (sdp->type != TYPE_DISK)
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index ffba1ef..40747fe 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -4086,10 +4086,6 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_VENDOR_ID_IBM, 0x0299,
0, 0, pbn_b0_bt_2_115200 },
- { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835,
- 0x1000, 0x0012,
- 0, 0, pbn_b0_bt_2_115200 },
-
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
0xA000, 0x1000,
0, 0, pbn_b0_1_115200 },
diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
index a790821..ea3d1ca 100644
--- a/fs/hpfs/map.c
+++ b/fs/hpfs/map.c
@@ -17,7 +17,8 @@ unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
struct quad_buffer_head *qbh, char *id)
{
secno sec;
- if (hpfs_sb(s)->sb_chk) if (bmp_block * 16384 > hpfs_sb(s)->sb_fs_size) {
+ unsigned n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14;
+ if (hpfs_sb(s)->sb_chk) if (bmp_block >= n_bands) {
hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id);
return NULL;
}
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 54f6ecc..0bf578d 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -552,7 +552,13 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
sbi->sb_cp_table = NULL;
sbi->sb_c_bitmap = -1;
sbi->sb_max_fwd_alloc = 0xffffff;
-
+
+ if (sbi->sb_fs_size >= 0x80000000) {
+ hpfs_error(s, "invalid size in superblock: %08x",
+ (unsigned)sbi->sb_fs_size);
+ goto bail4;
+ }
+
/* Load bitmap directory */
if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps))))
goto bail4;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 1798846..cb997b1 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -161,8 +161,8 @@ static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
*/
memcpy(p, argp->p, avail);
/* step to next page */
- argp->p = page_address(argp->pagelist[0]);
argp->pagelist++;
+ argp->p = page_address(argp->pagelist[0]);
if (argp->pagelen < PAGE_SIZE) {
argp->end = argp->p + (argp->pagelen>>2);
argp->pagelen = 0;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 6570548..a1e0795 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -145,7 +145,6 @@ static void tick_nohz_update_jiffies(ktime_t now)
tick_do_update_jiffies64(now);
local_irq_restore(flags);
- calc_load_exit_idle();
touch_softlockup_watchdog();
}
diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
index 925ca58..8c93fa8 100644
--- a/net/ceph/auth_none.c
+++ b/net/ceph/auth_none.c
@@ -39,6 +39,11 @@ static int should_authenticate(struct ceph_auth_client *ac)
return xi->starting;
}
+static int build_request(struct ceph_auth_client *ac, void *buf, void *end)
+{
+ return 0;
+}
+
/*
* the generic auth code decode the global_id, and we carry no actual
* authenticate state, so nothing happens here.
@@ -106,6 +111,7 @@ static const struct ceph_auth_client_ops ceph_auth_none_ops = {
.destroy = destroy,
.is_authenticated = is_authenticated,
.should_authenticate = should_authenticate,
+ .build_request = build_request,
.handle_reply = handle_reply,
.create_authorizer = ceph_auth_none_create_authorizer,
.destroy_authorizer = ceph_auth_none_destroy_authorizer,

View file

@@ -0,0 +1,534 @@
diff --git a/Makefile b/Makefile
index 104049d..6ca3657 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 53
+SUBLEVEL = 54
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 85d6332..a99ed7a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -824,6 +824,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
struct frame_tail __user *tail;
+ perf_callchain_store(entry, regs->ARM_pc);
tail = (struct frame_tail __user *)regs->ARM_fp - 1;
while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 054cc01..d50a821 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -36,9 +36,8 @@ static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
/* snapshots of runstate info */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
-/* unused ns of stolen and blocked time */
+/* unused ns of stolen time */
static DEFINE_PER_CPU(u64, xen_residual_stolen);
-static DEFINE_PER_CPU(u64, xen_residual_blocked);
/* return an consistent snapshot of 64-bit time/counter value */
static u64 get64(const u64 *p)
@@ -115,7 +114,7 @@ static void do_stolen_accounting(void)
{
struct vcpu_runstate_info state;
struct vcpu_runstate_info *snap;
- s64 blocked, runnable, offline, stolen;
+ s64 runnable, offline, stolen;
cputime_t ticks;
get_runstate_snapshot(&state);
@@ -125,7 +124,6 @@ static void do_stolen_accounting(void)
snap = &__get_cpu_var(xen_runstate_snapshot);
/* work out how much time the VCPU has not been runn*ing* */
- blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];
@@ -141,17 +139,6 @@ static void do_stolen_accounting(void)
ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
__this_cpu_write(xen_residual_stolen, stolen);
account_steal_ticks(ticks);
-
- /* Add the appropriate number of ticks of blocked time,
- including any left-overs from last time. */
- blocked += __this_cpu_read(xen_residual_blocked);
-
- if (blocked < 0)
- blocked = 0;
-
- ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
- __this_cpu_write(xen_residual_blocked, blocked);
- account_idle_ticks(ticks);
}
/* Get the TSC speed from Xen */
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index f9914e5..3251d4b 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -974,6 +974,10 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
ec_enlarge_storm_threshold, "CLEVO hardware", {
DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL},
+ {
+ ec_skip_dsdt_scan, "HP Folio 13", {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL},
{},
};
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 71a4d04..aeb8220 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -284,6 +284,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
/* AMD */
{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
+ { PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
/* AMD is using RAID class only for ahci controllers */
{ PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index f9eaa82..47a1fb8 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1543,8 +1543,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
u32 fbs = readl(port_mmio + PORT_FBS);
int pmp = fbs >> PORT_FBS_DWE_OFFSET;
- if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
- ata_link_online(&ap->pmp_link[pmp])) {
+ if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links)) {
link = &ap->pmp_link[pmp];
fbs_need_dec = true;
}
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 758122f..15a6af8 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2469,10 +2469,10 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
struct dma_pl330_chan *pch = to_pchan(chan);
unsigned long flags;
- spin_lock_irqsave(&pch->lock, flags);
-
tasklet_kill(&pch->task);
+ spin_lock_irqsave(&pch->lock, flags);
+
pl330_release_channel(pch->pl330_chid);
pch->pl330_chid = NULL;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
index 506b9a0..4763426 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -104,7 +104,7 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
tx_agc[RF90_PATH_A] = 0x10101010;
tx_agc[RF90_PATH_B] = 0x10101010;
} else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
- TXHIGHPWRLEVEL_LEVEL1) {
+ TXHIGHPWRLEVEL_LEVEL2) {
tx_agc[RF90_PATH_A] = 0x00000000;
tx_agc[RF90_PATH_B] = 0x00000000;
} else{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 8cf41bb..3f869c9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -357,6 +357,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
{RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
{RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
+ {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
{RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
{}
};
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 680dbfa..103c95e 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1062,6 +1062,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
/*
* Serverworks CSB5 IDE does not fully support native mode
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 9694c1e..fc50168 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -100,9 +100,9 @@ static int at91_cf_get_status(struct pcmcia_socket *s, u_int *sp)
int vcc = gpio_is_valid(cf->board->vcc_pin);
*sp = SS_DETECT | SS_3VCARD;
- if (!rdy || gpio_get_value(rdy))
+ if (!rdy || gpio_get_value(cf->board->irq_pin))
*sp |= SS_READY;
- if (!vcc || gpio_get_value(vcc))
+ if (!vcc || gpio_get_value(cf->board->vcc_pin))
*sp |= SS_POWERON;
} else
*sp = 0;
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index 0fbe57b..21d63d1 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -310,7 +310,7 @@ static int rv3029c2_rtc_i2c_set_alarm(struct i2c_client *client,
dev_dbg(&client->dev, "alarm IRQ armed\n");
} else {
/* disable AIE irq */
- ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 1);
+ ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 0);
if (ret)
return ret;
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index cb8c162..6de5760 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -511,6 +511,7 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
/* Caller must hold fsg->lock */
static void wakeup_thread(struct fsg_common *common)
{
+ smp_wmb(); /* ensure the write of bh->state is complete */
/* Tell the main thread that something has happened */
common->thread_wakeup_needed = 1;
if (common->thread_task)
@@ -730,6 +731,7 @@ static int sleep_thread(struct fsg_common *common)
}
__set_current_state(TASK_RUNNING);
common->thread_wakeup_needed = 0;
+ smp_rmb(); /* ensure the latest bh->state is visible */
return rc;
}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 5080b1d..3347e9b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -369,6 +369,10 @@ static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci
ctx->size += CTX_SIZE(xhci->hcc_params);
ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
+ if (!ctx->bytes) {
+ kfree(ctx);
+ return NULL;
+ }
memset(ctx->bytes, 0, ctx->size);
return ctx;
}
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index df90fe5..93ad67e 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -179,5 +179,7 @@ static int xhci_plat_remove(struct platform_device *dev)
usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
kfree(xhci);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5b24260..33e20e4 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -159,8 +159,6 @@ static void option_instat_callback(struct urb *urb);
#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000
#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
#define NOVATELWIRELESS_PRODUCT_E362 0x9010
-#define NOVATELWIRELESS_PRODUCT_G1 0xA001
-#define NOVATELWIRELESS_PRODUCT_G1_M 0xA002
#define NOVATELWIRELESS_PRODUCT_G2 0xA010
#define NOVATELWIRELESS_PRODUCT_MC551 0xB001
@@ -744,8 +742,6 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) },
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
/* Novatel Ovation MC551 a.k.a. Verizon USB551L */
{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index d46277d..c7ccbc6 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -37,7 +37,13 @@ static const struct usb_device_id id_table[] = {
{DEVICE_G1K(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */
{DEVICE_G1K(0x413c, 0x8172)}, /* Dell Gobi Modem device */
{DEVICE_G1K(0x413c, 0x8171)}, /* Dell Gobi QDL device */
- {DEVICE_G1K(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
+ {DEVICE_G1K(0x1410, 0xa001)}, /* Novatel/Verizon USB-1000 */
+ {DEVICE_G1K(0x1410, 0xa002)}, /* Novatel Gobi Modem device */
+ {DEVICE_G1K(0x1410, 0xa003)}, /* Novatel Gobi Modem device */
+ {DEVICE_G1K(0x1410, 0xa004)}, /* Novatel Gobi Modem device */
+ {DEVICE_G1K(0x1410, 0xa005)}, /* Novatel Gobi Modem device */
+ {DEVICE_G1K(0x1410, 0xa006)}, /* Novatel Gobi Modem device */
+ {DEVICE_G1K(0x1410, 0xa007)}, /* Novatel Gobi Modem device */
{DEVICE_G1K(0x1410, 0xa008)}, /* Novatel Gobi QDL device */
{DEVICE_G1K(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
{DEVICE_G1K(0x0b05, 0x1774)}, /* Asus Gobi QDL device */
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index a513a54..f2b2ffd 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -323,14 +323,14 @@ UniToupper(register wchar_t uc)
/*
* UniStrupr: Upper case a unicode string
*/
-static inline wchar_t *
-UniStrupr(register wchar_t *upin)
+static inline __le16 *
+UniStrupr(register __le16 *upin)
{
- register wchar_t *up;
+ register __le16 *up;
up = upin;
while (*up) { /* For all characters */
- *up = UniToupper(*up);
+ *up = cpu_to_le16(UniToupper(le16_to_cpu(*up)));
up++;
}
return upin; /* Return input pointer */
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 63c460e..6d0c62a 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -394,7 +394,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
int rc = 0;
int len;
char nt_hash[CIFS_NTHASH_SIZE];
- wchar_t *user;
+ __le16 *user;
wchar_t *domain;
wchar_t *server;
@@ -419,7 +419,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
return rc;
}
- /* convert ses->user_name to unicode and uppercase */
+ /* convert ses->user_name to unicode */
len = ses->user_name ? strlen(ses->user_name) : 0;
user = kmalloc(2 + (len * 2), GFP_KERNEL);
if (user == NULL) {
@@ -429,7 +429,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
}
if (len) {
- len = cifs_strtoUTF16((__le16 *)user, ses->user_name, len, nls_cp);
+ len = cifs_strtoUTF16(user, ses->user_name, len, nls_cp);
UniStrupr(user);
} else {
memset(user, '\0', 2);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 6fbfbdb..43944c6 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -549,6 +549,11 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
fattr->cf_mode &= ~(S_IWUGO);
fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
+ if (fattr->cf_nlink < 1) {
+ cFYI(1, "replacing bogus file nlink value %u\n",
+ fattr->cf_nlink);
+ fattr->cf_nlink = 1;
+ }
}
fattr->cf_uid = cifs_sb->mnt_uid;
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index d7940b2..fbb9b82 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -573,11 +573,8 @@ static int htree_dirblock_to_tree(struct file *dir_file,
if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
(block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb))
+((char *)de - bh->b_data))) {
- /* On error, skip the f_pos to the next block. */
- dir_file->f_pos = (dir_file->f_pos |
- (dir->i_sb->s_blocksize - 1)) + 1;
- brelse (bh);
- return count;
+ /* silently ignore the rest of the block */
+ break;
}
ext3fs_dirhash(de->name, de->name_len, hinfo);
if ((hinfo->hash < start_hash) ||
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0efff1e..b9a3726 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4713,7 +4713,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
error = ext4_get_inode_loc(inode, &iloc);
if (error)
return error;
- physical = iloc.bh->b_blocknr << blockbits;
+ physical = (__u64)iloc.bh->b_blocknr << blockbits;
offset = EXT4_GOOD_OLD_INODE_SIZE +
EXT4_I(inode)->i_extra_isize;
physical += offset;
@@ -4721,7 +4721,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
flags |= FIEMAP_EXTENT_DATA_INLINE;
brelse(iloc.bh);
} else { /* external block */
- physical = EXT4_I(inode)->i_file_acl << blockbits;
+ physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
length = inode->i_sb->s_blocksize;
}
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 2857e5b..98bff01ee 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4221,7 +4221,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
struct inode *inode;
- unsigned long delalloc_blocks;
+ unsigned long long delalloc_blocks;
inode = dentry->d_inode;
generic_fillattr(inode, stat);
@@ -4238,7 +4238,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
*/
delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
- stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
+ stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits-9);
return 0;
}
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 19e4518..8784842 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4639,11 +4639,16 @@ do_more:
* blocks being freed are metadata. these blocks shouldn't
* be used until this transaction is committed
*/
+ retry:
new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
if (!new_entry) {
- ext4_mb_unload_buddy(&e4b);
- err = -ENOMEM;
- goto error_return;
+ /*
+ * We use a retry loop because
+ * ext4_free_blocks() is not allowed to fail.
+ */
+ cond_resched();
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry;
}
new_entry->efd_start_cluster = bit;
new_entry->efd_group = block_group;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index ac76939..9fb3fae 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -585,11 +585,8 @@ static int htree_dirblock_to_tree(struct file *dir_file,
if (ext4_check_dir_entry(dir, NULL, de, bh,
(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
+ ((char *)de - bh->b_data))) {
- /* On error, skip the f_pos to the next block. */
- dir_file->f_pos = (dir_file->f_pos |
- (dir->i_sb->s_blocksize - 1)) + 1;
- brelse(bh);
- return count;
+ /* silently ignore the rest of the block */
+ break;
}
ext4fs_dirhash(de->name, de->name_len, hinfo);
if ((hinfo->hash < start_hash) ||
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 6075ac03..f567127 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -500,10 +500,10 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
&transaction->t_outstanding_credits);
if (atomic_dec_and_test(&transaction->t_updates))
wake_up(&journal->j_wait_updates);
+ tid = transaction->t_tid;
spin_unlock(&transaction->t_handle_lock);
jbd_debug(2, "restarting handle %p\n", handle);
- tid = transaction->t_tid;
need_to_start = !tid_geq(journal->j_commit_request, tid);
read_unlock(&journal->j_state_lock);
if (need_to_start)
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 2e3ea30..5b8d944 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -6499,6 +6499,16 @@ static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args)
}
new_oi = OCFS2_I(args->new_inode);
+ /*
+ * Adjust extent record count to reserve space for extended attribute.
+ * Inline data count had been adjusted in ocfs2_duplicate_inline_data().
+ */
+ if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) &&
+ !(ocfs2_inode_is_fast_symlink(args->new_inode))) {
+ struct ocfs2_extent_list *el = &new_di->id2.i_list;
+ le16_add_cpu(&el->l_count, -(inline_size /
+ sizeof(struct ocfs2_extent_rec)));
+ }
spin_lock(&new_oi->ip_lock);
new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL;
new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7684920..86a500d 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -546,9 +546,9 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
return 0;
if (irq_settings_can_request(desc)) {
- if (desc->action)
- if (irqflags & desc->action->flags & IRQF_SHARED)
- canrequest =1;
+ if (!desc->action ||
+ irqflags & desc->action->flags & IRQF_SHARED)
+ canrequest = 1;
}
irq_put_desc_unlock(desc, flags);
return canrequest;
diff --git a/kernel/timer.c b/kernel/timer.c
index dd93d90..7e0a770 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -145,9 +145,11 @@ static unsigned long round_jiffies_common(unsigned long j, int cpu,
/* now that we have rounded, subtract the extra skew again */
j -= cpu * 3;
- if (j <= jiffies) /* rounding ate our timeout entirely; */
- return original;
- return j;
+ /*
+ * Make sure j is still in the future. Otherwise return the
+ * unmodified value.
+ */
+ return time_is_after_jiffies(j) ? j : original;
}
/**

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,743 @@
diff --git a/Makefile b/Makefile
index 2fe1f6d..7f4df0c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 56
+SUBLEVEL = 57
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 2d6e649..6610e81 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -132,7 +132,7 @@ static void __cpuinit mxcsr_feature_mask_init(void)
clts();
if (cpu_has_fxsr) {
memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
- asm volatile("fxsave %0" : : "m" (fx_scratch));
+ asm volatile("fxsave %0" : "+m" (fx_scratch));
mask = fx_scratch.mxcsr_mask;
if (mask == 0)
mask = 0x0000ffbf;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 6ea287e2..9bdfcf5 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -117,6 +117,7 @@ struct acpi_battery {
struct acpi_device *device;
struct notifier_block pm_nb;
unsigned long update_time;
+ int revision;
int rate_now;
int capacity_now;
int voltage_now;
@@ -350,6 +351,7 @@ static struct acpi_offsets info_offsets[] = {
};
static struct acpi_offsets extended_info_offsets[] = {
+ {offsetof(struct acpi_battery, revision), 0},
{offsetof(struct acpi_battery, power_unit), 0},
{offsetof(struct acpi_battery, design_capacity), 0},
{offsetof(struct acpi_battery, full_charge_capacity), 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 45c5cf8..232119a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -296,6 +296,7 @@ enum intel_pch {
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
+#define QUIRK_NO_PCH_PWM_ENABLE (1<<2)
struct intel_fbdev;
struct intel_fbc_work;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 84867a8..0e35922 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9160,6 +9160,17 @@ static void quirk_ssc_force_disable(struct drm_device *dev)
dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
}
+/*
+ * Some machines (Dell XPS13) suffer broken backlight controls if
+ * BLM_PCH_PWM_ENABLE is set.
+ */
+static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
+ DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -9192,6 +9203,11 @@ struct intel_quirk intel_quirks[] = {
/* Sony Vaio Y cannot use SSC on LVDS */
{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+
+ /* Dell XPS13 HD Sandy Bridge */
+ { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
+ /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
+ { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
};
static void intel_init_quirks(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 207180d..ab4d990 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -1097,7 +1097,8 @@ bool intel_lvds_init(struct drm_device *dev)
goto failed;
out:
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev) &&
+ !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
u32 pwm;
pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index a746ba2..a956053 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -1007,7 +1007,7 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
soft = &pkt.soft.rfc1201;
- lp->hw.copy_from_card(dev, bufnum, 0, &pkt, sizeof(ARC_HDR_SIZE));
+ lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
if (pkt.hard.offset[0]) {
ofs = pkt.hard.offset[0];
length = 256 - ofs;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index d3695ed..a061e37 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -108,9 +108,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
/* Enable arbiter */
reg &= ~IXGBE_DPMCS_ARBDIS;
- /* Enable DFP and Recycle mode */
- reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
reg |= IXGBE_DPMCS_TSOEF;
+
/* Configure Max TSO packet size 34KB including payload and headers */
reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 4ce981c..2205db7 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -478,7 +478,7 @@ rx_status_loop:
while (1) {
u32 status, len;
- dma_addr_t mapping;
+ dma_addr_t mapping, new_mapping;
struct sk_buff *skb, *new_skb;
struct cp_desc *desc;
const unsigned buflen = cp->rx_buf_sz;
@@ -520,6 +520,13 @@ rx_status_loop:
goto rx_next;
}
+ new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
+ PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
+ dev->stats.rx_dropped++;
+ goto rx_next;
+ }
+
dma_unmap_single(&cp->pdev->dev, mapping,
buflen, PCI_DMA_FROMDEVICE);
@@ -531,12 +538,11 @@ rx_status_loop:
skb_put(skb, len);
- mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
- PCI_DMA_FROMDEVICE);
cp->rx_skb[rx_tail] = new_skb;
cp_rx_skb(cp, skb, desc);
rx++;
+ mapping = new_mapping;
rx_next:
cp->rx_ring[rx_tail].opts2 = 0;
@@ -704,6 +710,22 @@ static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
}
+static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
+ int first, int entry_last)
+{
+ int frag, index;
+ struct cp_desc *txd;
+ skb_frag_t *this_frag;
+ for (frag = 0; frag+first < entry_last; frag++) {
+ index = first+frag;
+ cp->tx_skb[index] = NULL;
+ txd = &cp->tx_ring[index];
+ this_frag = &skb_shinfo(skb)->frags[frag];
+ dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
+ skb_frag_size(this_frag), PCI_DMA_TODEVICE);
+ }
+}
+
static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
struct net_device *dev)
{
@@ -737,6 +759,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
len = skb->len;
mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&cp->pdev->dev, mapping))
+ goto out_dma_error;
+
txd->opts2 = opts2;
txd->addr = cpu_to_le64(mapping);
wmb();
@@ -774,6 +799,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
first_len = skb_headlen(skb);
first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
first_len, PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&cp->pdev->dev, first_mapping))
+ goto out_dma_error;
+
cp->tx_skb[entry] = skb;
entry = NEXT_TX(entry);
@@ -787,6 +815,11 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
mapping = dma_map_single(&cp->pdev->dev,
skb_frag_address(this_frag),
len, PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&cp->pdev->dev, mapping)) {
+ unwind_tx_frag_mapping(cp, skb, first_entry, entry);
+ goto out_dma_error;
+ }
+
eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
ctrl = eor | len | DescOwn;
@@ -845,11 +878,16 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
+out_unlock:
spin_unlock_irqrestore(&cp->lock, intr_flags);
cpw8(TxPoll, NormalTxPoll);
return NETDEV_TX_OK;
+out_dma_error:
+ kfree_skb(skb);
+ cp->dev->stats.tx_dropped++;
+ goto out_unlock;
}
/* Set or clear the multicast filter for this adaptor.
@@ -1020,6 +1058,10 @@ static int cp_refill_rx(struct cp_private *cp)
mapping = dma_map_single(&cp->pdev->dev, skb->data,
cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&cp->pdev->dev, mapping)) {
+ kfree_skb(skb);
+ goto err_out;
+ }
cp->rx_skb[i] = skb;
cp->rx_ring[i].opts2 = 0;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 5caba55..d89747a 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -43,7 +43,6 @@
#define EEPROM_MAC_OFFSET (0x01)
#define DEFAULT_TX_CSUM_ENABLE (true)
#define DEFAULT_RX_CSUM_ENABLE (true)
-#define DEFAULT_TSO_ENABLE (true)
#define SMSC75XX_INTERNAL_PHY_ID (1)
#define SMSC75XX_TX_OVERHEAD (8)
#define MAX_RX_FIFO_SIZE (20 * 1024)
@@ -1049,17 +1048,14 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write);
- if (DEFAULT_TX_CSUM_ENABLE) {
+ if (DEFAULT_TX_CSUM_ENABLE)
dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- if (DEFAULT_TSO_ENABLE)
- dev->net->features |= NETIF_F_SG |
- NETIF_F_TSO | NETIF_F_TSO6;
- }
+
if (DEFAULT_RX_CSUM_ENABLE)
dev->net->features |= NETIF_F_RXCSUM;
dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM;
/* Init all registers */
ret = smsc75xx_reset(dev);
@@ -1184,8 +1180,6 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
{
u32 tx_cmd_a, tx_cmd_b;
- skb_linearize(skb);
-
if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
struct sk_buff *skb2 =
skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 41c5237..2b8406a 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -821,6 +821,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
if (error != 0)
goto err_rx;
+ ath9k_hw_disable(priv->ah);
#ifdef CONFIG_MAC80211_LEDS
/* must be initialized before ieee80211_register_hw */
priv->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(priv->hw,
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 7d00a87..4be8ccc 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -1449,8 +1449,8 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
/* Allocate buffer and copy payload */
blk_size = MWIFIEX_SDIO_BLOCK_SIZE;
buf_block_len = (pkt_len + blk_size - 1) / blk_size;
- *(u16 *) &payload[0] = (u16) pkt_len;
- *(u16 *) &payload[2] = type;
+ *(__le16 *)&payload[0] = cpu_to_le16((u16)pkt_len);
+ *(__le16 *)&payload[2] = cpu_to_le16(type);
/*
* This is SDIO specific header
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 50f92d5..4d792a2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -856,13 +856,8 @@ void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
-void rt2x00queue_pause_queue(struct data_queue *queue)
+void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
{
- if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
- !test_bit(QUEUE_STARTED, &queue->flags) ||
- test_and_set_bit(QUEUE_PAUSED, &queue->flags))
- return;
-
switch (queue->qid) {
case QID_AC_VO:
case QID_AC_VI:
@@ -878,6 +873,15 @@ void rt2x00queue_pause_queue(struct data_queue *queue)
break;
}
}
+void rt2x00queue_pause_queue(struct data_queue *queue)
+{
+ if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+ !test_bit(QUEUE_STARTED, &queue->flags) ||
+ test_and_set_bit(QUEUE_PAUSED, &queue->flags))
+ return;
+
+ rt2x00queue_pause_queue_nocheck(queue);
+}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
void rt2x00queue_unpause_queue(struct data_queue *queue)
@@ -939,7 +943,7 @@ void rt2x00queue_stop_queue(struct data_queue *queue)
return;
}
- rt2x00queue_pause_queue(queue);
+ rt2x00queue_pause_queue_nocheck(queue);
queue->rt2x00dev->ops->lib->stop_queue(queue);
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 039c054..c75b27b 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -375,11 +375,18 @@ static void mxs_auart_settermios(struct uart_port *u,
static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
{
- u32 istatus, istat;
+ u32 istat;
struct mxs_auart_port *s = context;
u32 stat = readl(s->port.membase + AUART_STAT);
- istatus = istat = readl(s->port.membase + AUART_INTR);
+ istat = readl(s->port.membase + AUART_INTR);
+
+ /* ack irq */
+ writel(istat & (AUART_INTR_RTIS
+ | AUART_INTR_TXIS
+ | AUART_INTR_RXIS
+ | AUART_INTR_CTSMIS),
+ s->port.membase + AUART_INTR_CLR);
if (istat & AUART_INTR_CTSMIS) {
uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS);
@@ -398,12 +405,6 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
istat &= ~AUART_INTR_TXIS;
}
- writel(istatus & (AUART_INTR_RTIS
- | AUART_INTR_TXIS
- | AUART_INTR_RXIS
- | AUART_INTR_CTSMIS),
- s->port.membase + AUART_INTR_CLR);
-
return IRQ_HANDLED;
}
@@ -543,7 +544,7 @@ auart_console_write(struct console *co, const char *str, unsigned int count)
struct mxs_auart_port *s;
struct uart_port *port;
unsigned int old_ctrl0, old_ctrl2;
- unsigned int to = 1000;
+ unsigned int to = 20000;
if (co->index > MXS_AUART_PORTS || co->index < 0)
return;
@@ -564,18 +565,23 @@ auart_console_write(struct console *co, const char *str, unsigned int count)
uart_console_write(port, str, count, mxs_auart_console_putchar);
- /*
- * Finally, wait for transmitter to become empty
- * and restore the TCR
- */
+ /* Finally, wait for transmitter to become empty ... */
while (readl(port->membase + AUART_STAT) & AUART_STAT_BUSY) {
+ udelay(1);
if (!to--)
break;
- udelay(1);
}
- writel(old_ctrl0, port->membase + AUART_CTRL0);
- writel(old_ctrl2, port->membase + AUART_CTRL2);
+ /*
+ * ... and restore the TCR if we waited long enough for the transmitter
+ * to be idle. This might keep the transmitter enabled although it is
+ * unused, but that is better than to disable it while it is still
+ * transmitting.
+ */
+ if (!(readl(port->membase + AUART_STAT) & AUART_STAT_BUSY)) {
+ writel(old_ctrl0, port->membase + AUART_CTRL0);
+ writel(old_ctrl2, port->membase + AUART_CTRL2);
+ }
clk_disable(s->clk);
}
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 3568c8a..48bc91d 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -120,6 +120,7 @@ static int fill_event_metadata(struct fsnotify_group *group,
metadata->event_len = FAN_EVENT_METADATA_LEN;
metadata->metadata_len = FAN_EVENT_METADATA_LEN;
metadata->vers = FANOTIFY_METADATA_VERSION;
+ metadata->reserved = 0;
metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
metadata->pid = pid_vnr(event->tgid);
if (unlikely(event->mask & FAN_Q_OVERFLOW))
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d074cf0..8e810ba 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -250,9 +250,9 @@ perf_cgroup_match(struct perf_event *event)
return !event->cgrp || event->cgrp == cpuctx->cgrp;
}
-static inline void perf_get_cgroup(struct perf_event *event)
+static inline bool perf_tryget_cgroup(struct perf_event *event)
{
- css_get(&event->cgrp->css);
+ return css_tryget(&event->cgrp->css);
}
static inline void perf_put_cgroup(struct perf_event *event)
@@ -481,7 +481,11 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
event->cgrp = cgrp;
/* must be done before we fput() the file */
- perf_get_cgroup(event);
+ if (!perf_tryget_cgroup(event)) {
+ event->cgrp = NULL;
+ ret = -ENOENT;
+ goto out;
+ }
/*
* all events in a group must monitor
@@ -911,6 +915,15 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
}
/*
+ * Initialize event state based on the perf_event_attr::disabled.
+ */
+static inline void perf_event__state_init(struct perf_event *event)
+{
+ event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
+ PERF_EVENT_STATE_INACTIVE;
+}
+
+/*
* Called at perf_event creation and when events are attached/detached from a
* group.
*/
@@ -6058,8 +6071,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
event->overflow_handler = overflow_handler;
event->overflow_handler_context = context;
- if (attr->disabled)
- event->state = PERF_EVENT_STATE_OFF;
+ perf_event__state_init(event);
pmu = NULL;
@@ -6481,9 +6493,17 @@ SYSCALL_DEFINE5(perf_event_open,
mutex_lock(&gctx->mutex);
perf_remove_from_context(group_leader);
+
+ /*
+ * Removing from the context ends up with disabled
+ * event. What we want here is event in the initial
+ * startup state, ready to be add into new context.
+ */
+ perf_event__state_init(group_leader);
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
perf_remove_from_context(sibling);
+ perf_event__state_init(sibling);
put_ctx(gctx);
}
mutex_unlock(&gctx->mutex);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e955364..da4512f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5511,7 +5511,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
* idle runqueue:
*/
if (rq->cfs.load.weight)
- rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
+ rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
return rr_interval;
}
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index bf7a604..086c973 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -34,6 +34,8 @@ static int tcp_adv_win_scale_min = -31;
static int tcp_adv_win_scale_max = 31;
static int ip_ttl_min = 1;
static int ip_ttl_max = 255;
+static int tcp_syn_retries_min = 1;
+static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
@@ -276,7 +278,9 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_syn_retries,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &tcp_syn_retries_min,
+ .extra2 = &tcp_syn_retries_max
},
{
.procname = "tcp_synack_retries",
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 8110362..d5e4615 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -256,10 +256,12 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
{
struct mr6_table *mrt, *next;
+ rtnl_lock();
list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
list_del(&mrt->list);
ip6mr_free_table(mrt);
}
+ rtnl_unlock();
fib_rules_unregister(net->ipv6.mr6_rules_ops);
}
#else
@@ -286,7 +288,10 @@ static int __net_init ip6mr_rules_init(struct net *net)
static void __net_exit ip6mr_rules_exit(struct net *net)
{
+ rtnl_lock();
ip6mr_free_table(net->ipv6.mrt6);
+ net->ipv6.mrt6 = NULL;
+ rtnl_unlock();
}
#endif
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 5bbab6a..60109f4 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2073,6 +2073,7 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, const struct xfrm_policy *
pol->sadb_x_policy_type = IPSEC_POLICY_NONE;
}
pol->sadb_x_policy_dir = dir+1;
+ pol->sadb_x_policy_reserved = 0;
pol->sadb_x_policy_id = xp->index;
pol->sadb_x_policy_priority = xp->priority;
@@ -3108,7 +3109,9 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
pol->sadb_x_policy_dir = dir+1;
+ pol->sadb_x_policy_reserved = 0;
pol->sadb_x_policy_id = xp->index;
+ pol->sadb_x_policy_priority = xp->priority;
/* Set sadb_comb's. */
if (x->id.proto == IPPROTO_AH)
@@ -3496,6 +3499,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
pol->sadb_x_policy_dir = dir + 1;
+ pol->sadb_x_policy_reserved = 0;
pol->sadb_x_policy_id = 0;
pol->sadb_x_policy_priority = 0;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 8ce9feb..067aa2a 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -831,8 +831,14 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
- /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
- if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
+ /*
+ * Drop duplicate 802.11 retransmissions
+ * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
+ */
+ if (rx->skb->len >= 24 && rx->sta &&
+ !ieee80211_is_ctl(hdr->frame_control) &&
+ !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
+ !is_multicast_ether_addr(hdr->addr1)) {
if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
rx->sta->last_seq_ctrl[rx->seqno_idx] ==
hdr->seq_ctrl)) {
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index e25e490..6e38ef0 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -606,6 +606,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
struct sockaddr_atmpvc pvc;
int state;
+ memset(&pvc, 0, sizeof(pvc));
pvc.sap_family = AF_ATMPVC;
pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
pvc.sap_addr.vpi = flow->vcc->vpi;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index b7cddb9..7f59944 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1467,6 +1467,7 @@ static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_wrropt opt;
+ memset(&opt, 0, sizeof(opt));
opt.flags = 0;
opt.allot = cl->allot;
opt.priority = cl->priority + 1;
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 96eb168..3dd7207 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -205,6 +205,8 @@ static inline int sctp_cacc_skip(struct sctp_transport *primary,
*/
void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
{
+ memset(q, 0, sizeof(struct sctp_outq));
+
q->asoc = asoc;
INIT_LIST_HEAD(&q->out_chunk_list);
INIT_LIST_HEAD(&q->control_chunk_list);
@@ -212,13 +214,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
INIT_LIST_HEAD(&q->sacked);
INIT_LIST_HEAD(&q->abandoned);
- q->fast_rtx = 0;
- q->outstanding_bytes = 0;
q->empty = 1;
- q->cork = 0;
-
- q->malloced = 0;
- q->out_qlen = 0;
}
/* Free the outqueue structure and any related pending chunks.
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index f432c57..add9f94 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5081,12 +5081,14 @@ EXPORT_SYMBOL(cfg80211_testmode_alloc_event_skb);
void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
{
+ struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
void *hdr = ((void **)skb->cb)[1];
struct nlattr *data = ((void **)skb->cb)[2];
nla_nest_end(skb, data);
genlmsg_end(skb, hdr);
- genlmsg_multicast(skb, 0, nl80211_testmode_mcgrp.id, gfp);
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0,
+ nl80211_testmode_mcgrp.id, gfp);
}
EXPORT_SYMBOL(cfg80211_testmode_event);
#endif
@@ -7768,7 +7770,8 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
genlmsg_end(msg, hdr);
- genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp);
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
return;
nla_put_failure:
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index a58cf35..84717ce 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -582,7 +582,7 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
mutex_lock(&stream->device->lock);
switch (_IOC_NR(cmd)) {
case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
- put_user(SNDRV_COMPRESS_VERSION,
+ retval = put_user(SNDRV_COMPRESS_VERSION,
(int __user *)arg) ? -EFAULT : 0;
break;
case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):

View file

@@ -0,0 +1,509 @@
diff --git a/Makefile b/Makefile
index 7f4df0c..b19d508 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 57
+SUBLEVEL = 58
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index f77e341..0cd2d50 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -256,9 +256,12 @@ static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
unsigned long flags;
spin_lock_irqsave(&portdev->ports_lock, flags);
- list_for_each_entry(port, &portdev->ports, list)
- if (port->cdev->dev == dev)
+ list_for_each_entry(port, &portdev->ports, list) {
+ if (port->cdev->dev == dev) {
+ kref_get(&port->kref);
goto out;
+ }
+ }
port = NULL;
out:
spin_unlock_irqrestore(&portdev->ports_lock, flags);
@@ -630,6 +633,10 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
port = filp->private_data;
+ /* Port is hot-unplugged. */
+ if (!port->guest_connected)
+ return -ENODEV;
+
if (!port_has_data(port)) {
/*
* If nothing's connected on the host just return 0 in
@@ -646,7 +653,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
if (ret < 0)
return ret;
}
- /* Port got hot-unplugged. */
+ /* Port got hot-unplugged while we were waiting above. */
if (!port->guest_connected)
return -ENODEV;
/*
@@ -789,14 +796,14 @@ static int port_fops_open(struct inode *inode, struct file *filp)
struct port *port;
int ret;
+ /* We get the port with a kref here */
port = find_port_by_devt(cdev->dev);
+ if (!port) {
+ /* Port was unplugged before we could proceed */
+ return -ENXIO;
+ }
filp->private_data = port;
- /* Prevent against a port getting hot-unplugged at the same time */
- spin_lock_irq(&port->portdev->ports_lock);
- kref_get(&port->kref);
- spin_unlock_irq(&port->portdev->ports_lock);
-
/*
* Don't allow opening of console port devices -- that's done
* via /dev/hvc
@@ -1254,14 +1261,6 @@ static void remove_port(struct kref *kref)
port = container_of(kref, struct port, kref);
- sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
- device_destroy(pdrvdata.class, port->dev->devt);
- cdev_del(port->cdev);
-
- kfree(port->name);
-
- debugfs_remove(port->debugfs_file);
-
kfree(port);
}
@@ -1291,12 +1290,14 @@ static void unplug_port(struct port *port)
spin_unlock_irq(&port->portdev->ports_lock);
if (port->guest_connected) {
+ /* Let the app know the port is going down. */
+ send_sigio_to_port(port);
+
+ /* Do this after sigio is actually sent */
port->guest_connected = false;
port->host_connected = false;
- wake_up_interruptible(&port->waitqueue);
- /* Let the app know the port is going down. */
- send_sigio_to_port(port);
+ wake_up_interruptible(&port->waitqueue);
}
if (is_console_port(port)) {
@@ -1315,6 +1316,14 @@ static void unplug_port(struct port *port)
*/
port->portdev = NULL;
+ sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
+ device_destroy(pdrvdata.class, port->dev->devt);
+ cdev_del(port->cdev);
+
+ kfree(port->name);
+
+ debugfs_remove(port->debugfs_file);
+
/*
* Locks around here are not necessary - a port can't be
* opened after we removed the port struct from ports_list
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 54ec8905..034085d 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -215,7 +215,7 @@ static inline int adt7470_write_word_data(struct i2c_client *client, u8 reg,
u16 value)
{
return i2c_smbus_write_byte_data(client, reg, value & 0xFF)
- && i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
+ || i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
}
static void adt7470_init_client(struct i2c_client *client)
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 073d5ad..7926162 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3493,11 +3493,21 @@ static int megasas_init_fw(struct megasas_instance *instance)
break;
}
- /*
- * We expect the FW state to be READY
- */
- if (megasas_transition_to_ready(instance, 0))
- goto fail_ready_state;
+ if (megasas_transition_to_ready(instance, 0)) {
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
+ instance->instancet->adp_reset
+ (instance, instance->reg_set);
+ atomic_set(&instance->fw_reset_no_pci_access, 0);
+ dev_info(&instance->pdev->dev,
+ "megasas: FW restarted successfully from %s!\n",
+ __func__);
+
+ /*waitting for about 30 second before retry*/
+ ssleep(30);
+
+ if (megasas_transition_to_ready(instance, 0))
+ goto fail_ready_state;
+ }
/* Check if MSI-X is supported while in ready state */
msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 62b6168..e705ed3 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -2926,7 +2926,7 @@ static void nsp32_do_bus_reset(nsp32_hw_data *data)
* reset SCSI bus
*/
nsp32_write1(base, SCSI_BUS_CONTROL, BUSCTL_RST);
- udelay(RESET_HOLD_TIME);
+ mdelay(RESET_HOLD_TIME / 1000);
nsp32_write1(base, SCSI_BUS_CONTROL, 0);
for(i = 0; i < 5; i++) {
intrdat = nsp32_read2(base, IRQ_STATUS); /* dummy read */
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 07322ec..29fabff 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1025,6 +1025,9 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
{
int i, result;
+ if (sdev->skip_vpd_pages)
+ goto fail;
+
/* Ask for all the pages supported by this device */
result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
if (result)
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index a969ec1..aa54fad 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3890,7 +3890,8 @@ static void hub_events(void)
hub->hdev->children[i - 1];
dev_dbg(hub_dev, "warm reset port %d\n", i);
- if (!udev) {
+ if (!udev || !(portstatus &
+ USB_PORT_STAT_CONNECTION)) {
status = hub_port_reset(hub, i,
NULL, HUB_BH_RESET_TIME,
true);
@@ -3900,8 +3901,8 @@ static void hub_events(void)
usb_lock_device(udev);
status = usb_reset_device(udev);
usb_unlock_device(udev);
+ connect_change = 0;
}
- connect_change = 0;
}
if (connect_change)
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 6d0c62a..6dd3b61 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -369,7 +369,7 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
if (blobptr + attrsize > blobend)
break;
if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
- if (!attrsize)
+ if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN)
break;
if (!ses->domainName) {
ses->domainName =
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 73fea28..a5fcf19 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -38,6 +38,7 @@
#define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
#define MAX_SERVER_SIZE 15
#define MAX_SHARE_SIZE 80
+#define CIFS_MAX_DOMAINNAME_LEN 256 /* max domain name length */
#define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */
#define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 9cc574c..e7fe81d 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1698,7 +1698,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
if (string == NULL)
goto out_nomem;
- if (strnlen(string, 256) == 256) {
+ if (strnlen(string, CIFS_MAX_DOMAINNAME_LEN)
+ == CIFS_MAX_DOMAINNAME_LEN) {
printk(KERN_WARNING "CIFS: domain name too"
" long\n");
goto cifs_parse_mount_err;
@@ -2356,8 +2357,8 @@ cifs_put_smb_ses(struct cifs_ses *ses)
#ifdef CONFIG_KEYS
-/* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */
-#define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1)
+/* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
+#define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)
/* Populate username and pw fields from keyring if possible */
static int
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 551d0c2..de9b1c1 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -198,7 +198,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
bytes_ret = 0;
} else
bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName,
- 256, nls_cp);
+ CIFS_MAX_DOMAINNAME_LEN, nls_cp);
bcc_ptr += 2 * bytes_ret;
bcc_ptr += 2; /* account for null terminator */
@@ -256,8 +256,8 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
/* copy domain */
if (ses->domainName != NULL) {
- strncpy(bcc_ptr, ses->domainName, 256);
- bcc_ptr += strnlen(ses->domainName, 256);
+ strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+ bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
} /* else we will send a null domain name
so the server will default to its own domain */
*bcc_ptr = 0;
diff --git a/fs/dcache.c b/fs/dcache.c
index e498de2..9d39de4 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1556,7 +1556,7 @@ EXPORT_SYMBOL(d_find_any_alias);
*/
struct dentry *d_obtain_alias(struct inode *inode)
{
- static const struct qstr anonstring = { .name = "" };
+ static const struct qstr anonstring = { .name = "/", .len = 1 };
struct dentry *tmp;
struct dentry *res;
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index b80bc84..9dc6e76 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -527,8 +527,7 @@ EXPORT_SYMBOL_GPL(debugfs_remove);
*/
void debugfs_remove_recursive(struct dentry *dentry)
{
- struct dentry *child;
- struct dentry *parent;
+ struct dentry *child, *next, *parent;
if (!dentry)
return;
@@ -538,61 +537,37 @@ void debugfs_remove_recursive(struct dentry *dentry)
return;
parent = dentry;
+ down:
mutex_lock(&parent->d_inode->i_mutex);
+ list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) {
+ if (!debugfs_positive(child))
+ continue;
- while (1) {
- /*
- * When all dentries under "parent" has been removed,
- * walk up the tree until we reach our starting point.
- */
- if (list_empty(&parent->d_subdirs)) {
- mutex_unlock(&parent->d_inode->i_mutex);
- if (parent == dentry)
- break;
- parent = parent->d_parent;
- mutex_lock(&parent->d_inode->i_mutex);
- }
- child = list_entry(parent->d_subdirs.next, struct dentry,
- d_u.d_child);
- next_sibling:
-
- /*
- * If "child" isn't empty, walk down the tree and
- * remove all its descendants first.
- */
+ /* perhaps simple_empty(child) makes more sense */
if (!list_empty(&child->d_subdirs)) {
mutex_unlock(&parent->d_inode->i_mutex);
parent = child;
- mutex_lock(&parent->d_inode->i_mutex);
- continue;
+ goto down;
}
- __debugfs_remove(child, parent);
- if (parent->d_subdirs.next == &child->d_u.d_child) {
- /*
- * Try the next sibling.
- */
- if (child->d_u.d_child.next != &parent->d_subdirs) {
- child = list_entry(child->d_u.d_child.next,
- struct dentry,
- d_u.d_child);
- goto next_sibling;
- }
-
- /*
- * Avoid infinite loop if we fail to remove
- * one dentry.
- */
- mutex_unlock(&parent->d_inode->i_mutex);
- break;
- }
- simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+ up:
+ if (!__debugfs_remove(child, parent))
+ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
- parent = dentry->d_parent;
+ mutex_unlock(&parent->d_inode->i_mutex);
+ child = parent;
+ parent = parent->d_parent;
mutex_lock(&parent->d_inode->i_mutex);
- __debugfs_remove(dentry, parent);
+
+ if (child != dentry) {
+ next = list_entry(child->d_u.d_child.next, struct dentry,
+ d_u.d_child);
+ goto up;
+ }
+
+ if (!__debugfs_remove(child, parent))
+ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
mutex_unlock(&parent->d_inode->i_mutex);
- simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
EXPORT_SYMBOL_GPL(debugfs_remove_recursive);
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index e42b468..b1c3f2a 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -687,11 +687,8 @@ repeat_in_this_group:
ino = ext4_find_next_zero_bit((unsigned long *)
inode_bitmap_bh->b_data,
EXT4_INODES_PER_GROUP(sb), ino);
- if (ino >= EXT4_INODES_PER_GROUP(sb)) {
- if (++group == ngroups)
- group = 0;
- continue;
- }
+ if (ino >= EXT4_INODES_PER_GROUP(sb))
+ goto next_group;
if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) {
ext4_error(sb, "reserved inode found cleared - "
"inode=%lu", ino + 1);
@@ -709,6 +706,9 @@ repeat_in_this_group:
goto got; /* we grabbed the inode! */
if (ino < EXT4_INODES_PER_GROUP(sb))
goto repeat_in_this_group;
+next_group:
+ if (++group == ngroups)
+ group = 0;
}
err = -ENOSPC;
goto out;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b93de81..8bbb14c 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3232,7 +3232,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
if (test_opt(sb, DIOREAD_NOLOCK)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
- "both data=journal and delalloc");
+ "both data=journal and dioread_nolock");
goto failed_mount;
}
if (test_opt(sb, DELALLOC))
@@ -4397,6 +4397,21 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
+ if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
+ if (test_opt2(sb, EXPLICIT_DELALLOC)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "both data=journal and delalloc");
+ err = -EINVAL;
+ goto restore_opts;
+ }
+ if (test_opt(sb, DIOREAD_NOLOCK)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "both data=journal and dioread_nolock");
+ err = -EINVAL;
+ goto restore_opts;
+ }
+ }
+
if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
ext4_abort(sb, "Abort forced by user");
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 176a939..16cad53 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -71,6 +71,8 @@ struct trace_iterator {
/* trace_seq for __print_flags() and __print_symbolic() etc. */
struct trace_seq tmp_seq;
+ cpumask_var_t started;
+
/* The below is zeroed out in pipe_read */
struct trace_seq seq;
struct trace_entry *ent;
@@ -83,7 +85,7 @@ struct trace_iterator {
loff_t pos;
long idx;
- cpumask_var_t started;
+ /* All new field here will be zeroed out in pipe_read */
};
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 13cd224..5dd9626 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3478,6 +3478,7 @@ waitagain:
memset(&iter->seq, 0,
sizeof(struct trace_iterator) -
offsetof(struct trace_iterator, seq));
+ cpumask_clear(iter->started);
iter->pos = -1;
trace_event_read_lock();
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 35ae568..9593f27 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -15,7 +15,8 @@ const char *map_type__name[MAP__NR_TYPES] = {
static inline int is_anon_memory(const char *filename)
{
- return strcmp(filename, "//anon") == 0;
+ return !strcmp(filename, "//anon") ||
+ !strcmp(filename, "/anon_hugepage (deleted)");
}
static inline int is_no_dso_memory(const char *filename)

File diff suppressed because it is too large

View file

@@ -0,0 +1,579 @@
diff --git a/Makefile b/Makefile
index efa1453..0027fbe 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 59
+SUBLEVEL = 60
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 017d48a..f8b0260 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -213,6 +213,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
e820_add_region(start, end - start, type);
}
+void xen_ignore_unusable(struct e820entry *list, size_t map_size)
+{
+ struct e820entry *entry;
+ unsigned int i;
+
+ for (i = 0, entry = list; i < map_size; i++, entry++) {
+ if (entry->type == E820_UNUSABLE)
+ entry->type = E820_RAM;
+ }
+}
+
/**
* machine_specific_memory_setup - Hook for machine specific memory setup.
**/
@@ -251,6 +262,17 @@ char * __init xen_memory_setup(void)
}
BUG_ON(rc);
+ /*
+ * Xen won't allow a 1:1 mapping to be created to UNUSABLE
+ * regions, so if we're using the machine memory map leave the
+ * region as RAM as it is in the pseudo-physical map.
+ *
+ * UNUSABLE regions in domUs are not handled and will need
+ * a patch in the future.
+ */
+ if (xen_initial_domain())
+ xen_ignore_unusable(map, memmap.nr_entries);
+
/* Make sure the Xen-supplied memory map is well-ordered. */
sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index f63a588..f5c35be 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
/* Disable sending Early R_OK.
* With "cached read" HDD testing and multiple ports busy on a SATA
- * host controller, 3726 PMP will very rarely drop a deferred
+ * host controller, 3x26 PMP will very rarely drop a deferred
* R_OK that was intended for the host. Symptom will be all
* 5 drives under test will timeout, get reset, and recover.
*/
- if (vendor == 0x1095 && devid == 0x3726) {
+ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
u32 reg;
err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
if (err_mask) {
rc = -EIO;
- reason = "failed to read Sil3726 Private Register";
+ reason = "failed to read Sil3x26 Private Register";
goto fail;
}
reg &= ~0x1;
err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
if (err_mask) {
rc = -EIO;
- reason = "failed to write Sil3726 Private Register";
+ reason = "failed to write Sil3x26 Private Register";
goto fail;
}
}
@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
u16 devid = sata_pmp_gscr_devid(gscr);
struct ata_link *link;
- if (vendor == 0x1095 && devid == 0x3726) {
- /* sil3726 quirks */
+ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+ /* sil3x26 quirks */
ata_for_each_link(link, ap, EDGE) {
/* link reports offline after LPM */
link->flags |= ATA_LFLAG_NO_LPM;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index dde62bf..d031932 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -502,6 +502,8 @@
will not assert AGPBUSY# and will only
be delivered when out of C3. */
#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
+#define INSTPM_TLB_INVALIDATE (1<<9)
+#define INSTPM_SYNC_FLUSH (1<<5)
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC2 0x020dc
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c17325c..99a9df8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -767,6 +767,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
POSTING_READ(mmio);
+
+ /* Flush the TLB for this page */
+ if (INTEL_INFO(dev)->gen >= 6) {
+ u32 reg = RING_INSTPM(ring->mmio_base);
+ I915_WRITE(reg,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+ 1000))
+ DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+ ring->name);
+ }
}
static int
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 18054d9..dbec2ff 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -522,9 +522,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
- memcpy(extra, &addr, sizeof(struct sockaddr) * data->length);
+ memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
data->flags = 1; /* has quality information */
- memcpy(extra + sizeof(struct sockaddr) * data->length, &qual,
+ memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
sizeof(struct iw_quality) * data->length);
kfree(addr);
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index a66b93b..1662fcc 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
goto exit;
err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
- USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
+ USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
if (err < 0)
goto exit;
+ memcpy(&ret, buf, sizeof(ret));
+
if (ret & 0x80) {
err = -EIO;
goto exit;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 91a375f..17fad3b 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -390,6 +390,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
mem = (unsigned long)
dt_alloc(size + 4, __alignof__(struct device_node));
+ memset((void *)mem, 0, size);
+
((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
pr_debug(" unflattening %lx...\n", mem);
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index e1b4f80..5c87270 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
zfcp_erp_action_dismiss(&port->erp_action);
- else
- shost_for_each_device(sdev, port->adapter->scsi_host)
+ else {
+ spin_lock(port->adapter->scsi_host->host_lock);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
zfcp_erp_action_dismiss_lun(sdev);
+ spin_unlock(port->adapter->scsi_host->host_lock);
+ }
}
static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
{
struct scsi_device *sdev;
- shost_for_each_device(sdev, port->adapter->scsi_host)
+ spin_lock(port->adapter->scsi_host->host_lock);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
_zfcp_erp_lun_reopen(sdev, clear, id, 0);
+ spin_unlock(port->adapter->scsi_host->host_lock);
}
static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -1435,8 +1440,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
atomic_set_mask(common_mask, &port->status);
read_unlock_irqrestore(&adapter->port_list_lock, flags);
- shost_for_each_device(sdev, adapter->scsi_host)
+ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, adapter->scsi_host)
atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}
/**
@@ -1470,11 +1477,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
- shost_for_each_device(sdev, adapter->scsi_host) {
+ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, adapter->scsi_host) {
atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
if (clear_counter)
atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
}
+ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}
/**
@@ -1488,16 +1497,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
{
struct scsi_device *sdev;
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+ unsigned long flags;
atomic_set_mask(mask, &port->status);
if (!common_mask)
return;
- shost_for_each_device(sdev, port->adapter->scsi_host)
+ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
atomic_set_mask(common_mask,
&sdev_to_zfcp(sdev)->status);
+ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}
/**
@@ -1512,6 +1524,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
struct scsi_device *sdev;
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
+ unsigned long flags;
atomic_clear_mask(mask, &port->status);
@@ -1521,13 +1534,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
if (clear_counter)
atomic_set(&port->erp_counter, 0);
- shost_for_each_device(sdev, port->adapter->scsi_host)
+ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port) {
atomic_clear_mask(common_mask,
&sdev_to_zfcp(sdev)->status);
if (clear_counter)
atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
}
+ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}
/**
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index e76d003..52c6b59 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
- spin_lock_irq(&qdio->req_q_lock);
if (atomic_read(&qdio->req_q_free) ||
!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return 1;
- spin_unlock_irq(&qdio->req_q_lock);
return 0;
}
@@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
long ret;
- spin_unlock_irq(&qdio->req_q_lock);
- ret = wait_event_interruptible_timeout(qdio->req_q_wq,
- zfcp_qdio_sbal_check(qdio), 5 * HZ);
+ ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
+ zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return -EIO;
@@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
}
- spin_lock_irq(&qdio->req_q_lock);
return -EIO;
}
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 417c133..33dcad6 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -324,7 +324,7 @@ static void init_evtchn_cpu_bindings(void)
for_each_possible_cpu(i)
memset(per_cpu(cpu_evtchn_mask, i),
- (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
+ (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
}
static inline void clear_evtchn(int port)
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index dc9a913..2d8be51 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -345,8 +345,7 @@ static void nilfs_end_bio_write(struct bio *bio, int err)
if (err == -EOPNOTSUPP) {
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
- bio_put(bio);
- /* to be detected by submit_seg_bio() */
+ /* to be detected by nilfs_segbuf_submit_bio() */
}
if (!uptodate)
@@ -377,12 +376,12 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
bio->bi_private = segbuf;
bio_get(bio);
submit_bio(mode, bio);
+ segbuf->sb_nbio++;
if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
bio_put(bio);
err = -EOPNOTSUPP;
goto failed;
}
- segbuf->sb_nbio++;
bio_put(bio);
wi->bio = NULL;
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 6c6c20e..b305b31 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -530,6 +530,63 @@ do { \
? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
+#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
+ lock, ret) \
+do { \
+ DEFINE_WAIT(__wait); \
+ \
+ for (;;) { \
+ prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (signal_pending(current)) { \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
+ spin_unlock_irq(&lock); \
+ ret = schedule_timeout(ret); \
+ spin_lock_irq(&lock); \
+ if (!ret) \
+ break; \
+ } \
+ finish_wait(&wq, &__wait); \
+} while (0)
+
+/**
+ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
+ * The condition is checked under the lock. This is expected
+ * to be called with the lock taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before schedule()
+ * and reacquired afterwards.
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or signal is received. The @condition is
+ * checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before going to sleep and is reacquired afterwards.
+ *
+ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
+ * was interrupted by a signal, and the remaining jiffies otherwise
+ * if the condition evaluated to true before the timeout elapsed.
+ */
+#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
+ timeout) \
+({ \
+ int __ret = timeout; \
+ \
+ if (!(condition)) \
+ __wait_event_interruptible_lock_irq_timeout( \
+ wq, condition, lock, __ret); \
+ __ret; \
+})
+
#define __wait_event_killable(wq, condition, ret) \
do { \
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a64b94e..575d092 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -128,6 +128,7 @@ struct worker {
};
struct work_struct *current_work; /* L: work being processed */
+ work_func_t current_func; /* L: current_work's fn */
struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
struct list_head scheduled; /* L: scheduled works */
struct task_struct *task; /* I: worker task */
@@ -838,7 +839,8 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
struct hlist_node *tmp;
hlist_for_each_entry(worker, tmp, bwh, hentry)
- if (worker->current_work == work)
+ if (worker->current_work == work &&
+ worker->current_func == work->func)
return worker;
return NULL;
}
@@ -848,9 +850,27 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
* @gcwq: gcwq of interest
* @work: work to find worker for
*
- * Find a worker which is executing @work on @gcwq. This function is
- * identical to __find_worker_executing_work() except that this
- * function calculates @bwh itself.
+ * Find a worker which is executing @work on @gcwq by searching
+ * @gcwq->busy_hash which is keyed by the address of @work. For a worker
+ * to match, its current execution should match the address of @work and
+ * its work function. This is to avoid unwanted dependency between
+ * unrelated work executions through a work item being recycled while still
+ * being executed.
+ *
+ * This is a bit tricky. A work item may be freed once its execution
+ * starts and nothing prevents the freed area from being recycled for
+ * another work item. If the same work item address ends up being reused
+ * before the original execution finishes, workqueue will identify the
+ * recycled work item as currently executing and make it wait until the
+ * current execution finishes, introducing an unwanted dependency.
+ *
+ * This function checks the work item address, work function and workqueue
+ * to avoid false positives. Note that this isn't complete as one may
+ * construct a work function which can introduce dependency onto itself
+ * through a recycled work item. Well, if somebody wants to shoot oneself
+ * in the foot that badly, there's only so much we can do, and if such
+ * deadlock actually occurs, it should be easy to locate the culprit work
+ * function.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
@@ -1721,10 +1741,9 @@ static void move_linked_works(struct work_struct *work, struct list_head *head,
*nextp = n;
}
-static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
+static void cwq_activate_delayed_work(struct work_struct *work)
{
- struct work_struct *work = list_first_entry(&cwq->delayed_works,
- struct work_struct, entry);
+ struct cpu_workqueue_struct *cwq = get_work_cwq(work);
struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
trace_workqueue_activate_work(work);
@@ -1733,6 +1752,14 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
cwq->nr_active++;
}
+static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
+{
+ struct work_struct *work = list_first_entry(&cwq->delayed_works,
+ struct work_struct, entry);
+
+ cwq_activate_delayed_work(work);
+}
+
/**
* cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
* @cwq: cwq of interest
@@ -1804,7 +1831,6 @@ __acquires(&gcwq->lock)
struct global_cwq *gcwq = cwq->gcwq;
struct hlist_head *bwh = busy_worker_head(gcwq, work);
bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
- work_func_t f = work->func;
int work_color;
struct worker *collision;
#ifdef CONFIG_LOCKDEP
@@ -1833,6 +1859,7 @@ __acquires(&gcwq->lock)
debug_work_deactivate(work);
hlist_add_head(&worker->hentry, bwh);
worker->current_work = work;
+ worker->current_func = work->func;
worker->current_cwq = cwq;
work_color = get_work_color(work);
@@ -1870,7 +1897,7 @@ __acquires(&gcwq->lock)
lock_map_acquire_read(&cwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map);
trace_workqueue_execute_start(work);
- f(work);
+ worker->current_func(work);
/*
* While we must be careful to not use "work" after this, the trace
* point will only record its address.
@@ -1880,11 +1907,10 @@ __acquires(&gcwq->lock)
lock_map_release(&cwq->wq->lockdep_map);
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
- printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
- "%s/0x%08x/%d\n",
- current->comm, preempt_count(), task_pid_nr(current));
- printk(KERN_ERR " last function: ");
- print_symbol("%s\n", (unsigned long)f);
+ pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
+ " last function: %pf\n",
+ current->comm, preempt_count(), task_pid_nr(current),
+ worker->current_func);
debug_show_held_locks(current);
dump_stack();
}
@@ -1898,6 +1924,7 @@ __acquires(&gcwq->lock)
/* we're done with it, release */
hlist_del_init(&worker->hentry);
worker->current_work = NULL;
+ worker->current_func = NULL;
worker->current_cwq = NULL;
cwq_dec_nr_in_flight(cwq, work_color, false);
}
@@ -2625,6 +2652,18 @@ static int try_to_grab_pending(struct work_struct *work)
smp_rmb();
if (gcwq == get_work_gcwq(work)) {
debug_work_deactivate(work);
+
+ /*
+ * A delayed work item cannot be grabbed directly
+ * because it might have linked NO_COLOR work items
+ * which, if left on the delayed_list, will confuse
+ * cwq->nr_active management later on and cause
+ * stall. Make sure the work item is activated
+ * before grabbing.
+ */
+ if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
+ cwq_activate_delayed_work(work);
+
list_del_init(&work->entry);
cwq_dec_nr_in_flight(get_work_cwq(work),
get_work_color(work),

View file

@@ -0,0 +1,444 @@
diff --git a/Makefile b/Makefile
index 0027fbe..bc4dd5b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 60
+SUBLEVEL = 61
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index feab3ba..b17dd69 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -979,6 +979,7 @@ config RELOCATABLE
must live at a different physical address than the primary
kernel.
+# This value must have zeroes in the bottom 60 bits otherwise lots will break
config PAGE_OFFSET
hex
default "0xc000000000000000"
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index f072e97..2e6c4e5a 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -211,9 +211,19 @@ extern long long virt_phys_offset;
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
+#ifdef CONFIG_PPC64
+/*
+ * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
+ * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
+ */
+#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
+#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)
+
+#else /* 32-bit, non book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
+#endif
/*
* Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 3251d4b..d1a9674 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -978,6 +978,10 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
ec_skip_dsdt_scan, "HP Folio 13", {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL},
+ {
+ ec_validate_ecdt, "ASUS hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
{},
};
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 7dda4f7..d63a06b 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -154,6 +154,8 @@ static ssize_t show_mem_removable(struct device *dev,
container_of(dev, struct memory_block, dev);
for (i = 0; i < sections_per_block; i++) {
+ if (!present_section_nr(mem->start_section_nr + i))
+ continue;
pfn = section_nr_to_pfn(mem->start_section_nr + i);
ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
}
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index bb80853..ca72d1f 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -69,7 +69,7 @@ bool regmap_precious(struct regmap *map, unsigned int reg)
}
static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
- unsigned int num)
+ size_t num)
{
unsigned int i;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d031932..6884d01 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3741,7 +3741,7 @@
#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22)
/* legacy values */
#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 21ee782..e1978a2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -29,7 +29,9 @@
#include "drmP.h"
#include "ttm/ttm_bo_driver.h"
-#define VMW_PPN_SIZE sizeof(unsigned long)
+#define VMW_PPN_SIZE (sizeof(unsigned long))
+/* A future safe maximum remap size. */
+#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
static int vmw_gmr2_bind(struct vmw_private *dev_priv,
struct page *pages[],
@@ -38,43 +40,61 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
{
SVGAFifoCmdDefineGMR2 define_cmd;
SVGAFifoCmdRemapGMR2 remap_cmd;
- uint32_t define_size = sizeof(define_cmd) + 4;
- uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
uint32_t *cmd;
uint32_t *cmd_orig;
+ uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
+ uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
+ uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
+ uint32_t remap_pos = 0;
+ uint32_t cmd_size = define_size + remap_size;
uint32_t i;
- cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
+ cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
define_cmd.gmrId = gmr_id;
define_cmd.numPages = num_pages;
+ *cmd++ = SVGA_CMD_DEFINE_GMR2;
+ memcpy(cmd, &define_cmd, sizeof(define_cmd));
+ cmd += sizeof(define_cmd) / sizeof(*cmd);
+
+ /*
+ * Need to split the command if there are too many
+ * pages that goes into the gmr.
+ */
+
remap_cmd.gmrId = gmr_id;
remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
- remap_cmd.offsetPages = 0;
- remap_cmd.numPages = num_pages;
- *cmd++ = SVGA_CMD_DEFINE_GMR2;
- memcpy(cmd, &define_cmd, sizeof(define_cmd));
- cmd += sizeof(define_cmd) / sizeof(uint32);
+ while (num_pages > 0) {
+ unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
+
+ remap_cmd.offsetPages = remap_pos;
+ remap_cmd.numPages = nr;
- *cmd++ = SVGA_CMD_REMAP_GMR2;
- memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
- cmd += sizeof(remap_cmd) / sizeof(uint32);
+ *cmd++ = SVGA_CMD_REMAP_GMR2;
+ memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
+ cmd += sizeof(remap_cmd) / sizeof(*cmd);
- for (i = 0; i < num_pages; ++i) {
- if (VMW_PPN_SIZE <= 4)
- *cmd = page_to_pfn(*pages++);
- else
- *((uint64_t *)cmd) = page_to_pfn(*pages++);
+ for (i = 0; i < nr; ++i) {
+ if (VMW_PPN_SIZE <= 4)
+ *cmd = page_to_pfn(*pages++);
+ else
+ *((uint64_t *)cmd) = page_to_pfn(*pages++);
- cmd += VMW_PPN_SIZE / sizeof(*cmd);
+ cmd += VMW_PPN_SIZE / sizeof(*cmd);
+ }
+
+ num_pages -= nr;
+ remap_pos += nr;
}
- vmw_fifo_commit(dev_priv, define_size + remap_size);
+ BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
+
+ vmw_fifo_commit(dev_priv, cmd_size);
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 3e40a64..b290a8e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -448,6 +448,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
struct ieee80211_conf *cur_conf = &priv->hw->conf;
bool txok;
int slot;
+ int hdrlen, padsize;
slot = strip_drv_header(priv, skb);
if (slot < 0) {
@@ -504,6 +505,15 @@ send_mac80211:
ath9k_htc_tx_clear_slot(priv, slot);
+ /* Remove padding before handing frame back to mac80211 */
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+
+ padsize = hdrlen & 3;
+ if (padsize && skb->len > hdrlen + padsize) {
+ memmove(skb->data + padsize, skb->data, hdrlen);
+ skb_pull(skb, padsize);
+ }
+
/* Send status to mac80211 */
ieee80211_tx_status(priv->hw, skb);
}
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 409ed06..3223daa 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -4415,9 +4415,9 @@ il4965_irq_tasklet(struct il_priv *il)
set_bit(S_RFKILL, &il->status);
} else {
clear_bit(S_RFKILL, &il->status);
- wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
il_force_reset(il, true);
}
+ wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
handled |= CSR_INT_BIT_RF_KILL;
}
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 52a5f62..2d88ce8 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -97,9 +97,12 @@ target_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
buf[7] = 0x2; /* CmdQue=1 */
- snprintf(&buf[8], 8, "LIO-ORG");
- snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model);
- snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision);
+ memcpy(&buf[8], "LIO-ORG ", 8);
+ memset(&buf[16], 0x20, 16);
+ memcpy(&buf[16], dev->se_sub_dev->t10_wwn.model,
+ min_t(size_t, strlen(dev->se_sub_dev->t10_wwn.model), 16));
+ memcpy(&buf[32], dev->se_sub_dev->t10_wwn.revision,
+ min_t(size_t, strlen(dev->se_sub_dev->t10_wwn.revision), 4));
buf[4] = 31; /* Set additional length to 31 */
return 0;
diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
index 6f4dd83..3749688 100644
--- a/drivers/tty/hvc/hvsi_lib.c
+++ b/drivers/tty/hvc/hvsi_lib.c
@@ -341,8 +341,8 @@ void hvsilib_establish(struct hvsi_priv *pv)
pr_devel("HVSI@%x: ... waiting handshake\n", pv->termno);
- /* Try for up to 200s */
- for (timeout = 0; timeout < 20; timeout++) {
+ /* Try for up to 400ms */
+ for (timeout = 0; timeout < 40; timeout++) {
if (pv->established)
goto established;
if (!hvsi_get_packet(pv))
diff --git a/fs/bio.c b/fs/bio.c
index 84da885..c0e5a4e 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -787,12 +787,22 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
int bio_uncopy_user(struct bio *bio)
{
struct bio_map_data *bmd = bio->bi_private;
- int ret = 0;
+ struct bio_vec *bvec;
+ int ret = 0, i;
- if (!bio_flagged(bio, BIO_NULL_MAPPED))
- ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
- bmd->nr_sgvecs, bio_data_dir(bio) == READ,
- 0, bmd->is_our_pages);
+ if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
+ /*
+ * if we're in a workqueue, the request is orphaned, so
+ * don't copy into a random user address space, just free.
+ */
+ if (current->mm)
+ ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
+ bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+ 0, bmd->is_our_pages);
+ else if (bmd->is_our_pages)
+ __bio_for_each_segment(bvec, bio, i, 0)
+ __free_page(bvec->bv_page);
+ }
bio_free_map_data(bmd);
bio_put(bio);
return ret;
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 9197a1b..b6f17c0 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -3047,6 +3047,14 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
dir_index = (u32) filp->f_pos;
+ /*
+ * NFSv4 reserves cookies 1 and 2 for . and .. so we add
+ * the value we return to the vfs is one greater than the
+ * one we use internally.
+ */
+ if (dir_index)
+ dir_index--;
+
if (dir_index > 1) {
struct dir_table_slot dirtab_slot;
@@ -3086,7 +3094,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
if (p->header.flag & BT_INTERNAL) {
jfs_err("jfs_readdir: bad index table");
DT_PUTPAGE(mp);
- filp->f_pos = -1;
+ filp->f_pos = DIREND;
return 0;
}
} else {
@@ -3094,7 +3102,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
/*
* self "."
*/
- filp->f_pos = 0;
+ filp->f_pos = 1;
if (filldir(dirent, ".", 1, 0, ip->i_ino,
DT_DIR))
return 0;
@@ -3102,7 +3110,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
/*
* parent ".."
*/
- filp->f_pos = 1;
+ filp->f_pos = 2;
if (filldir(dirent, "..", 2, 1, PARENT(ip), DT_DIR))
return 0;
@@ -3123,24 +3131,25 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
/*
* Legacy filesystem - OS/2 & Linux JFS < 0.3.6
*
- * pn = index = 0: First entry "."
- * pn = 0; index = 1: Second entry ".."
+ * pn = 0; index = 1: First entry "."
+ * pn = 0; index = 2: Second entry ".."
* pn > 0: Real entries, pn=1 -> leftmost page
* pn = index = -1: No more entries
*/
dtpos = filp->f_pos;
- if (dtpos == 0) {
+ if (dtpos < 2) {
/* build "." entry */
+ filp->f_pos = 1;
if (filldir(dirent, ".", 1, filp->f_pos, ip->i_ino,
DT_DIR))
return 0;
- dtoffset->index = 1;
+ dtoffset->index = 2;
filp->f_pos = dtpos;
}
if (dtoffset->pn == 0) {
- if (dtoffset->index == 1) {
+ if (dtoffset->index == 2) {
/* build ".." entry */
if (filldir(dirent, "..", 2, filp->f_pos,
@@ -3233,6 +3242,12 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
}
jfs_dirent->position = unique_pos++;
}
+ /*
+ * We add 1 to the index because we may
+ * use a value of 2 internally, and NFSv4
+ * doesn't like that.
+ */
+ jfs_dirent->position++;
} else {
jfs_dirent->position = dtpos;
len = min(d_namleft, DTLHDRDATALEN_LEGACY);
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index b97a3dd..6997cdd 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -233,10 +233,13 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
pgfrom_base -= copy;
vto = kmap_atomic(*pgto);
- vfrom = kmap_atomic(*pgfrom);
- memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
+ if (*pgto != *pgfrom) {
+ vfrom = kmap_atomic(*pgfrom);
+ memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
+ kunmap_atomic(vfrom);
+ } else
+ memmove(vto + pgto_base, vto + pgfrom_base, copy);
flush_dcache_page(*pgto);
- kunmap_atomic(vfrom);
kunmap_atomic(vto);
} while ((len -= copy) != 0);
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
index d7ccf28..4589acd 100644
--- a/sound/isa/opti9xx/opti92x-ad1848.c
+++ b/sound/isa/opti9xx/opti92x-ad1848.c
@@ -173,11 +173,7 @@ MODULE_DEVICE_TABLE(pnp_card, snd_opti9xx_pnpids);
#endif /* CONFIG_PNP */
-#ifdef OPTi93X
-#define DEV_NAME "opti93x"
-#else
-#define DEV_NAME "opti92x"
-#endif
+#define DEV_NAME KBUILD_MODNAME
static char * snd_opti9xx_names[] = {
"unknown",
@@ -1126,7 +1122,7 @@ static void __devexit snd_opti9xx_pnp_remove(struct pnp_card_link * pcard)
static struct pnp_card_driver opti9xx_pnpc_driver = {
.flags = PNP_DRIVER_RES_DISABLE,
- .name = "opti9xx",
+ .name = DEV_NAME,
.id_table = snd_opti9xx_pnpids,
.probe = snd_opti9xx_pnp_probe,
.remove = __devexit_p(snd_opti9xx_pnp_remove),

View file

@@ -0,0 +1,567 @@
diff --git a/Makefile b/Makefile
index bc4dd5b..3f23cb7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 61
+SUBLEVEL = 62
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/m32r/boot/compressed/Makefile b/arch/m32r/boot/compressed/Makefile
index 177716b..01729c2 100644
--- a/arch/m32r/boot/compressed/Makefile
+++ b/arch/m32r/boot/compressed/Makefile
@@ -43,9 +43,9 @@ endif
OBJCOPYFLAGS += -R .empty_zero_page
-suffix_$(CONFIG_KERNEL_GZIP) = gz
-suffix_$(CONFIG_KERNEL_BZIP2) = bz2
-suffix_$(CONFIG_KERNEL_LZMA) = lzma
+suffix-$(CONFIG_KERNEL_GZIP) = gz
+suffix-$(CONFIG_KERNEL_BZIP2) = bz2
+suffix-$(CONFIG_KERNEL_LZMA) = lzma
$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) FORCE
$(call if_changed,ld)
diff --git a/arch/m32r/boot/compressed/misc.c b/arch/m32r/boot/compressed/misc.c
index 370d608..28a0952 100644
--- a/arch/m32r/boot/compressed/misc.c
+++ b/arch/m32r/boot/compressed/misc.c
@@ -28,7 +28,7 @@ static unsigned long free_mem_ptr;
static unsigned long free_mem_end_ptr;
#ifdef CONFIG_KERNEL_BZIP2
-static void *memset(void *s, int c, size_t n)
+void *memset(void *s, int c, size_t n)
{
char *ss = s;
@@ -39,6 +39,16 @@ static void *memset(void *s, int c, size_t n)
#endif
#ifdef CONFIG_KERNEL_GZIP
+void *memcpy(void *dest, const void *src, size_t n)
+{
+ char *d = dest;
+ const char *s = src;
+ while (n--)
+ *d++ = *s++;
+
+ return dest;
+}
+
#define BOOT_HEAP_SIZE 0x10000
#include "../../../../lib/decompress_inflate.c"
#endif
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 8c45818..8375622 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3737,10 +3737,6 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
break;
case OpMem8:
ctxt->memop.bytes = 1;
- if (ctxt->memop.type == OP_REG) {
- ctxt->memop.addr.reg = decode_register(ctxt, ctxt->modrm_rm, 1);
- fetch_register_operand(&ctxt->memop);
- }
goto mem_common;
case OpMem16:
ctxt->memop.bytes = 2;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d9f8358..80103bb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3750,11 +3750,17 @@ static int bond_neigh_init(struct neighbour *n)
* The bonding ndo_neigh_setup is called at init time beofre any
* slave exists. So we must declare proxy setup function which will
* be used at run time to resolve the actual slave neigh param setup.
+ *
+ * It's also called by master devices (such as vlans) to setup their
+ * underlying devices. In that case - do nothing, we're already set up from
+ * our init.
*/
static int bond_neigh_setup(struct net_device *dev,
struct neigh_parms *parms)
{
- parms->neigh_setup = bond_neigh_init;
+ /* modify only our neigh_parms */
+ if (parms->dev == dev)
+ parms->neigh_setup = bond_neigh_init;
return 0;
}
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 2205db7..1b44047 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -524,6 +524,7 @@ rx_status_loop:
PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
dev->stats.rx_dropped++;
+ kfree_skb(new_skb);
goto rx_next;
}
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 5151f06..77ce8b2 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -642,6 +642,28 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
return 0;
}
+static unsigned long iov_pages(const struct iovec *iv, int offset,
+ unsigned long nr_segs)
+{
+ unsigned long seg, base;
+ int pages = 0, len, size;
+
+ while (nr_segs && (offset >= iv->iov_len)) {
+ offset -= iv->iov_len;
+ ++iv;
+ --nr_segs;
+ }
+
+ for (seg = 0; seg < nr_segs; seg++) {
+ base = (unsigned long)iv[seg].iov_base + offset;
+ len = iv[seg].iov_len - offset;
+ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+ pages += size;
+ offset = 0;
+ }
+
+ return pages;
+}
/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
@@ -688,31 +710,15 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
if (unlikely(count > UIO_MAXIOV))
goto err;
- if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
- zerocopy = true;
-
- if (zerocopy) {
- /* Userspace may produce vectors with count greater than
- * MAX_SKB_FRAGS, so we need to linearize parts of the skb
- * to let the rest of data to be fit in the frags.
- */
- if (count > MAX_SKB_FRAGS) {
- copylen = iov_length(iv, count - MAX_SKB_FRAGS);
- if (copylen < vnet_hdr_len)
- copylen = 0;
- else
- copylen -= vnet_hdr_len;
- }
- /* There are 256 bytes to be copied in skb, so there is enough
- * room for skb expand head in case it is used.
- * The rest buffer is mapped from userspace.
- */
- if (copylen < vnet_hdr.hdr_len)
- copylen = vnet_hdr.hdr_len;
- if (!copylen)
- copylen = GOODCOPY_LEN;
+ if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
+ copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
linear = copylen;
- } else {
+ if (iov_pages(iv, vnet_hdr_len + copylen, count)
+ <= MAX_SKB_FRAGS)
+ zerocopy = true;
+ }
+
+ if (!zerocopy) {
copylen = len;
linear = vnet_hdr.hdr_len;
}
@@ -724,9 +730,15 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
if (zerocopy)
err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
- else
+ else {
err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
len);
+ if (!err && m && m->msg_control) {
+ struct ubuf_info *uarg = m->msg_control;
+ uarg->callback(uarg);
+ }
+ }
+
if (err)
goto err_kfree;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index c896b8f..194f879 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -615,8 +615,9 @@ static ssize_t tun_get_user(struct tun_struct *tun,
int offset = 0;
if (!(tun->flags & TUN_NO_PI)) {
- if ((len -= sizeof(pi)) > count)
+ if (len < sizeof(pi))
return -EINVAL;
+ len -= sizeof(pi);
if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
return -EFAULT;
@@ -624,8 +625,9 @@ static ssize_t tun_get_user(struct tun_struct *tun,
}
if (tun->flags & TUN_VNET_HDR) {
- if ((len -= tun->vnet_hdr_sz) > count)
+ if (len < tun->vnet_hdr_sz)
return -EINVAL;
+ len -= tun->vnet_hdr_sz;
if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
return -EFAULT;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 1a9e2a9..a50cb9c 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1603,6 +1603,7 @@ void vhost_zerocopy_callback(struct ubuf_info *ubuf)
struct vhost_ubuf_ref *ubufs = ubuf->ctx;
struct vhost_virtqueue *vq = ubufs->vq;
+ vhost_poll_queue(&vq->poll);
/* set len = 1 to mark this desc buffers done DMA */
vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index ba45e6b..f5a21d0 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -123,6 +123,8 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
#define ICMPV6_NOT_NEIGHBOUR 2
#define ICMPV6_ADDR_UNREACH 3
#define ICMPV6_PORT_UNREACH 4
+#define ICMPV6_POLICY_FAIL 5
+#define ICMPV6_REJECT_ROUTE 6
/*
* Codes for Time Exceeded
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 9069071..ca670d9 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1155,7 +1155,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
mld2q = (struct mld2_query *)icmp6_hdr(skb);
if (!mld2q->mld2q_nsrcs)
group = &mld2q->mld2q_mca;
- max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1;
+
+ max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))), 1UL);
}
if (!group)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 69b7ca3..ebd4b21 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1442,16 +1442,18 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
atomic_set(&p->refcnt, 1);
p->reachable_time =
neigh_rand_reach_time(p->base_reachable_time);
+ dev_hold(dev);
+ p->dev = dev;
+ write_pnet(&p->net, hold_net(net));
+ p->sysctl_table = NULL;
if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
+ release_net(net);
+ dev_put(dev);
kfree(p);
return NULL;
}
- dev_hold(dev);
- p->dev = dev;
- write_pnet(&p->net, hold_net(net));
- p->sysctl_table = NULL;
write_lock_bh(&tbl->lock);
p->next = tbl->parms.next;
tbl->parms.next = p;
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 0c28508..77d1550 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -19,6 +19,9 @@
#include <net/sock.h>
#include <net/net_ratelimit.h>
+static int zero = 0;
+static int ushort_max = USHRT_MAX;
+
#ifdef CONFIG_RPS
static int rps_sock_flow_sysctl(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -197,7 +200,9 @@ static struct ctl_table netns_core_table[] = {
.data = &init_net.core.sysctl_somaxconn,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .extra1 = &zero,
+ .extra2 = &ushort_max,
+ .proc_handler = proc_dointvec_minmax
},
{ }
};
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 30b88d7..424704a 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -71,7 +71,6 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ip.h>
@@ -1772,10 +1771,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
if (!c)
continue;
- if (IS_LEAF(c)) {
- prefetch(rcu_dereference_rtnl(p->child[idx]));
+ if (IS_LEAF(c))
return (struct leaf *) c;
- }
/* Rescan start scanning in new node */
p = (struct tnode *) c;
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index a9077f4..b6ae92a 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -206,8 +206,8 @@ static u32 cubic_root(u64 a)
*/
static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
{
- u64 offs;
- u32 delta, t, bic_target, max_cnt;
+ u32 delta, bic_target, max_cnt;
+ u64 offs, t;
ca->ack_cnt++; /* count the number of ACKs */
@@ -250,9 +250,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
* if the cwnd < 1 million packets !!!
*/
+ t = (s32)(tcp_time_stamp - ca->epoch_start);
+ t += msecs_to_jiffies(ca->delay_min >> 3);
/* change the unit from HZ to bictcp_HZ */
- t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3)
- - ca->epoch_start) << BICTCP_HZ) / HZ;
+ t <<= BICTCP_HZ;
+ do_div(t, HZ);
if (t < ca->bic_K) /* t - K */
offs = ca->bic_K - t;
@@ -414,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
return;
/* Discard delay samples right after fast recovery */
- if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+ if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
return;
delay = (rtt_us << 3) / USEC_PER_MSEC;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d427f1b..abfa007 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -910,12 +910,10 @@ retry:
if (ifp->flags & IFA_F_OPTIMISTIC)
addr_flags |= IFA_F_OPTIMISTIC;
- ift = !max_addresses ||
- ipv6_count_addresses(idev) < max_addresses ?
- ipv6_add_addr(idev, &addr, tmp_plen,
- ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
- addr_flags) : NULL;
- if (!ift || IS_ERR(ift)) {
+ ift = ipv6_add_addr(idev, &addr, tmp_plen,
+ ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
+ addr_flags);
+ if (IS_ERR(ift)) {
in6_ifa_put(ifp);
in6_dev_put(idev);
printk(KERN_INFO
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 27ac95a..dbf20f6 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -917,6 +917,14 @@ static const struct icmp6_err {
.err = ECONNREFUSED,
.fatal = 1,
},
+ { /* POLICY_FAIL */
+ .err = EACCES,
+ .fatal = 1,
+ },
+ { /* REJECT_ROUTE */
+ .err = EACCES,
+ .fatal = 1,
+ },
};
int icmpv6_err_convert(u8 type, u8 code, int *err)
@@ -928,7 +936,7 @@ int icmpv6_err_convert(u8 type, u8 code, int *err)
switch (type) {
case ICMPV6_DEST_UNREACH:
fatal = 1;
- if (code <= ICMPV6_PORT_UNREACH) {
+ if (code < ARRAY_SIZE(tab_unreach)) {
*err = tab_unreach[code].err;
fatal = tab_unreach[code].fatal;
}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index c3a007d..5bb77a6 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -949,14 +949,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
#ifdef CONFIG_IPV6_SUBTREES
- if (fn->subtree)
- fn = fib6_lookup_1(fn->subtree, args + 1);
+ if (fn->subtree) {
+ struct fib6_node *sfn;
+ sfn = fib6_lookup_1(fn->subtree,
+ args + 1);
+ if (!sfn)
+ goto backtrack;
+ fn = sfn;
+ }
#endif
- if (!fn || fn->fn_flags & RTN_RTINFO)
+ if (fn->fn_flags & RTN_RTINFO)
return fn;
}
}
-
+#ifdef CONFIG_IPV6_SUBTREES
+backtrack:
+#endif
if (fn->fn_flags & RTN_ROOT)
break;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 843d6eb..5cc78e6 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -441,7 +441,6 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
int hlen = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
int len;
- int err;
u8 *opt;
if (!dev->addr_len)
@@ -451,14 +450,12 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
if (llinfo)
len += ndisc_opt_addr_space(dev);
- skb = sock_alloc_send_skb(sk,
- (MAX_HEADER + sizeof(struct ipv6hdr) +
- len + hlen + tlen),
- 1, &err);
+ skb = alloc_skb((MAX_HEADER + sizeof(struct ipv6hdr) +
+ len + hlen + tlen), GFP_ATOMIC);
if (!skb) {
ND_PRINTK0(KERN_ERR
- "ICMPv6 ND: %s() failed to allocate an skb, err=%d.\n",
- __func__, err);
+ "ICMPv6 ND: %s() failed to allocate an skb.\n",
+ __func__);
return NULL;
}
@@ -486,6 +483,11 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
csum_partial(hdr,
len, 0));
+ /* Manually assign socket ownership as we avoid calling
+ * sock_alloc_send_pskb() to bypass wmem buffer limits
+ */
+ skb_set_owner_w(skb, sk);
+
return skb;
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7ee7121..c4717e8 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1571,7 +1571,7 @@ ipv6_pktoptions:
if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
if (np->rxopt.bits.rxtclass)
- np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
+ np->rcv_tclass = ipv6_tclass(ipv6_hdr(opt_skb));
if (ipv6_opt_accepted(sk, opt_skb)) {
skb_set_owner_r(opt_skb, sk);
opt_skb = xchg(&np->pktoptions, opt_skb);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index f08b9166..caa5aff 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -86,7 +86,7 @@ struct htb_class {
unsigned int children;
struct htb_class *parent; /* parent class */
- int prio; /* these two are used only by leaves... */
+ u32 prio; /* these two are used only by leaves... */
int quantum; /* but stored for parent-to-leaf return */
union {
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 527e3f0..dd625af 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -53,6 +53,7 @@ struct eth_bearer {
struct tipc_bearer *bearer;
struct net_device *dev;
struct packet_type tipc_packet_type;
+ struct work_struct setup;
struct work_struct cleanup;
};
@@ -138,6 +139,17 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
}
/**
+ * setup_bearer - setup association between Ethernet bearer and interface
+ */
+static void setup_bearer(struct work_struct *work)
+{
+ struct eth_bearer *eb_ptr =
+ container_of(work, struct eth_bearer, setup);
+
+ dev_add_pack(&eb_ptr->tipc_packet_type);
+}
+
+/**
* enable_bearer - attach TIPC bearer to an Ethernet interface
*/
@@ -181,7 +193,8 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
eb_ptr->tipc_packet_type.func = recv_msg;
eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
- dev_add_pack(&eb_ptr->tipc_packet_type);
+ INIT_WORK(&eb_ptr->setup, setup_bearer);
+ schedule_work(&eb_ptr->setup);
/* Associate TIPC bearer with Ethernet bearer */

File diff suppressed because it is too large

View file

@@ -0,0 +1,681 @@
diff --git a/Makefile b/Makefile
index 94ce941..580b364 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 63
+SUBLEVEL = 64
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 505f27e..8d1724c 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -51,7 +51,7 @@ static char *pre_emph_names[] = {
* or from atom. Note that atom operates on
* dw units.
*/
-static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
+void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
@@ -101,7 +101,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
- radeon_copy_swap(base, send, send_bytes, true);
+ radeon_atom_copy_swap(base, send, send_bytes, true);
args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
@@ -138,7 +138,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
recv_bytes = recv_size;
if (recv && recv_size)
- radeon_copy_swap(recv, base + 16, recv_bytes, false);
+ radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);
return recv_bytes;
}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index ced9370..2f755e2 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1425,8 +1425,12 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- /* some early dce3.2 boards have a bug in their transmitter control table */
- if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
+ /* some dce3.x boards have a bug in their transmitter control table.
+ * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
+ * does the same thing and more.
+ */
+ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
+ (rdev->family != CHIP_RS880))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index 44d87b6..9ed94a8 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -27,6 +27,8 @@
#include "radeon.h"
#include "atom.h"
+extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
+
#define TARGET_HW_I2C_CLOCK 50
/* these are a limitation of ProcessI2cChannelTransaction not the hw */
@@ -77,7 +79,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
}
if (!(flags & HW_I2C_WRITE))
- memcpy(buf, base, num);
+ radeon_atom_copy_swap(buf, base, num, false);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 300099d..15d1f08 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -534,7 +534,8 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode,
struct drm_display_mode *other_mode)
{
- u32 tmp;
+ u32 tmp, buffer_alloc, i;
+ u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
/*
* Line Buffer Setup
* There are 3 line buffers, each one shared by 2 display controllers.
@@ -557,18 +558,34 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
* non-linked crtcs for maximum line buffer allocation.
*/
if (radeon_crtc->base.enabled && mode) {
- if (other_mode)
+ if (other_mode) {
tmp = 0; /* 1/2 */
- else
+ buffer_alloc = 1;
+ } else {
tmp = 2; /* whole */
- } else
+ buffer_alloc = 2;
+ }
+ } else {
tmp = 0;
+ buffer_alloc = 0;
+ }
/* second controller of the pair uses second half of the lb */
if (radeon_crtc->crtc_id % 2)
tmp += 4;
WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+ WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+ DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+ DMIF_BUFFERS_ALLOCATED_COMPLETED)
+ break;
+ udelay(1);
+ }
+ }
+
if (radeon_crtc->base.enabled && mode) {
switch (tmp) {
case 0:
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 81e744f..52aabf2 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -472,6 +472,10 @@
# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
+#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
+# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
+# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
+
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 38d87e1..c54d295 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -715,13 +715,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
(ctx->bios + data_offset +
le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
+ u8 *num_dst_objs = (u8 *)
+ ((u8 *)router_src_dst_table + 1 +
+ (router_src_dst_table->ucNumberOfSrc * 2));
+ u16 *dst_objs = (u16 *)(num_dst_objs + 1);
int enum_id;
router.router_id = router_obj_id;
- for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst;
- enum_id++) {
+ for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
if (le16_to_cpu(path->usConnObjectId) ==
- le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id]))
+ le16_to_cpu(dst_objs[enum_id]))
break;
}
@@ -1622,7 +1625,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
kfree(edid);
}
}
- record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
+ record += fake_edid_record->ucFakeEDIDLength ?
+ fake_edid_record->ucFakeEDIDLength + 2 :
+ sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
break;
case LCD_PANEL_RESOLUTION_RECORD_TYPE:
panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 5099bd3..b367d1d 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -174,10 +174,13 @@ int rs400_gart_enable(struct radeon_device *rdev)
/* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
* AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
- WREG32_MC(RS480_MC_MISC_CNTL,
- (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
+ tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+ tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN;
+ WREG32_MC(RS480_MC_MISC_CNTL, tmp);
} else {
- WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
+ tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+ tmp |= RS480_GART_INDEX_REG_EN;
+ WREG32_MC(RS480_MC_MISC_CNTL, tmp);
}
/* Enable gart */
WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index e22b460..bd1f18c 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -411,7 +411,8 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode,
struct drm_display_mode *other_mode)
{
- u32 tmp;
+ u32 tmp, buffer_alloc, i;
+ u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
/*
* Line Buffer Setup
* There are 3 line buffers, each one shared by 2 display controllers.
@@ -426,16 +427,30 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
* non-linked crtcs for maximum line buffer allocation.
*/
if (radeon_crtc->base.enabled && mode) {
- if (other_mode)
+ if (other_mode) {
tmp = 0; /* 1/2 */
- else
+ buffer_alloc = 1;
+ } else {
tmp = 2; /* whole */
- } else
+ buffer_alloc = 2;
+ }
+ } else {
tmp = 0;
+ buffer_alloc = 0;
+ }
WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
DC_LB_MEMORY_CONFIG(tmp));
+ WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+ DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+ DMIF_BUFFERS_ALLOCATED_COMPLETED)
+ break;
+ udelay(1);
+ }
+
if (radeon_crtc->base.enabled && mode) {
switch (tmp) {
case 0:
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 45e240d..1767ae7 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -57,6 +57,10 @@
#define DMIF_ADDR_CALC 0xC00
+#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
+# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
+# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
+
#define SRBM_STATUS 0xE50
#define CC_SYS_RB_BACKEND_DISABLE 0xe80
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index fa09daf..ea10bbe 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -170,7 +170,7 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
ttm_tt_unbind(ttm);
}
- if (likely(ttm->pages != NULL)) {
+ if (ttm->state == tt_unbound) {
ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index ab59fdf..e937a58 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -821,6 +821,64 @@ static int search(__s32 *array, __s32 value, unsigned n)
return -1;
}
+static const char * const hid_report_names[] = {
+ "HID_INPUT_REPORT",
+ "HID_OUTPUT_REPORT",
+ "HID_FEATURE_REPORT",
+};
+/**
+ * hid_validate_values - validate existing device report's value indexes
+ *
+ * @device: hid device
+ * @type: which report type to examine
+ * @id: which report ID to examine (0 for first)
+ * @field_index: which report field to examine
+ * @report_counts: expected number of values
+ *
+ * Validate the number of values in a given field of a given report, after
+ * parsing.
+ */
+struct hid_report *hid_validate_values(struct hid_device *hid,
+ unsigned int type, unsigned int id,
+ unsigned int field_index,
+ unsigned int report_counts)
+{
+ struct hid_report *report;
+
+ if (type > HID_FEATURE_REPORT) {
+ hid_err(hid, "invalid HID report type %u\n", type);
+ return NULL;
+ }
+
+ if (id >= HID_MAX_IDS) {
+ hid_err(hid, "invalid HID report id %u\n", id);
+ return NULL;
+ }
+
+ /*
+ * Explicitly not using hid_get_report() here since it depends on
+ * ->numbered being checked, which may not always be the case when
+ * drivers go to access report values.
+ */
+ report = hid->report_enum[type].report_id_hash[id];
+ if (!report) {
+ hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
+ return NULL;
+ }
+ if (report->maxfield <= field_index) {
+ hid_err(hid, "not enough fields in %s %u\n",
+ hid_report_names[type], id);
+ return NULL;
+ }
+ if (report->field[field_index]->report_count < report_counts) {
+ hid_err(hid, "not enough values in %s %u field %u\n",
+ hid_report_names[type], id, field_index);
+ return NULL;
+ }
+ return report;
+}
+EXPORT_SYMBOL_GPL(hid_validate_values);
+
/**
* hid_match_report - check if driver's raw_event should be called
*
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 73b67ab..2652846 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -454,7 +454,7 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
struct hid_report *report;
struct hid_report_enum *output_report_enum;
u8 *data = (u8 *)(&dj_report->device_index);
- int i;
+ unsigned int i;
output_report_enum = &hdev->report_enum[HID_OUTPUT_REPORT];
report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
@@ -464,7 +464,7 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
return -ENODEV;
}
- for (i = 0; i < report->field[0]->report_count; i++)
+ for (i = 0; i < DJREPORT_SHORT_LENGTH - 1; i++)
report->field[0]->value[i] = data[i];
usbhid_submit_report(hdev, report, USB_DIR_OUT);
@@ -783,6 +783,12 @@ static int logi_dj_probe(struct hid_device *hdev,
goto hid_parse_fail;
}
+ if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, REPORT_ID_DJ_SHORT,
+ 0, DJREPORT_SHORT_LENGTH - 1)) {
+ retval = -ENODEV;
+ goto hid_parse_fail;
+ }
+
/* Starts the usb device and connects to upper interfaces hiddev and
* hidraw */
retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
index f6ba81d..f348f7f 100644
--- a/drivers/hid/hid-zpff.c
+++ b/drivers/hid/hid-zpff.c
@@ -70,21 +70,13 @@ static int zpff_init(struct hid_device *hid)
struct hid_report *report;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
- struct list_head *report_list =
- &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
- int error;
+ int i, error;
- if (list_empty(report_list)) {
- hid_err(hid, "no output report found\n");
- return -ENODEV;
- }
-
- report = list_entry(report_list->next, struct hid_report, list);
-
- if (report->maxfield < 4) {
- hid_err(hid, "not enough fields in report\n");
- return -ENODEV;
+ for (i = 0; i < 4; i++) {
+ report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1);
+ if (!report)
+ return -ENODEV;
}
zpff = kzalloc(sizeof(struct zpff_device), GFP_KERNEL);
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 7b3c068..62311ad 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -311,8 +311,9 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
index = rx_queue->added_count & rx_queue->ptr_mask;
new_buf = efx_rx_buffer(rx_queue, index);
- new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
new_buf->u.page = rx_buf->u.page;
+ new_buf->page_offset = rx_buf->page_offset ^ (PAGE_SIZE >> 1);
+ new_buf->dma_addr = state->dma_addr + new_buf->page_offset;
new_buf->len = rx_buf->len;
new_buf->flags = EFX_RX_BUF_PAGE;
++rx_queue->added_count;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 425e201..1db819b 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -618,6 +618,11 @@ static const struct usb_device_id products [] = {
.bInterfaceProtocol = USB_CDC_PROTO_NONE,
.driver_info = (unsigned long)&wwan_info,
}, {
+ /* Telit modules */
+ USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = (kernel_ulong_t) &wwan_info,
+}, {
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 72cb121..8ff177d 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -2161,6 +2161,13 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
int i;
/*
+ * First check if temperature compensation is supported.
+ */
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ if (!rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC))
+ return 0;
+
+ /*
* Read TSSI boundaries for temperature compensation from
* the EEPROM.
*
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index a506360..0c2f912 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -18,6 +18,12 @@ static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new)
old->tgid == new->tgid) {
switch (old->data_type) {
case (FSNOTIFY_EVENT_PATH):
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ /* dont merge two permission events */
+ if ((old->mask & FAN_ALL_PERM_EVENTS) &&
+ (new->mask & FAN_ALL_PERM_EVENTS))
+ return false;
+#endif
if ((old->path.mnt == new->path.mnt) &&
(old->path.dentry == new->path.dentry))
return true;
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 8c933a8..3572ecd 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -737,6 +737,10 @@ void hid_output_report(struct hid_report *report, __u8 *data);
struct hid_device *hid_allocate_device(void);
struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
+struct hid_report *hid_validate_values(struct hid_device *hid,
+ unsigned int type, unsigned int id,
+ unsigned int field_index,
+ unsigned int report_counts);
int hid_check_keys_pressed(struct hid_device *hid);
int hid_connect(struct hid_device *hid, unsigned int connect_mask);
void hid_disconnect(struct hid_device *hid);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 89fb74e..e45ffad 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1063,7 +1063,7 @@ struct perf_cpu_context {
int exclusive;
struct list_head rotation_list;
int jiffies_interval;
- struct pmu *active_pmu;
+ struct pmu *unique_pmu;
struct perf_cgroup *cgrp;
};
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 4eb1ed3..519c252 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -3476,6 +3476,7 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
const char *buffer)
{
struct cgroup_event *event = NULL;
+ struct cgroup *cgrp_cfile;
unsigned int efd, cfd;
struct file *efile = NULL;
struct file *cfile = NULL;
@@ -3531,6 +3532,16 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
goto fail;
}
+ /*
+ * The file to be monitored must be in the same cgroup as
+ * cgroup.event_control is.
+ */
+ cgrp_cfile = __d_cgrp(cfile->f_dentry->d_parent);
+ if (cgrp_cfile != cgrp) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
if (!event->cft->register_event || !event->cft->unregister_event) {
ret = -EINVAL;
goto fail;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 8e810ba..8e82398 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -368,6 +368,8 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+ if (cpuctx->unique_pmu != pmu)
+ continue; /* ensure we process each cpuctx once */
/*
* perf_cgroup_events says at least one
@@ -391,9 +393,10 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
if (mode & PERF_CGROUP_SWIN) {
WARN_ON_ONCE(cpuctx->cgrp);
- /* set cgrp before ctxsw in to
- * allow event_filter_match() to not
- * have to pass task around
+ /*
+ * set cgrp before ctxsw in to allow
+ * event_filter_match() to not have to pass
+ * task around
*/
cpuctx->cgrp = perf_cgroup_from_task(task);
cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
@@ -4332,7 +4335,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
- if (cpuctx->active_pmu != pmu)
+ if (cpuctx->unique_pmu != pmu)
goto next;
perf_event_task_ctx(&cpuctx->ctx, task_event);
@@ -4478,7 +4481,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
- if (cpuctx->active_pmu != pmu)
+ if (cpuctx->unique_pmu != pmu)
goto next;
perf_event_comm_ctx(&cpuctx->ctx, comm_event);
@@ -4674,7 +4677,7 @@ got_name:
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
- if (cpuctx->active_pmu != pmu)
+ if (cpuctx->unique_pmu != pmu)
goto next;
perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
vma->vm_flags & VM_EXEC);
@@ -5750,8 +5753,8 @@ static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
- if (cpuctx->active_pmu == old_pmu)
- cpuctx->active_pmu = pmu;
+ if (cpuctx->unique_pmu == old_pmu)
+ cpuctx->unique_pmu = pmu;
}
}
@@ -5886,7 +5889,7 @@ skip_type:
cpuctx->ctx.pmu = pmu;
cpuctx->jiffies_interval = 1;
INIT_LIST_HEAD(&cpuctx->rotation_list);
- cpuctx->active_pmu = pmu;
+ cpuctx->unique_pmu = pmu;
}
got_cpu_context:
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index da4512f..33c8b60 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5190,11 +5190,15 @@ static void task_fork_fair(struct task_struct *p)
cfs_rq = task_cfs_rq(current);
curr = cfs_rq->curr;
- if (unlikely(task_cpu(p) != this_cpu)) {
- rcu_read_lock();
- __set_task_cpu(p, this_cpu);
- rcu_read_unlock();
- }
+ /*
+ * Not only the cpu but also the task_group of the parent might have
+ * been changed after parent->se.parent,cfs_rq were copied to
+ * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
+ * of child point to valid ones.
+ */
+ rcu_read_lock();
+ __set_task_cpu(p, this_cpu);
+ rcu_read_unlock();
update_curr(cfs_rq);
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 2fdb05d..1ff51c9 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1610,9 +1610,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
asoc->outqueue.outstanding_bytes;
sackh.num_gap_ack_blocks = 0;
sackh.num_dup_tsns = 0;
- chunk->subh.sack_hdr = &sackh;
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
- SCTP_CHUNK(chunk));
+ SCTP_SACKH(&sackh));
break;
case SCTP_CMD_DISCARD_PACKET:
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 9b0c0b8..55ab5e4 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -2045,6 +2045,9 @@ sub process_file($) {
$section_counter = 0;
while (<IN>) {
+ while (s/\\\s*$//) {
+ $_ .= <IN>;
+ }
if ($state == 0) {
if (/$doc_start/o) {
$state = 1; # next line is always the function name
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 9593f27..5f5080f 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -16,6 +16,7 @@ const char *map_type__name[MAP__NR_TYPES] = {
static inline int is_anon_memory(const char *filename)
{
return !strcmp(filename, "//anon") ||
+ !strcmp(filename, "/dev/zero (deleted)") ||
!strcmp(filename, "/anon_hugepage (deleted)");
}
View file

@@ -0,0 +1,544 @@
diff --git a/Makefile b/Makefile
index 580b364..3a8897f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 64
+SUBLEVEL = 65
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index df1b604..bd70df6 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -479,6 +479,22 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
},
},
+ { /* Handle problems with rebooting on the Dell PowerEdge C6100. */
+ .callback = set_pci_reboot,
+ .ident = "Dell PowerEdge C6100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+ },
+ },
+ { /* Some C6100 machines were shipped with vendor being 'Dell'. */
+ .callback = set_pci_reboot,
+ .ident = "Dell PowerEdge C6100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+ },
+ },
{ }
};
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 1e40637..454548c 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -845,10 +845,13 @@ void __init efi_enter_virtual_mode(void)
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
- if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
- md->type != EFI_BOOT_SERVICES_CODE &&
- md->type != EFI_BOOT_SERVICES_DATA)
- continue;
+ if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
+#ifdef CONFIG_X86_64
+ if (md->type != EFI_BOOT_SERVICES_CODE &&
+ md->type != EFI_BOOT_SERVICES_DATA)
+#endif
+ continue;
+ }
size = md->num_pages << EFI_PAGE_SHIFT;
end = md->phys_addr + size;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 069725c..eee6cd3 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -625,7 +625,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
DRM_DEBUG_KMS("aux_ch native nack\n");
return -EREMOTEIO;
case AUX_NATIVE_REPLY_DEFER:
- udelay(100);
+ /*
+ * For now, just give more slack to branch devices. We
+ * could check the DPCD for I2C bit rate capabilities,
+ * and if available, adjust the interval. We could also
+ * be more careful with DP-to-Legacy adapters where a
+ * long legacy cable may force very low I2C bit rates.
+ */
+ if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DWN_STRM_PORT_PRESENT)
+ usleep_range(500, 600);
+ else
+ usleep_range(300, 400);
continue;
default:
DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 1209f15..2f555d7 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -835,13 +835,22 @@ int radeon_device_init(struct radeon_device *rdev,
return r;
}
if ((radeon_testing & 1)) {
- radeon_test_moves(rdev);
+ if (rdev->accel_working)
+ radeon_test_moves(rdev);
+ else
+ DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
}
if ((radeon_testing & 2)) {
- radeon_test_syncing(rdev);
+ if (rdev->accel_working)
+ radeon_test_syncing(rdev);
+ else
+ DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
}
if (radeon_benchmarking) {
- radeon_benchmark(rdev, radeon_benchmarking);
+ if (rdev->accel_working)
+ radeon_benchmark(rdev, radeon_benchmarking);
+ else
+ DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
}
return 0;
}
diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
index 3c31bc6..128f011 100644
--- a/drivers/hid/hid-lg2ff.c
+++ b/drivers/hid/hid-lg2ff.c
@@ -66,26 +66,13 @@ int lg2ff_init(struct hid_device *hid)
struct hid_report *report;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
- struct list_head *report_list =
- &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
int error;
- if (list_empty(report_list)) {
- hid_err(hid, "no output report found\n");
+ /* Check that the report looks ok */
+ report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
+ if (!report)
return -ENODEV;
- }
-
- report = list_entry(report_list->next, struct hid_report, list);
-
- if (report->maxfield < 1) {
- hid_err(hid, "output report is empty\n");
- return -ENODEV;
- }
- if (report->field[0]->report_count < 7) {
- hid_err(hid, "not enough values in the field\n");
- return -ENODEV;
- }
lg2ff = kmalloc(sizeof(struct lg2ff_device), GFP_KERNEL);
if (!lg2ff)
diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c
index f98644c..91f981f 100644
--- a/drivers/hid/hid-lg3ff.c
+++ b/drivers/hid/hid-lg3ff.c
@@ -68,10 +68,11 @@ static int hid_lg3ff_play(struct input_dev *dev, void *data,
int x, y;
/*
- * Maxusage should always be 63 (maximum fields)
- * likely a better way to ensure this data is clean
+ * Available values in the field should always be 63, but we only use up to
+ * 35. Instead, clear the entire area, however big it is.
*/
- memset(report->field[0]->value, 0, sizeof(__s32)*report->field[0]->maxusage);
+ memset(report->field[0]->value, 0,
+ sizeof(__s32) * report->field[0]->report_count);
switch (effect->type) {
case FF_CONSTANT:
@@ -131,32 +132,14 @@ static const signed short ff3_joystick_ac[] = {
int lg3ff_init(struct hid_device *hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
- struct hid_report *report;
- struct hid_field *field;
const signed short *ff_bits = ff3_joystick_ac;
int error;
int i;
- /* Find the report to use */
- if (list_empty(report_list)) {
- hid_err(hid, "No output report found\n");
- return -1;
- }
-
/* Check that the report looks ok */
- report = list_entry(report_list->next, struct hid_report, list);
- if (!report) {
- hid_err(hid, "NULL output report\n");
- return -1;
- }
-
- field = report->field[0];
- if (!field) {
- hid_err(hid, "NULL field\n");
- return -1;
- }
+ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35))
+ return -ENODEV;
/* Assume single fixed device G940 */
for (i = 0; ff_bits[i] >= 0; i++)
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 6ecc9e2..44bb0a5 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -339,33 +339,15 @@ static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *at
int lg4ff_init(struct hid_device *hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
- struct hid_report *report;
- struct hid_field *field;
struct lg4ff_device_entry *entry;
struct usb_device_descriptor *udesc;
int error, i, j;
__u16 bcdDevice, rev_maj, rev_min;
- /* Find the report to use */
- if (list_empty(report_list)) {
- hid_err(hid, "No output report found\n");
- return -1;
- }
-
/* Check that the report looks ok */
- report = list_entry(report_list->next, struct hid_report, list);
- if (!report) {
- hid_err(hid, "NULL output report\n");
+ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
return -1;
- }
-
- field = report->field[0];
- if (!field) {
- hid_err(hid, "NULL field\n");
- return -1;
- }
/* Check what wheel has been connected */
for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
index 27bc54f..1d978daa 100644
--- a/drivers/hid/hid-lgff.c
+++ b/drivers/hid/hid-lgff.c
@@ -130,27 +130,14 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
int lgff_init(struct hid_device* hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
- struct hid_report *report;
- struct hid_field *field;
const signed short *ff_bits = ff_joystick;
int error;
int i;
- /* Find the report to use */
- if (list_empty(report_list)) {
- hid_err(hid, "No output report found\n");
- return -1;
- }
-
/* Check that the report looks ok */
- report = list_entry(report_list->next, struct hid_report, list);
- field = report->field[0];
- if (!field) {
- hid_err(hid, "NULL field\n");
- return -1;
- }
+ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
+ return -ENODEV;
for (i = 0; i < ARRAY_SIZE(devices); i++) {
if (dev->id.vendor == devices[i].idVendor &&
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 70d62f5..73bea49 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -489,16 +489,25 @@ static int applesmc_init_smcreg_try(void)
{
struct applesmc_registers *s = &smcreg;
bool left_light_sensor, right_light_sensor;
+ unsigned int count;
u8 tmp[1];
int ret;
if (s->init_complete)
return 0;
- ret = read_register_count(&s->key_count);
+ ret = read_register_count(&count);
if (ret)
return ret;
+ if (s->cache && s->key_count != count) {
+ pr_warn("key count changed from %d to %d\n",
+ s->key_count, count);
+ kfree(s->cache);
+ s->cache = NULL;
+ }
+ s->key_count = count;
+
if (!s->cache)
s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL);
if (!s->cache)
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 3ac4156..4caa8e6 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -256,7 +256,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
*/
INIT_WORK_ONSTACK(&req.work, do_metadata);
queue_work(ps->metadata_wq, &req.work);
- flush_work(&req.work);
+ flush_workqueue(ps->metadata_wq);
return req.result;
}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index ff62ddc..448050c 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -721,17 +721,16 @@ static int calc_max_buckets(void)
*/
static int init_hash_tables(struct dm_snapshot *s)
{
- sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;
+ sector_t hash_size, cow_dev_size, max_buckets;
/*
* Calculate based on the size of the original volume or
* the COW volume...
*/
cow_dev_size = get_dev_size(s->cow->bdev);
- origin_dev_size = get_dev_size(s->origin->bdev);
max_buckets = calc_max_buckets();
- hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
+ hash_size = cow_dev_size >> s->store->chunk_shift;
hash_size = min(hash_size, max_buckets);
if (hash_size < 64)
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 628545d..1f34a34 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1220,6 +1220,8 @@ device_release_WPADEV(pDevice);
memset(pMgmt->abyCurrBSSID, 0, 6);
pMgmt->eCurrState = WMAC_STATE_IDLE;
+ pDevice->flags &= ~DEVICE_FLAGS_OPENED;
+
device_free_tx_bufs(pDevice);
device_free_rx_bufs(pDevice);
device_free_int_bufs(pDevice);
@@ -1231,7 +1233,6 @@ device_release_WPADEV(pDevice);
usb_free_urb(pDevice->pInterruptURB);
BSSvClearNodeDBTable(pDevice, 0);
- pDevice->flags &=(~DEVICE_FLAGS_OPENED);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n");
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 336b82d..371fe69 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -684,6 +684,22 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
if ((index & ~USB_DIR_IN) == 0)
return 0;
ret = findintfep(ps->dev, index);
+ if (ret < 0) {
+ /*
+ * Some not fully compliant Win apps seem to get
+ * index wrong and have the endpoint number here
+ * rather than the endpoint address (with the
+ * correct direction). Win does let this through,
+ * so we'll not reject it here but leave it to
+ * the device to not break KVM. But we warn.
+ */
+ ret = findintfep(ps->dev, index ^ 0x80);
+ if (ret >= 0)
+ dev_info(&ps->dev->dev,
+ "%s: process %i (%s) requesting ep %02x but needs %02x\n",
+ __func__, task_pid_nr(current),
+ current->comm, index, index ^ 0x80);
+ }
if (ret >= 0)
ret = checkintf(ps, ret);
break;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 8331893..e0478b7 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -287,7 +287,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue)
xhci_queue_stop_endpoint(xhci, slot_id, i, suspend);
}
- cmd->command_trb = xhci->cmd_ring->enqueue;
+ cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);
xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend);
xhci_ring_cmd_db(xhci);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 1770ed5..87ee28e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -122,6 +122,16 @@ static int enqueue_is_link_trb(struct xhci_ring *ring)
return TRB_TYPE_LINK_LE32(link->control);
}
+union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
+{
+ /* Enqueue pointer can be left pointing to the link TRB,
+ * we must handle that
+ */
+ if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
+ return ring->enq_seg->next->trbs;
+ return ring->enqueue;
+}
+
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
* TRB is in a new segment. This does not skip over link TRBs, and it does not
* effect the ring dequeue or enqueue pointers.
@@ -847,8 +857,12 @@ remove_finished_td:
/* Otherwise ring the doorbell(s) to restart queued transfers */
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
- ep->stopped_td = NULL;
- ep->stopped_trb = NULL;
+
+ /* Clear stopped_td and stopped_trb if endpoint is not halted */
+ if (!(ep->ep_state & EP_HALTED)) {
+ ep->stopped_td = NULL;
+ ep->stopped_trb = NULL;
+ }
/*
* Drop the lock and complete the URBs in the cancelled TD list.
@@ -1390,6 +1404,12 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
inc_deq(xhci, xhci->cmd_ring);
return;
}
+ /* There is no command to handle if we get a stop event when the
+ * command ring is empty, event->cmd_trb points to the next
+ * unset command
+ */
+ if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
+ return;
}
switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1504946..c8ea954 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2582,15 +2582,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
if (command) {
cmd_completion = command->completion;
cmd_status = &command->status;
- command->command_trb = xhci->cmd_ring->enqueue;
-
- /* Enqueue pointer can be left pointing to the link TRB,
- * we must handle that
- */
- if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
- command->command_trb =
- xhci->cmd_ring->enq_seg->next->trbs;
-
+ command->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
} else {
cmd_completion = &virt_dev->cmd_completion;
@@ -2598,7 +2590,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
}
init_completion(cmd_completion);
- cmd_trb = xhci->cmd_ring->dequeue;
+ cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
if (!ctx_change)
ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
udev->slot_id, must_succeed);
@@ -3383,14 +3375,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
/* Attempt to submit the Reset Device command to the command ring */
spin_lock_irqsave(&xhci->lock, flags);
- reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
-
- /* Enqueue pointer can be left pointing to the link TRB,
- * we must handle that
- */
- if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
- reset_device_cmd->command_trb =
- xhci->cmd_ring->enq_seg->next->trbs;
+ reset_device_cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
ret = xhci_queue_reset_device(xhci, slot_id);
@@ -3594,7 +3579,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
union xhci_trb *cmd_trb;
spin_lock_irqsave(&xhci->lock, flags);
- cmd_trb = xhci->cmd_ring->dequeue;
+ cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3721,7 +3706,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
spin_lock_irqsave(&xhci->lock, flags);
- cmd_trb = xhci->cmd_ring->dequeue;
+ cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
udev->slot_id);
if (ret) {
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index a54a408..71dd138 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1811,6 +1811,7 @@ int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
union xhci_trb *cmd_trb);
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, unsigned int stream_id);
+union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring);
/* xHCI roothub code */
void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 84717ce..7895983 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -663,7 +663,8 @@ static int snd_compress_dev_disconnect(struct snd_device *device)
struct snd_compr *compr;
compr = device->device_data;
- snd_unregister_device(compr->direction, compr->card, compr->device);
+ snd_unregister_device(SNDRV_DEVICE_TYPE_COMPRESS, compr->card,
+ compr->device);
return 0;
}
File diff suppressed because it is too large

View file

@@ -0,0 +1,347 @@
diff --git a/Makefile b/Makefile
index e9587ab..9f440dd 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 66
+SUBLEVEL = 67
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 45ba99f..71d7d72 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -810,14 +810,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
else {
/*
- * The kernel should never fault on its own address space.
+ * The kernel should never fault on its own address space,
+ * unless pagefault_disable() was called before.
*/
- if (fault_space == 0)
+ if (fault_space == 0 && !in_atomic())
{
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
parisc_terminate("Kernel Fault", regs, code, fault_address);
-
}
}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 24b23a4..126f38d 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -935,7 +935,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
BEGIN_FTR_SECTION
mfspr r8, SPRN_DSCR
ld r7, HSTATE_DSCR(r13)
- std r8, VCPU_DSCR(r7)
+ std r8, VCPU_DSCR(r9)
mtspr SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d98b2a6..817eeb6 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1435,12 +1435,11 @@ ctl_table random_table[] = {
static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
-static int __init random_int_secret_init(void)
+int random_int_secret_init(void)
{
get_random_bytes(random_int_secret, sizeof(random_int_secret));
return 0;
}
-late_initcall(random_int_secret_init);
/*
* Get a random word for internal kernel use only. Similar to urandom but
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 15d1f08..ad72295 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1912,7 +1912,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sx_max_export_size = 256;
rdev->config.evergreen.sx_max_export_pos_size = 64;
rdev->config.evergreen.sx_max_export_smx_size = 192;
- rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.max_hw_contexts = 4;
rdev->config.evergreen.sq_num_cf_insts = 2;
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 8df050d..1c53745 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -310,7 +310,8 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd,
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
+ error = put_user(0, p);
+ break;
case WDIOC_KEEPALIVE:
ts72xx_wdt_kick(wdt);
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index e712a8c..b1aa7fd 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1268,6 +1268,8 @@ retry:
s_min_extra_isize) {
tried_min_extra_isize++;
new_extra_isize = s_min_extra_isize;
+ kfree(is); is = NULL;
+ kfree(bs); bs = NULL;
goto retry;
}
error = -1;
diff --git a/fs/statfs.c b/fs/statfs.c
index 43e6b6f..d1812b2 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -87,7 +87,7 @@ int user_statfs(const char __user *pathname, struct kstatfs *st)
int fd_statfs(int fd, struct kstatfs *st)
{
- struct file *file = fget(fd);
+ struct file *file = fget_raw(fd);
int error = -EBADF;
if (file) {
error = vfs_statfs(&file->f_path, st);
diff --git a/include/linux/random.h b/include/linux/random.h
index ac621ce..7e58ad2 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -56,6 +56,7 @@ extern void add_interrupt_randomness(int irq, int irq_flags);
extern void get_random_bytes(void *buf, int nbytes);
extern void get_random_bytes_arch(void *buf, int nbytes);
void generate_random_uuid(unsigned char uuid_out[16]);
+extern int random_int_secret_init(void);
#ifndef MODULE
extern const struct file_operations random_fops, urandom_fops;
diff --git a/init/main.c b/init/main.c
index 02c1384..db8e381 100644
--- a/init/main.c
+++ b/init/main.c
@@ -68,6 +68,7 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
+#include <linux/random.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -779,6 +780,7 @@ static void __init do_basic_setup(void)
do_ctors();
usermodehelper_enable();
do_initcalls();
+ random_int_secret_init();
}
static void __init do_pre_smp_initcalls(void)
diff --git a/mm/mmap.c b/mm/mmap.c
index ed884dd..69367e4 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -6,6 +6,7 @@
* Address space accounting code <alan@lxorguk.ukuu.org.uk>
*/
+#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
@@ -392,6 +393,34 @@ find_vma_prepare(struct mm_struct *mm, unsigned long addr,
return vma;
}
+static unsigned long count_vma_pages_range(struct mm_struct *mm,
+ unsigned long addr, unsigned long end)
+{
+ unsigned long nr_pages = 0;
+ struct vm_area_struct *vma;
+
+ /* Find first overlaping mapping */
+ vma = find_vma_intersection(mm, addr, end);
+ if (!vma)
+ return 0;
+
+ nr_pages = (min(end, vma->vm_end) -
+ max(addr, vma->vm_start)) >> PAGE_SHIFT;
+
+ /* Iterate over the rest of the overlaps */
+ for (vma = vma->vm_next; vma; vma = vma->vm_next) {
+ unsigned long overlap_len;
+
+ if (vma->vm_start > end)
+ break;
+
+ overlap_len = min(end, vma->vm_end) - vma->vm_start;
+ nr_pages += overlap_len >> PAGE_SHIFT;
+ }
+
+ return nr_pages;
+}
+
void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
struct rb_node **rb_link, struct rb_node *rb_parent)
{
@@ -1245,6 +1274,23 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long charged = 0;
struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
+ /* Check against address space limit. */
+ if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
+ unsigned long nr_pages;
+
+ /*
+ * MAP_FIXED may remove pages of mappings that intersects with
+ * requested mapping. Account for the pages it would unmap.
+ */
+ if (!(vm_flags & MAP_FIXED))
+ return -ENOMEM;
+
+ nr_pages = count_vma_pages_range(mm, addr, addr + len);
+
+ if (!may_expand_vm(mm, (len >> PAGE_SHIFT) - nr_pages))
+ return -ENOMEM;
+ }
+
/* Clear old maps */
error = -ENOMEM;
munmap_back:
@@ -1255,10 +1301,6 @@ munmap_back:
goto munmap_back;
}
- /* Check against address space limit. */
- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
- return -ENOMEM;
-
/*
* Set 'VM_NORESERVE' if we should not account for the
* memory use of this mapping.
@@ -1833,9 +1875,28 @@ int expand_downwards(struct vm_area_struct *vma,
return error;
}
+/*
+ * Note how expand_stack() refuses to expand the stack all the way to
+ * abut the next virtual mapping, *unless* that mapping itself is also
+ * a stack mapping. We want to leave room for a guard page, after all
+ * (the guard page itself is not added here, that is done by the
+ * actual page faulting logic)
+ *
+ * This matches the behavior of the guard page logic (see mm/memory.c:
+ * check_stack_guard_page()), which only allows the guard page to be
+ * removed under these circumstances.
+ */
#ifdef CONFIG_STACK_GROWSUP
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
+ struct vm_area_struct *next;
+
+ address &= PAGE_MASK;
+ next = vma->vm_next;
+ if (next && next->vm_start == address + PAGE_SIZE) {
+ if (!(next->vm_flags & VM_GROWSUP))
+ return -ENOMEM;
+ }
return expand_upwards(vma, address);
}
@@ -1858,6 +1919,14 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
#else
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
+ struct vm_area_struct *prev;
+
+ address &= PAGE_MASK;
+ prev = vma->vm_prev;
+ if (prev && prev->vm_end == address) {
+ if (!(prev->vm_flags & VM_GROWSDOWN))
+ return -ENOMEM;
+ }
return expand_downwards(vma, address);
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 353b32a..33abb78 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6832,6 +6832,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+ SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
index 520ef96..711299c 100644
--- a/sound/usb/usx2y/usbusx2yaudio.c
+++ b/sound/usb/usx2y/usbusx2yaudio.c
@@ -295,19 +295,6 @@ static void usX2Y_error_urb_status(struct usX2Ydev *usX2Y,
usX2Y_clients_stop(usX2Y);
}
-static void usX2Y_error_sequence(struct usX2Ydev *usX2Y,
- struct snd_usX2Y_substream *subs, struct urb *urb)
-{
- snd_printk(KERN_ERR
-"Sequence Error!(hcd_frame=%i ep=%i%s;wait=%i,frame=%i).\n"
-"Most probably some urb of usb-frame %i is still missing.\n"
-"Cause could be too long delays in usb-hcd interrupt handling.\n",
- usb_get_current_frame_number(usX2Y->dev),
- subs->endpoint, usb_pipein(urb->pipe) ? "in" : "out",
- usX2Y->wait_iso_frame, urb->start_frame, usX2Y->wait_iso_frame);
- usX2Y_clients_stop(usX2Y);
-}
-
static void i_usX2Y_urb_complete(struct urb *urb)
{
struct snd_usX2Y_substream *subs = urb->context;
@@ -324,12 +311,9 @@ static void i_usX2Y_urb_complete(struct urb *urb)
usX2Y_error_urb_status(usX2Y, subs, urb);
return;
}
- if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
- subs->completed_urb = urb;
- else {
- usX2Y_error_sequence(usX2Y, subs, urb);
- return;
- }
+
+ subs->completed_urb = urb;
+
{
struct snd_usX2Y_substream *capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE],
*playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
index 8e40b6e..1da1eca 100644
--- a/sound/usb/usx2y/usx2yhwdeppcm.c
+++ b/sound/usb/usx2y/usx2yhwdeppcm.c
@@ -244,13 +244,8 @@ static void i_usX2Y_usbpcm_urb_complete(struct urb *urb)
usX2Y_error_urb_status(usX2Y, subs, urb);
return;
}
- if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
- subs->completed_urb = urb;
- else {
- usX2Y_error_sequence(usX2Y, subs, urb);
- return;
- }
+ subs->completed_urb = urb;
capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
capsubs2 = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
File diff suppressed because it is too large

View file

@@ -0,0 +1,841 @@
diff --git a/Makefile b/Makefile
index 656c45f09128..2f9a0467ede5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 68
+SUBLEVEL = 69
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 37aabd772fbb..d2d58258aea6 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -195,6 +195,8 @@ common_stext:
ldw MEM_PDC_HI(%r0),%r6
depd %r6, 31, 32, %r3 /* move to upper word */
+ mfctl %cr30,%r6 /* PCX-W2 firmware bug */
+
ldo PDC_PSW(%r0),%arg0 /* 21 */
ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
@@ -203,6 +205,8 @@ common_stext:
copy %r0,%arg3
stext_pdc_ret:
+ mtctl %r6,%cr30 /* restore task thread info */
+
/* restore rfi target address*/
ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
tophys_r1 %r10
diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c
index 829df49dee99..41ebbfebb333 100644
--- a/arch/um/kernel/exitcode.c
+++ b/arch/um/kernel/exitcode.c
@@ -40,9 +40,11 @@ static ssize_t exitcode_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
char *end, buf[sizeof("nnnnn\0")];
+ size_t size;
int tmp;
- if (copy_from_user(buf, buffer, count))
+ size = min(count, sizeof(buf));
+ if (copy_from_user(buf, buffer, size))
return -EFAULT;
tmp = simple_strtol(buf, &end, 0);
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index d78869a00b11..b08caaa59813 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -343,7 +343,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sp = regs->areg[1];
- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) {
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
sp = current->sas_ss_sp + current->sas_ss_size;
}
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index e47c224d7c28..37fb4d6069a2 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1287,14 +1287,14 @@ void ata_eh_qc_complete(struct ata_queued_cmd *qc)
* should be retried. To be used from EH.
*
* SCSI midlayer limits the number of retries to scmd->allowed.
- * scmd->retries is decremented for commands which get retried
+ * scmd->allowed is incremented for commands which get retried
* due to unrelated failures (qc->err_mask is zero).
*/
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
- if (!qc->err_mask && scmd->retries)
- scmd->retries--;
+ if (!qc->err_mask)
+ scmd->allowed++;
__ata_eh_qc_complete(qc);
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 6116e3b75393..e9f1ef5d9340 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -420,9 +420,16 @@ long drm_ioctl(struct file *filp,
asize = drv_size;
}
else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+ u32 drv_size;
+
ioctl = &drm_ioctls[nr];
- cmd = ioctl->cmd;
+
+ drv_size = _IOC_SIZE(ioctl->cmd);
usize = asize = _IOC_SIZE(cmd);
+ if (drv_size > asize)
+ asize = drv_size;
+
+ cmd = ioctl->cmd;
} else
goto err_i1;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 2f755e2aeb86..6f4627fe24a1 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1430,7 +1430,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
* does the same thing and more.
*/
if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
- (rdev->family != CHIP_RS880))
+ (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index ce5f0449e1b6..75e66c612505 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1357,6 +1357,7 @@ static int raid1_spare_active(struct mddev *mddev)
}
}
if (rdev
+ && rdev->recovery_offset == MaxSector
&& !test_bit(Faulty, &rdev->flags)
&& !test_and_set_bit(In_sync, &rdev->flags)) {
count++;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index f7febd8e3720..99a102d186ce 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1534,6 +1534,7 @@ static int raid10_spare_active(struct mddev *mddev)
}
sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
} else if (tmp->rdev
+ && tmp->rdev->recovery_offset == MaxSector
&& !test_bit(Faulty, &tmp->rdev->flags)
&& !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
count++;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 00baa7e094f4..e2131ca64a5f 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -60,7 +60,7 @@
#define FLEXCAN_MCR_BCC BIT(16)
#define FLEXCAN_MCR_LPRIO_EN BIT(13)
#define FLEXCAN_MCR_AEN BIT(12)
-#define FLEXCAN_MCR_MAXMB(x) ((x) & 0xf)
+#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x1f)
#define FLEXCAN_MCR_IDAM_A (0 << 8)
#define FLEXCAN_MCR_IDAM_B (1 << 8)
#define FLEXCAN_MCR_IDAM_C (2 << 8)
@@ -701,9 +701,11 @@ static int flexcan_chip_start(struct net_device *dev)
*
*/
reg_mcr = flexcan_read(&regs->mcr);
+ reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN |
- FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS;
+ FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS |
+ FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);
netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
flexcan_write(reg_mcr, &regs->mcr);
@@ -744,6 +746,10 @@ static int flexcan_chip_start(struct net_device *dev)
&regs->cantxfg[i].can_ctrl);
}
+ /* Abort any pending TX, mark Mailbox as INACTIVE */
+ flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
+ &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
+
/* acceptance mask/acceptance code (accept everything) */
flexcan_write(0x0, &regs->rxgmask);
flexcan_write(0x0, &regs->rx14mask);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 21bc827c5fa6..9adb21a1133e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -343,7 +343,8 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
(bool)GET_RX_DESC_PAGGR(pdesc));
rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
if (phystatus) {
- p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE);
+ p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
+ stats->rx_bufshift);
rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
p_drvinfo);
}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 0d279c445a30..e9313f85bc70 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -777,6 +777,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
}
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index cf3059216958..c0d612ff8519 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -1957,6 +1957,7 @@ cntrlEnd:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
+ memset(&DevInfo, 0, sizeof(DevInfo));
DevInfo.MaxRDMBufferSize = BUFFER_4K;
DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
DevInfo.u32RxAlignmentCorrection = 0;
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
index 1c380d687963..1c9245119567 100644
--- a/drivers/staging/ozwpan/ozcdev.c
+++ b/drivers/staging/ozwpan/ozcdev.c
@@ -153,6 +153,9 @@ ssize_t oz_cdev_write(struct file *filp, const char __user *buf, size_t count,
struct oz_app_hdr *app_hdr;
struct oz_serial_ctx *ctx;
+ if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
+ return -EINVAL;
+
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 8659cd94639a..f6904d819ca8 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -119,6 +119,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Alcor Micro Corp. Hub */
{ USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* MicroTouch Systems touchscreen */
+ { USB_DEVICE(0x0596, 0x051e), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* appletouch */
{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -152,6 +155,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Broadcom BCM92035DGROM BT dongle */
{ USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* MAYA44USB sound device */
+ { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Action Semiconductor flash disk */
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 3e4c27dd1590..904e8341b2c9 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -916,6 +916,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
/* Crucible Devices */
{ USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 1b8af461b522..a7019d1e3058 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1307,3 +1307,9 @@
* Manufacturer: Crucible Technologies
*/
#define FTDI_CT_COMET_PID 0x8e08
+
+/*
+ * Product: Z3X Box
+ * Manufacturer: Smart GSM Team
+ */
+#define FTDI_Z3X_PID 0x0011
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index b3440c66a995..57277bccb9ef 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -707,6 +707,222 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x01) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x02) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x03) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x04) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x05) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x06) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x10) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x12) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x13) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x14) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x15) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x17) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x18) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x19) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x31) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x32) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x33) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x34) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x35) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x36) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x48) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x49) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x61) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x62) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x63) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x64) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x65) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x66) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x01) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x02) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x03) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x04) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x05) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x06) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x10) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x12) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x13) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x14) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x15) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x17) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x18) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x19) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x31) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x32) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x33) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x34) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x35) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x36) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x48) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x49) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x61) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x62) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x63) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x64) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x65) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x66) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x01) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x02) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x03) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x04) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x05) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x06) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x10) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x12) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x13) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x14) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x15) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x17) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x18) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x19) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x31) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x32) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x33) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x34) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x35) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x36) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x48) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x49) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x61) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x62) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x63) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x64) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x65) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x66) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x01) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x02) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x03) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x04) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x05) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x06) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x10) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x12) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x13) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x14) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x15) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x17) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x18) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x19) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x31) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x32) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x33) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x34) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x35) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x36) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x48) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x49) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x61) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x62) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x63) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x64) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x65) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x66) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index c1a3e603279c..7f464c513ba0 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -95,7 +95,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
if (insert_inode_locked(inode) < 0) {
rc = -EINVAL;
- goto fail_unlock;
+ goto fail_put;
}
inode_init_owner(inode, parent, mode);
@@ -156,7 +156,6 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
fail_drop:
dquot_drop(inode);
inode->i_flags |= S_NOQUOTA;
-fail_unlock:
clear_nlink(inode);
unlock_new_inode(inode);
fail_put:
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 9cd928f7a7c6..0d37a6fd18af 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -30,29 +30,64 @@ static RAW_NOTIFIER_HEAD(clockevents_chain);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
-/**
- * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
- * @latch: value to convert
- * @evt: pointer to clock event device descriptor
- *
- * Math helper, returns latch value converted to nanoseconds (bound checked)
- */
-u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
+static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
+ bool ismax)
{
u64 clc = (u64) latch << evt->shift;
+ u64 rnd;
if (unlikely(!evt->mult)) {
evt->mult = 1;
WARN_ON(1);
}
+ rnd = (u64) evt->mult - 1;
+
+ /*
+ * Upper bound sanity check. If the backwards conversion is
+ * not equal latch, we know that the above shift overflowed.
+ */
+ if ((clc >> evt->shift) != (u64)latch)
+ clc = ~0ULL;
+
+ /*
+ * Scaled math oddities:
+ *
+ * For mult <= (1 << shift) we can safely add mult - 1 to
+ * prevent integer rounding loss. So the backwards conversion
+ * from nsec to device ticks will be correct.
+ *
+ * For mult > (1 << shift), i.e. device frequency is > 1GHz we
+ * need to be careful. Adding mult - 1 will result in a value
+ * which when converted back to device ticks can be larger
+ * than latch by up to (mult - 1) >> shift. For the min_delta
+ * calculation we still want to apply this in order to stay
+ * above the minimum device ticks limit. For the upper limit
+ * we would end up with a latch value larger than the upper
+ * limit of the device, so we omit the add to stay below the
+ * device upper boundary.
+ *
+ * Also omit the add if it would overflow the u64 boundary.
+ */
+ if ((~0ULL - clc > rnd) &&
+ (!ismax || evt->mult <= (1U << evt->shift)))
+ clc += rnd;
do_div(clc, evt->mult);
- if (clc < 1000)
- clc = 1000;
- if (clc > KTIME_MAX)
- clc = KTIME_MAX;
- return clc;
+ /* Deltas less than 1usec are pointless noise */
+ return clc > 1000 ? clc : 1000;
+}
+
+/**
+ * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
+ * @latch: value to convert
+ * @evt: pointer to clock event device descriptor
+ *
+ * Math helper, returns latch value converted to nanoseconds (bound checked)
+ */
+u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
+{
+ return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);
@@ -318,8 +353,8 @@ static void clockevents_config(struct clock_event_device *dev,
sec = 600;
clockevents_calc_mult_shift(dev, freq, sec);
- dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
- dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
+ dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
+ dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}
/**
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 6096e89bee55..8c2f278e5eb7 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -419,7 +419,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
if (miter->addr) {
miter->__offset += miter->consumed;
- if (miter->__flags & SG_MITER_TO_SG)
+ if ((miter->__flags & SG_MITER_TO_SG) &&
+ !PageSlab(miter->page))
flush_kernel_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {
diff --git a/mm/swap.c b/mm/swap.c
index 5c13f1338972..f689e9a03204 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -30,6 +30,7 @@
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
+#include <linux/hugetlb.h>
#include "internal.h"
@@ -68,13 +69,26 @@ static void __put_compound_page(struct page *page)
{
compound_page_dtor *dtor;
- __page_cache_release(page);
+ if (!PageHuge(page))
+ __page_cache_release(page);
dtor = get_compound_page_dtor(page);
(*dtor)(page);
}
static void put_compound_page(struct page *page)
{
+ /*
+ * hugetlbfs pages cannot be split from under us. If this is a
+ * hugetlbfs page, check refcount on head page and release the page if
+ * the refcount becomes zero.
+ */
+ if (PageHuge(page)) {
+ page = compound_head(page);
+ if (put_page_testzero(page))
+ __put_compound_page(page);
+ return;
+ }
+
if (unlikely(PageTail(page))) {
/* __split_huge_page_refcount can run under us */
struct page *page_head = compound_trans_head(page);
@@ -159,8 +173,20 @@ bool __get_page_tail(struct page *page)
*/
unsigned long flags;
bool got = false;
- struct page *page_head = compound_trans_head(page);
+ struct page *page_head;
+
+ /*
+ * If this is a hugetlbfs page it cannot be split under us. Simply
+ * increment refcount for the head page.
+ */
+ if (PageHuge(page)) {
+ page_head = compound_head(page);
+ atomic_inc(&page_head->_count);
+ got = true;
+ goto out;
+ }
+ page_head = compound_trans_head(page);
if (likely(page != page_head && get_page_unless_zero(page_head))) {
/*
* page_head wasn't a dangling pointer but it
@@ -178,6 +204,7 @@ bool __get_page_tail(struct page *page)
if (unlikely(!got))
put_page(page_head);
}
+out:
return got;
}
EXPORT_SYMBOL(__get_page_tail);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 05160dbeaf44..226be1364ef3 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -789,12 +789,15 @@ struct tpt_led_trigger {
* that the scan completed.
* @SCAN_ABORTED: Set for our scan work function when the driver reported
* a scan complete for an aborted scan.
+ * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
+ * cancelled.
*/
enum {
SCAN_SW_SCANNING,
SCAN_HW_SCANNING,
SCAN_COMPLETED,
SCAN_ABORTED,
+ SCAN_HW_CANCELLED,
};
/**
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index bcc57f93adc4..9520749bd1e3 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -259,6 +259,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
enum ieee80211_band band;
int i, ielen, n_chans;
+ if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
+ return false;
+
do {
if (local->hw_scan_band == IEEE80211_NUM_BANDS)
return false;
@@ -844,7 +847,23 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
if (!local->scan_req)
goto out;
+ /*
+ * We have a scan running and the driver already reported completion,
+ * but the worker hasn't run yet or is stuck on the mutex - mark it as
+ * cancelled.
+ */
+ if (test_bit(SCAN_HW_SCANNING, &local->scanning) &&
+ test_bit(SCAN_COMPLETED, &local->scanning)) {
+ set_bit(SCAN_HW_CANCELLED, &local->scanning);
+ goto out;
+ }
+
if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
+ /*
+ * Make sure that __ieee80211_scan_completed doesn't trigger a
+ * scan on another band.
+ */
+ set_bit(SCAN_HW_CANCELLED, &local->scanning);
if (local->ops->cancel_hw_scan)
drv_cancel_hw_scan(local, local->scan_sdata);
goto out;
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 47b117f3f567..b992a49fbe08 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -183,6 +183,9 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
+ if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+ sta->last_rx = jiffies;
+
if (ieee80211_is_data_qos(mgmt->frame_control)) {
struct ieee80211_hdr *hdr = (void *) skb->data;
u8 *qc = ieee80211_get_qos_ctl(hdr);
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index e30e1be30a21..6355540fdb0d 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -49,6 +49,8 @@ static struct snd_pcm *snd_pcm_get(struct snd_card *card, int device)
struct snd_pcm *pcm;
list_for_each_entry(pcm, &snd_pcm_devices, list) {
+ if (pcm->internal)
+ continue;
if (pcm->card == card && pcm->device == device)
return pcm;
}
@@ -60,6 +62,8 @@ static int snd_pcm_next(struct snd_card *card, int device)
struct snd_pcm *pcm;
list_for_each_entry(pcm, &snd_pcm_devices, list) {
+ if (pcm->internal)
+ continue;
if (pcm->card == card && pcm->device > device)
return pcm->device;
else if (pcm->card->number > card->number)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 33abb782a54b..810f1fc2c3f4 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6833,6 +6833,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
+ SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_ASUS_MODE4),
SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 6c028c470601..fec98cf3b2c0 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -413,6 +413,7 @@ static int hp_supply_event(struct snd_soc_dapm_widget *w,
hubs->hp_startup_mode);
break;
}
+ break;
case SND_SOC_DAPM_PRE_PMD:
snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 9ae82a4eb71e..e39364b6b080 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1590,7 +1590,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
w->active ? "active" : "inactive");
list_for_each_entry(p, &w->sources, list_sink) {
- if (p->connected && !p->connected(w, p->sink))
+ if (p->connected && !p->connected(w, p->source))
continue;
if (p->connect)


@@ -0,0 +1,424 @@
diff --git a/Makefile b/Makefile
index 2f9a0467..d7c0a383 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 69
+SUBLEVEL = 70
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 650d5923..94b0650e 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -14,27 +14,15 @@
.text
.align 5
- .word 0
-
-1: subs r2, r2, #4 @ 1 do we have enough
- blt 5f @ 1 bytes to align with?
- cmp r3, #2 @ 1
- strltb r1, [r0], #1 @ 1
- strleb r1, [r0], #1 @ 1
- strb r1, [r0], #1 @ 1
- add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
-/*
- * The pointer is now aligned and the length is adjusted. Try doing the
- * memset again.
- */
ENTRY(memset)
ands r3, r0, #3 @ 1 unaligned?
- bne 1b @ 1
+ mov ip, r0 @ preserve r0 as return value
+ bne 6f @ 1
/*
- * we know that the pointer in r0 is aligned to a word boundary.
+ * we know that the pointer in ip is aligned to a word boundary.
*/
- orr r1, r1, r1, lsl #8
+1: orr r1, r1, r1, lsl #8
orr r1, r1, r1, lsl #16
mov r3, r1
cmp r2, #16
@@ -43,29 +31,28 @@ ENTRY(memset)
#if ! CALGN(1)+0
/*
- * We need an extra register for this loop - save the return address and
- * use the LR
+ * We need 2 extra registers for this loop - use r8 and the LR
*/
- str lr, [sp, #-4]!
- mov ip, r1
+ stmfd sp!, {r8, lr}
+ mov r8, r1
mov lr, r1
2: subs r2, r2, #64
- stmgeia r0!, {r1, r3, ip, lr} @ 64 bytes at a time.
- stmgeia r0!, {r1, r3, ip, lr}
- stmgeia r0!, {r1, r3, ip, lr}
- stmgeia r0!, {r1, r3, ip, lr}
+ stmgeia ip!, {r1, r3, r8, lr} @ 64 bytes at a time.
+ stmgeia ip!, {r1, r3, r8, lr}
+ stmgeia ip!, {r1, r3, r8, lr}
+ stmgeia ip!, {r1, r3, r8, lr}
bgt 2b
- ldmeqfd sp!, {pc} @ Now <64 bytes to go.
+ ldmeqfd sp!, {r8, pc} @ Now <64 bytes to go.
/*
* No need to correct the count; we're only testing bits from now on
*/
tst r2, #32
- stmneia r0!, {r1, r3, ip, lr}
- stmneia r0!, {r1, r3, ip, lr}
+ stmneia ip!, {r1, r3, r8, lr}
+ stmneia ip!, {r1, r3, r8, lr}
tst r2, #16
- stmneia r0!, {r1, r3, ip, lr}
- ldr lr, [sp], #4
+ stmneia ip!, {r1, r3, r8, lr}
+ ldmfd sp!, {r8, lr}
#else
@@ -74,54 +61,63 @@ ENTRY(memset)
* whole cache lines at once.
*/
- stmfd sp!, {r4-r7, lr}
+ stmfd sp!, {r4-r8, lr}
mov r4, r1
mov r5, r1
mov r6, r1
mov r7, r1
- mov ip, r1
+ mov r8, r1
mov lr, r1
cmp r2, #96
- tstgt r0, #31
+ tstgt ip, #31
ble 3f
- and ip, r0, #31
- rsb ip, ip, #32
- sub r2, r2, ip
- movs ip, ip, lsl #(32 - 4)
- stmcsia r0!, {r4, r5, r6, r7}
- stmmiia r0!, {r4, r5}
- tst ip, #(1 << 30)
- mov ip, r1
- strne r1, [r0], #4
+ and r8, ip, #31
+ rsb r8, r8, #32
+ sub r2, r2, r8
+ movs r8, r8, lsl #(32 - 4)
+ stmcsia ip!, {r4, r5, r6, r7}
+ stmmiia ip!, {r4, r5}
+ tst r8, #(1 << 30)
+ mov r8, r1
+ strne r1, [ip], #4
3: subs r2, r2, #64
- stmgeia r0!, {r1, r3-r7, ip, lr}
- stmgeia r0!, {r1, r3-r7, ip, lr}
+ stmgeia ip!, {r1, r3-r8, lr}
+ stmgeia ip!, {r1, r3-r8, lr}
bgt 3b
- ldmeqfd sp!, {r4-r7, pc}
+ ldmeqfd sp!, {r4-r8, pc}
tst r2, #32
- stmneia r0!, {r1, r3-r7, ip, lr}
+ stmneia ip!, {r1, r3-r8, lr}
tst r2, #16
- stmneia r0!, {r4-r7}
- ldmfd sp!, {r4-r7, lr}
+ stmneia ip!, {r4-r7}
+ ldmfd sp!, {r4-r8, lr}
#endif
4: tst r2, #8
- stmneia r0!, {r1, r3}
+ stmneia ip!, {r1, r3}
tst r2, #4
- strne r1, [r0], #4
+ strne r1, [ip], #4
/*
* When we get here, we've got less than 4 bytes to zero. We
* may have an unaligned pointer as well.
*/
5: tst r2, #2
- strneb r1, [r0], #1
- strneb r1, [r0], #1
+ strneb r1, [ip], #1
+ strneb r1, [ip], #1
tst r2, #1
- strneb r1, [r0], #1
+ strneb r1, [ip], #1
mov pc, lr
+
+6: subs r2, r2, #4 @ 1 do we have enough
+ blt 5b @ 1 bytes to align with?
+ cmp r3, #2 @ 1
+ strltb r1, [ip], #1 @ 1
+ strleb r1, [ip], #1 @ 1
+ strb r1, [ip], #1 @ 1
+ add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
+ b 1b
ENDPROC(memset)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 9d7f1723..093bf0a2 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -88,6 +88,7 @@ struct xenvif {
unsigned long credit_usec;
unsigned long remaining_credit;
struct timer_list credit_timeout;
+ u64 credit_window_start;
/* Statistics */
unsigned long rx_gso_checksum_fixup;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 221f4265..cfaaf685 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -273,8 +273,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif->credit_bytes = vif->remaining_credit = ~0UL;
vif->credit_usec = 0UL;
init_timer(&vif->credit_timeout);
- /* Initialize 'expires' now: it's used to track the credit window. */
- vif->credit_timeout.expires = jiffies;
+ vif->credit_window_start = get_jiffies_64();
dev->netdev_ops = &xenvif_netdev_ops;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0d22cff0..d8ffbf84 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1197,9 +1197,8 @@ out:
static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
- unsigned long now = jiffies;
- unsigned long next_credit =
- vif->credit_timeout.expires +
+ u64 now = get_jiffies_64();
+ u64 next_credit = vif->credit_window_start +
msecs_to_jiffies(vif->credit_usec / 1000);
/* Timer could already be pending in rare cases. */
@@ -1207,8 +1206,8 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
return true;
/* Passed the point where we can replenish credit? */
- if (time_after_eq(now, next_credit)) {
- vif->credit_timeout.expires = now;
+ if (time_after_eq64(now, next_credit)) {
+ vif->credit_window_start = now;
tx_add_credit(vif);
}
@@ -1220,6 +1219,7 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
tx_credit_callback;
mod_timer(&vif->credit_timeout,
next_credit);
+ vif->credit_window_start = next_credit;
return true;
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 54fc9880..21b9ed1d 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -680,6 +680,7 @@ static int pci_pm_suspend(struct device *dev)
goto Fixup;
}
+ pci_dev->state_saved = false;
if (pm->suspend) {
pci_power_t prev = pci_dev->current_state;
int error;
@@ -826,6 +827,7 @@ static int pci_pm_freeze(struct device *dev)
return 0;
}
+ pci_dev->state_saved = false;
if (pm->freeze) {
int error;
@@ -914,6 +916,7 @@ static int pci_pm_poweroff(struct device *dev)
goto Fixup;
}
+ pci_dev->state_saved = false;
if (pm->poweroff) {
int error;
@@ -1032,6 +1035,7 @@ static int pci_pm_runtime_suspend(struct device *dev)
if (!pm || !pm->runtime_suspend)
return -ENOSYS;
+ pci_dev->state_saved = false;
error = pm->runtime_suspend(dev);
suspend_report_result(pm->runtime_suspend, error);
if (error)
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index eea85daf..be76ebac 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -206,7 +206,8 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
return ret;
}
-static int _pci_assign_resource(struct pci_dev *dev, int resno, int size, resource_size_t min_align)
+static int _pci_assign_resource(struct pci_dev *dev, int resno,
+ resource_size_t size, resource_size_t min_align)
{
struct resource *res = dev->resource + resno;
struct pci_bus *bus;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index aa54fad7..82fce32c 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1335,6 +1335,7 @@ static int hub_configure(struct usb_hub *hub,
return 0;
fail:
+ hdev->maxchild = 0;
dev_err (hub_dev, "config failed, %s (err %d)\n",
message, ret);
/* hub_disconnect() frees urb and descriptor */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 57277bcc..a5477554 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1391,6 +1391,23 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1545, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1546, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1547, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1565, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1566, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1567, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1589, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1590, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1591, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1592, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1594, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1596, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1598, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1600, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5dd96266..022940f0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -590,9 +590,12 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
if (isspace(ch)) {
parser->buffer[parser->idx] = 0;
parser->cont = false;
- } else {
+ } else if (parser->idx < parser->size - 1) {
parser->cont = true;
parser->buffer[parser->idx++] = ch;
+ } else {
+ ret = -EINVAL;
+ goto out;
}
*ppos += read;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 27747881..200707c6 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -35,7 +35,7 @@ again:
struct iphdr _iph;
ip:
iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
- if (!iph)
+ if (!iph || iph->ihl < 5)
return false;
if (ip_is_fragment(iph))
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 93faf6a3..4a8c55bd 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1468,7 +1468,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
msglen = origlen = end - dptr;
if (msglen > datalen)
- return NF_DROP;
+ return NF_ACCEPT;
ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen);
if (ret != NF_ACCEPT)
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 02a6e3f4..ef917bf2 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1282,23 +1282,34 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
return 0;
}
-static int generic_hdmi_init(struct hda_codec *codec)
+static int generic_hdmi_init_per_pins(struct hda_codec *codec)
{
struct hdmi_spec *spec = codec->spec;
int pin_idx;
for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
struct hdmi_spec_per_pin *per_pin = &spec->pins[pin_idx];
- hda_nid_t pin_nid = per_pin->pin_nid;
struct hdmi_eld *eld = &per_pin->sink_eld;
- hdmi_init_pin(codec, pin_nid);
- snd_hda_jack_detect_enable(codec, pin_nid, pin_nid);
-
per_pin->codec = codec;
INIT_DELAYED_WORK(&per_pin->work, hdmi_repoll_eld);
snd_hda_eld_proc_new(codec, eld, pin_idx);
}
+ return 0;
+}
+
+static int generic_hdmi_init(struct hda_codec *codec)
+{
+ struct hdmi_spec *spec = codec->spec;
+ int pin_idx;
+
+ for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+ struct hdmi_spec_per_pin *per_pin = &spec->pins[pin_idx];
+ hda_nid_t pin_nid = per_pin->pin_nid;
+
+ hdmi_init_pin(codec, pin_nid);
+ snd_hda_jack_detect_enable(codec, pin_nid, pin_nid);
+ }
snd_hda_jack_report_sync(codec);
return 0;
}
@@ -1343,6 +1354,7 @@ static int patch_generic_hdmi(struct hda_codec *codec)
return -EINVAL;
}
codec->patch_ops = generic_hdmi_patch_ops;
+ generic_hdmi_init_per_pins(codec);
init_channel_allocations();

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,602 @@
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 99d4e442b77d..8bb57d7c12ea 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -22,6 +22,7 @@ Supported adapters:
* Intel Panther Point (PCH)
* Intel Lynx Point (PCH)
* Intel Lynx Point-LP (PCH)
+ * Intel Avoton (SOC)
Datasheets: Publicly available at the Intel website
On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Makefile b/Makefile
index 2ea579016292..ce277ff0fd72 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 73
+SUBLEVEL = 74
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index 425162e22af5..2f53b892fd80 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -15,6 +15,8 @@
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/wait.h>
+#include <sys/time.h>
+#include <sys/resource.h>
#include <asm/unistd.h>
#include "init.h"
#include "os.h"
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 850246206b12..585c3b279feb 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -117,6 +117,9 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
if (flags & MSG_SENDPAGE_NOTLAST)
flags |= MSG_MORE;
+ if (flags & MSG_SENDPAGE_NOTLAST)
+ flags |= MSG_MORE;
+
lock_sock(sk);
sg_init_table(ctx->sgl.sg, 1);
sg_set_page(ctx->sgl.sg, page, size, offset);
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index a19c027b29bd..918a3b4148b8 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -381,6 +381,9 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
if (flags & MSG_SENDPAGE_NOTLAST)
flags |= MSG_MORE;
+ if (flags & MSG_SENDPAGE_NOTLAST)
+ flags |= MSG_MORE;
+
lock_sock(sk);
if (!ctx->more && ctx->used)
goto unlock;
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 5ef7ba6b6a76..d21da2f0f508 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -368,9 +368,10 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
if (!err) {
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- struct ablkcipher_request *abreq = aead_request_ctx(areq);
- u8 *iv = (u8 *)(abreq + 1) +
- crypto_ablkcipher_reqsize(ctx->enc);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(areq);
+ struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
+ + ctx->reqoff);
+ u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc);
err = crypto_authenc_genicv(areq, iv, 0);
}
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 32fe1bb5decb..18d64ad0433c 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -271,7 +271,8 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
}
/* compute plaintext into mac */
- get_data_to_compute(cipher, pctx, plain, cryptlen);
+ if (cryptlen)
+ get_data_to_compute(cipher, pctx, plain, cryptlen);
out:
return err;
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 60662545cd14..c20f1578d393 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -268,6 +268,30 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */
{ PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
{ PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c05), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c06), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f25), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f26), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 40cc0cf2ded6..e6939e13e338 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -664,6 +664,13 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro"),
},
},
+ {
+ .ident = "Dell XPS421",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"),
+ },
+ },
{ }
};
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 5a1817eedd1b..c81b8da669ea 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -69,10 +69,14 @@ static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio)
u32 val;
struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
+ u32 out_mask, out_shadow;
- val = in_be32(mm->regs + GPIO_DAT) & ~in_be32(mm->regs + GPIO_DIR);
+ out_mask = in_be32(mm->regs + GPIO_DIR);
- return (val | mpc8xxx_gc->data) & mpc8xxx_gpio2mask(gpio);
+ val = in_be32(mm->regs + GPIO_DAT) & ~out_mask;
+ out_shadow = mpc8xxx_gc->data & out_mask;
+
+ return (val | out_shadow) & mpc8xxx_gpio2mask(gpio);
}
static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio)
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index ea8736bc257d..bc625f6c5b4c 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -105,6 +105,7 @@ config I2C_I801
Panther Point (PCH)
Lynx Point (PCH)
Lynx Point-LP (PCH)
+ Avoton (SOC)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index d88ec812160e..d63e130690e9 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -53,6 +53,7 @@
Panther Point (PCH) 0x1e22 32 hard yes yes yes
Lynx Point (PCH) 0x8c22 32 hard yes yes yes
Lynx Point-LP (PCH) 0x9c22 32 hard yes yes yes
+ Avoton (SOC) 0x1f3c 32 hard yes yes yes
Features supported by this driver:
Software PEC no
@@ -145,6 +146,7 @@
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22
+#define PCI_DEVICE_ID_INTEL_AVOTON_SMBUS 0x1f3c
#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
@@ -639,6 +641,7 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMBUS) },
{ 0, }
};
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 332597980817..20026e3a234a 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -71,7 +71,7 @@ config INPUT_SPARSEKMAP
comment "Userland interfaces"
config INPUT_MOUSEDEV
- tristate "Mouse interface" if EXPERT
+ tristate "Mouse interface"
default y
help
Say Y here if you want your mouse to be accessible as char devices
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index f354813a13e8..69a45701d94a 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -2,7 +2,7 @@
# Input core configuration
#
menuconfig INPUT_KEYBOARD
- bool "Keyboards" if EXPERT || !X86
+ bool "Keyboards"
default y
help
Say Y here, and a list of supported keyboards will be displayed.
@@ -67,7 +67,7 @@ config KEYBOARD_ATARI
module will be called atakbd.
config KEYBOARD_ATKBD
- tristate "AT keyboard" if EXPERT || !X86
+ tristate "AT keyboard"
default y
select SERIO
select SERIO_LIBPS2
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 55f2c2293ec6..93d4182a8916 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -2,7 +2,7 @@
# Input core configuration
#
config SERIO
- tristate "Serial I/O support" if EXPERT || !X86
+ tristate "Serial I/O support"
default y
help
Say Yes here if you have any input device that uses serial I/O to
@@ -19,7 +19,7 @@ config SERIO
if SERIO
config SERIO_I8042
- tristate "i8042 PC Keyboard controller" if EXPERT || !X86
+ tristate "i8042 PC Keyboard controller"
default y
depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
(!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN
@@ -168,7 +168,7 @@ config SERIO_MACEPS2
module will be called maceps2.
config SERIO_LIBPS2
- tristate "PS/2 driver library" if EXPERT
+ tristate "PS/2 driver library"
depends on SERIO_I8042 || SERIO_I8042=n
help
Say Y here if you are using a driver for device connected
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 00e5fcac8fdf..cbee842f8b6b 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -198,6 +198,13 @@ static void enclosure_remove_links(struct enclosure_component *cdev)
{
char name[ENCLOSURE_NAME_SIZE];
+ /*
+ * In odd circumstances, like multipath devices, something else may
+ * already have removed the links, so check for this condition first.
+ */
+ if (!cdev->dev->kobj.sd)
+ return;
+
enclosure_link_name(cdev, name);
sysfs_remove_link(&cdev->dev->kobj, name);
sysfs_remove_link(&cdev->cdev.kobj, "device");
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 5f53fbbf67be..ff1af41e4b5d 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -46,7 +46,8 @@
defined(CONFIG_MACH_LITTLETON) ||\
defined(CONFIG_MACH_ZYLONITE2) ||\
defined(CONFIG_ARCH_VIPER) ||\
- defined(CONFIG_MACH_STARGATE2)
+ defined(CONFIG_MACH_STARGATE2) ||\
+ defined(CONFIG_ARCH_VERSATILE)
#include <asm/mach-types.h>
@@ -154,6 +155,8 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define SMC_outl(v, a, r) writel(v, (a) + (r))
#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
+#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
#define SMC_IRQ_FLAGS (-1) /* from resource */
/* We actually can't write halfwords properly if not word aligned */
@@ -206,23 +209,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define RPC_LSA_DEFAULT RPC_LED_TX_RX
#define RPC_LSB_DEFAULT RPC_LED_100_10
-#elif defined(CONFIG_ARCH_VERSATILE)
-
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 1
-#define SMC_NOWAIT 1
-
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_inl(a, r) readl((a) + (r))
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_outl(v, a, r) writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
-#define SMC_IRQ_FLAGS (-1) /* from resource */
-
#elif defined(CONFIG_MN10300)
/*
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index b0fefc435c03..599240af2244 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1219,7 +1219,7 @@ static void complete_scsi_command(struct CommandList *cp)
"has check condition: aborted command: "
"ASC: 0x%x, ASCQ: 0x%x\n",
cp, asc, ascq);
- cmd->result = DID_SOFT_ERROR << 16;
+ cmd->result |= DID_SOFT_ERROR << 16;
break;
}
/* Must be some other type of check condition */
@@ -4466,7 +4466,7 @@ reinit_after_soft_reset:
hpsa_hba_inquiry(h);
hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
start_controller_lockup_detector(h);
- return 1;
+ return 0;
clean4:
hpsa_free_sg_chain_blocks(h);
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index d109cc3a17b6..51ee663c1310 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -211,7 +211,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
qc->tf.nsect = 0;
}
- ata_tf_to_fis(&qc->tf, 1, 0, (u8*)&task->ata_task.fis);
+ ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis);
task->uldd_task = qc;
if (ata_is_atapi(qc->tf.protocol)) {
memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index e411f1865298..4a9dd86bce8e 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1542,6 +1542,8 @@ static int acm_reset_resume(struct usb_interface *intf)
static const struct usb_device_id acm_ids[] = {
/* quirky and broken devices */
+ { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
+ .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 91293b68df5a..8ccbf5e6b549 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1962,25 +1962,25 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
iflag = tty->termios->c_iflag;
/* Change the number of bits */
- if (cflag & CSIZE) {
- switch (cflag & CSIZE) {
- case CS5:
- lData = LCR_BITS_5;
- break;
+ switch (cflag & CSIZE) {
+ case CS5:
+ lData = LCR_BITS_5;
+ break;
- case CS6:
- lData = LCR_BITS_6;
- break;
+ case CS6:
+ lData = LCR_BITS_6;
+ break;
- case CS7:
- lData = LCR_BITS_7;
- break;
- default:
- case CS8:
- lData = LCR_BITS_8;
- break;
- }
+ case CS7:
+ lData = LCR_BITS_7;
+ break;
+
+ default:
+ case CS8:
+ lData = LCR_BITS_8;
+ break;
}
+
/* Change the Parity bit */
if (cflag & PARENB) {
if (cflag & PARODD) {
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index a1a9062954c4..1dcccd43d629 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -290,24 +290,22 @@ static void pl2303_set_termios(struct tty_struct *tty,
dbg("0xa1:0x21:0:0 %d - %x %x %x %x %x %x %x", i,
buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
- if (cflag & CSIZE) {
- switch (cflag & CSIZE) {
- case CS5:
- buf[6] = 5;
- break;
- case CS6:
- buf[6] = 6;
- break;
- case CS7:
- buf[6] = 7;
- break;
- default:
- case CS8:
- buf[6] = 8;
- break;
- }
- dbg("%s - data bits = %d", __func__, buf[6]);
+ switch (cflag & CSIZE) {
+ case CS5:
+ buf[6] = 5;
+ break;
+ case CS6:
+ buf[6] = 6;
+ break;
+ case CS7:
+ buf[6] = 7;
+ break;
+ default:
+ case CS8:
+ buf[6] = 8;
+ break;
}
+ dbg("%s - data bits = %d", __func__, buf[6]);
/* For reference buf[0]:buf[3] baud rate value */
/* NOTE: Only the values defined in baud_sup are supported !
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index f06c9a8f3d37..003ef9019ad8 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -396,22 +396,20 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
}
/* Set Data Length : 00:5bit, 01:6bit, 10:7bit, 11:8bit */
- if (cflag & CSIZE) {
- switch (cflag & CSIZE) {
- case CS5:
- buf[1] |= SET_UART_FORMAT_SIZE_5;
- break;
- case CS6:
- buf[1] |= SET_UART_FORMAT_SIZE_6;
- break;
- case CS7:
- buf[1] |= SET_UART_FORMAT_SIZE_7;
- break;
- default:
- case CS8:
- buf[1] |= SET_UART_FORMAT_SIZE_8;
- break;
- }
+ switch (cflag & CSIZE) {
+ case CS5:
+ buf[1] |= SET_UART_FORMAT_SIZE_5;
+ break;
+ case CS6:
+ buf[1] |= SET_UART_FORMAT_SIZE_6;
+ break;
+ case CS7:
+ buf[1] |= SET_UART_FORMAT_SIZE_7;
+ break;
+ default:
+ case CS8:
+ buf[1] |= SET_UART_FORMAT_SIZE_8;
+ break;
}
/* Set Stop bit2 : 0:1bit 1:2bit */
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index cabddb5da071..a7ea637bf215 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -4041,11 +4041,17 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
return;
switch (task->tk_status) {
- case -NFS4ERR_STALE_STATEID:
- case -NFS4ERR_EXPIRED:
case 0:
renew_lease(data->res.server, data->timestamp);
break;
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_OLD_STATEID:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_EXPIRED:
+ task->tk_status = 0;
+ break;
default:
if (nfs4_async_handle_error(task, data->res.server, NULL) ==
-EAGAIN) {
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 15e53b1766a6..dcd3f9796817 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -50,7 +50,7 @@ static void resume_irqs(bool want_early)
bool is_early = desc->action &&
desc->action->flags & IRQF_EARLY_RESUME;
- if (is_early != want_early)
+ if (!is_early && want_early)
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 7949b5d1663f..19e784237f7a 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1042,6 +1042,9 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
if (flags & MSG_SENDPAGE_NOTLAST)
flags |= MSG_MORE;
+ if (flags & MSG_SENDPAGE_NOTLAST)
+ flags |= MSG_MORE;
+
if (!up->pending) {
struct msghdr msg = { .msg_flags = flags|MSG_MORE };
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index a32caa72bd7d..8b5afc1d2e60 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -406,10 +406,10 @@ static int wm8731_set_dai_fmt(struct snd_soc_dai *codec_dai,
iface |= 0x0001;
break;
case SND_SOC_DAIFMT_DSP_A:
- iface |= 0x0003;
+ iface |= 0x0013;
break;
case SND_SOC_DAIFMT_DSP_B:
- iface |= 0x0013;
+ iface |= 0x0003;
break;
default:
return -EINVAL;
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 9d242351e6e8..d5ab3351c2bc 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -1265,6 +1265,8 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
/* disable POBCTRL, SOFT_ST and BUFDCOPEN */
snd_soc_write(codec, WM8990_ANTIPOP2, 0x0);
+
+ codec->cache_sync = 1;
break;
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,743 @@
diff --git a/Makefile b/Makefile
index 8045c75414ae..bbdd7ab3e0e3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 76
+SUBLEVEL = 77
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index a53a5a3c3c2e..f01ad2992a20 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -37,7 +37,13 @@
#include "signal.h"
-static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
+static const char *handler[]= {
+ "prefetch abort",
+ "data abort",
+ "address exception",
+ "interrupt",
+ "undefined instruction",
+};
void *vectors_page;
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 8c6202bb6aeb..9c19b45db36d 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -422,7 +422,7 @@ static struct platform_device lcdc_device = {
.resource = lcdc_resources,
.dev = {
.platform_data = &lcdc_info,
- .coherent_dma_mask = ~0,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -498,7 +498,7 @@ static struct platform_device hdmi_lcdc_device = {
.id = 1,
.dev = {
.platform_data = &hdmi_lcdc_info,
- .coherent_dma_mask = ~0,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 92e05b6c84f8..a65708f2f015 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -266,12 +266,13 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed
values. "m" is a random variable that should be in L1 */
- alternative_input(
- ASM_NOP8 ASM_NOP2,
- "emms\n\t" /* clear stack tags */
- "fildl %P[addr]", /* set F?P to defined value */
- X86_FEATURE_FXSAVE_LEAK,
- [addr] "m" (tsk->thread.fpu.has_fpu));
+ if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) {
+ asm volatile(
+ "fnclex\n\t"
+ "emms\n\t"
+ "fildl %P[addr]" /* set F?P to defined value */
+ : : [addr] "m" (tsk->thread.fpu.has_fpu));
+ }
return fpu_restore_checking(&tsk->thread.fpu);
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3551ad82ba8f..650286be8da1 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -14671,6 +14671,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
/* Clear this out for sanity. */
tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+ /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
+ tw32(TG3PCI_REG_BASE_ADDR, 0);
+
pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
&pci_state_reg);
if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 4ebbe6f609d0..5699105112d3 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1776,7 +1776,7 @@ static int xgmac_probe(struct platform_device *pdev)
if (device_can_wakeup(priv->device))
priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
- ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+ ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index f4d2da0db1b1..8b3da6eb5bc4 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3029,7 +3029,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
dev->hw_features = NETIF_F_SG | NETIF_F_TSO
| NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
- dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
+ dev->features = NETIF_F_SG | NETIF_F_TSO
| NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
| NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index ad973ffc9ff3..32f0bcd5e30d 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1995,7 +1995,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
| NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM
- /*| NETIF_F_FRAGLIST */
;
ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_HW_VLAN_TX;
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 5e5b791f12e9..d240c0624d46 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1026,7 +1026,7 @@ static int __devinit temac_of_probe(struct platform_device *op)
dev_set_drvdata(&op->dev, ndev);
SET_NETDEV_DEV(ndev, &op->dev);
ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
- ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
+ ndev->features = NETIF_F_SG;
ndev->netdev_ops = &temac_netdev_ops;
ndev->ethtool_ops = &temac_ethtool_ops;
#if 0
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 9c365e192a31..fde716dec196 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1494,7 +1494,7 @@ static int __devinit axienet_of_probe(struct platform_device *op)
SET_NETDEV_DEV(ndev, &op->dev);
ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
- ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
+ ndev->features = NETIF_F_SG;
ndev->netdev_ops = &axienet_netdev_ops;
ndev->ethtool_ops = &axienet_ethtool_ops;
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index a4a3516b6bbf..3b3a7e07bbf1 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case HDLCDRVCTL_CALIBRATE:
if(!capable(CAP_SYS_RAWIO))
return -EPERM;
+ if (bi.data.calibrate > INT_MAX / s->par.bitrate)
+ return -EINVAL;
s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
return 0;
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 5a6412ecce73..d8faeb628aa7 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1058,6 +1058,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
case SIOCYAMGCFG:
+ memset(&yi, 0, sizeof(yi));
yi.cfg.mask = 0xffffffff;
yi.cfg.iobase = yp->iobase;
yi.cfg.irq = yp->irq;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 2d59138db7f3..a6ed38284134 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -321,7 +321,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
return -EINVAL;
nvdev->start_remove = true;
- cancel_delayed_work_sync(&ndevctx->dwork);
cancel_work_sync(&ndevctx->work);
netif_tx_disable(ndev);
rndis_filter_device_remove(hdev);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 77ce8b2bee6d..f5b9de48bb82 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -797,11 +797,10 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
const struct sk_buff *skb,
const struct iovec *iv, int len)
{
- struct macvlan_dev *vlan;
int ret;
int vnet_hdr_len = 0;
int vlan_offset = 0;
- int copied;
+ int copied, total;
if (q->flags & IFF_VNET_HDR) {
struct virtio_net_hdr vnet_hdr;
@@ -816,7 +815,8 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
return -EFAULT;
}
- copied = vnet_hdr_len;
+ total = copied = vnet_hdr_len;
+ total += skb->len;
if (!vlan_tx_tag_present(skb))
len = min_t(int, skb->len, len);
@@ -831,6 +831,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
len = min_t(int, skb->len + VLAN_HLEN, len);
+ total += VLAN_HLEN;
copy = min_t(int, vlan_offset, len);
ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
@@ -848,16 +849,9 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
}
ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
- copied += len;
done:
- rcu_read_lock_bh();
- vlan = rcu_dereference_bh(q->vlan);
- if (vlan)
- macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
- rcu_read_unlock_bh();
-
- return ret ? ret : copied;
+ return ret ? ret : total;
}
static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
@@ -911,7 +905,9 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
}
ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
- ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */
+ ret = min_t(ssize_t, ret, len);
+ if (ret > 0)
+ iocb->ki_pos = ret;
out:
return ret;
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 194f8798b7ed..5b1a1b51fdb0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -903,6 +903,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
ret = min_t(ssize_t, ret, len);
+ if (ret > 0)
+ iocb->ki_pos = ret;
out:
tun_put(tun);
return ret;
diff --git a/include/linux/net.h b/include/linux/net.h
index 45232814fc03..ff8097592f1d 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -215,7 +215,7 @@ struct proto_ops {
int offset, size_t size, int flags);
ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len, unsigned int flags);
- void (*set_peek_off)(struct sock *sk, int val);
+ int (*set_peek_off)(struct sock *sk, int val);
};
#define DECLARE_SOCKADDR(type, dst, src) \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index dc6c6878700c..5b465010f102 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1702,6 +1702,15 @@ static inline int dev_parse_header(const struct sk_buff *skb,
return dev->header_ops->parse(skb, haddr);
}
+static inline int dev_rebuild_header(struct sk_buff *skb)
+{
+ const struct net_device *dev = skb->dev;
+
+ if (!dev->header_ops || !dev->header_ops->rebuild)
+ return 0;
+ return dev->header_ops->rebuild(skb);
+}
+
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
static inline int unregister_gifconf(unsigned int family)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4b6c546774b2..3a5b31796edf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7906,7 +7906,12 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
runtime_enabled = quota != RUNTIME_INF;
runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
- account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
+ /*
+ * If we need to toggle cfs_bandwidth_used, off->on must occur
+ * before making related changes, and on->off must occur afterwards
+ */
+ if (runtime_enabled && !runtime_was_enabled)
+ cfs_bandwidth_usage_inc();
raw_spin_lock_irq(&cfs_b->lock);
cfs_b->period = ns_to_ktime(period);
cfs_b->quota = quota;
@@ -7932,6 +7937,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
unthrottle_cfs_rq(cfs_rq);
raw_spin_unlock_irq(&rq->lock);
}
+ if (runtime_was_enabled && !runtime_enabled)
+ cfs_bandwidth_usage_dec();
out_unlock:
mutex_unlock(&cfs_constraints_mutex);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index efe9253bd235..363df3702c20 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1393,13 +1393,14 @@ static inline bool cfs_bandwidth_used(void)
return static_key_false(&__cfs_bandwidth_used);
}
-void account_cfs_bandwidth_used(int enabled, int was_enabled)
+void cfs_bandwidth_usage_inc(void)
{
- /* only need to count groups transitioning between enabled/!enabled */
- if (enabled && !was_enabled)
- static_key_slow_inc(&__cfs_bandwidth_used);
- else if (!enabled && was_enabled)
- static_key_slow_dec(&__cfs_bandwidth_used);
+ static_key_slow_inc(&__cfs_bandwidth_used);
+}
+
+void cfs_bandwidth_usage_dec(void)
+{
+ static_key_slow_dec(&__cfs_bandwidth_used);
}
#else /* HAVE_JUMP_LABEL */
static bool cfs_bandwidth_used(void)
@@ -1407,7 +1408,8 @@ static bool cfs_bandwidth_used(void)
return true;
}
-void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
+void cfs_bandwidth_usage_inc(void) {}
+void cfs_bandwidth_usage_dec(void) {}
#endif /* HAVE_JUMP_LABEL */
/*
@@ -1769,6 +1771,13 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
if (idle)
goto out_unlock;
+ /*
+ * if we have relooped after returning idle once, we need to update our
+ * status as actually running, so that other cpus doing
+ * __start_cfs_bandwidth will stop trying to cancel us.
+ */
+ cfs_b->timer_active = 1;
+
__refill_cfs_bandwidth_runtime(cfs_b);
if (!throttled) {
@@ -1829,7 +1838,13 @@ static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
/* how long we wait to gather additional slack before distributing */
static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
-/* are we near the end of the current quota period? */
+/*
+ * Are we near the end of the current quota period?
+ *
+ * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
+ * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of
+ * migrate_hrtimers, base is never cleared, so we are fine.
+ */
static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
{
struct hrtimer *refresh_timer = &cfs_b->period_timer;
@@ -1905,10 +1920,12 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
u64 expires;
/* confirm we're still not at a refresh boundary */
- if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
+ raw_spin_lock(&cfs_b->lock);
+ if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
+ raw_spin_unlock(&cfs_b->lock);
return;
+ }
- raw_spin_lock(&cfs_b->lock);
if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
runtime = cfs_b->runtime;
cfs_b->runtime = 0;
@@ -2033,11 +2050,11 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
* (timer_active==0 becomes visible before the hrtimer call-back
* terminates). In either case we ensure that it's re-programmed
*/
- while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
+ while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
+ hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
+ /* bounce the lock to allow do_sched_cfs_period_timer to run */
raw_spin_unlock(&cfs_b->lock);
- /* ensure cfs_b->lock is available while we wait */
- hrtimer_cancel(&cfs_b->period_timer);
-
+ cpu_relax();
raw_spin_lock(&cfs_b->lock);
/* if someone else restarted the timer then we're done */
if (cfs_b->timer_active)
@@ -5453,7 +5470,8 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
se->cfs_rq = parent->my_q;
se->my_q = cfs_rq;
- update_load_set(&se->load, 0);
+ /* guarantee group entities always have weight */
+ update_load_set(&se->load, NICE_0_LOAD);
se->parent = parent;
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index acfa7017eb0a..4e2bd6c32da7 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1140,7 +1140,8 @@ extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
extern void unthrottle_offline_cfs_rqs(struct rq *rq);
-extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
+extern void cfs_bandwidth_usage_inc(void);
+extern void cfs_bandwidth_usage_dec(void);
#ifdef CONFIG_NO_HZ
enum rq_nohz_flag_bits {
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 8f453927cc51..c2029f732126 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -525,6 +525,23 @@ static const struct header_ops vlan_header_ops = {
.parse = eth_header_parse,
};
+static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type,
+ const void *daddr, const void *saddr,
+ unsigned int len)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev = vlan->real_dev;
+
+ return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
+}
+
+static const struct header_ops vlan_passthru_header_ops = {
+ .create = vlan_passthru_hard_header,
+ .rebuild = dev_rebuild_header,
+ .parse = eth_header_parse,
+};
+
static const struct net_device_ops vlan_netdev_ops;
static int vlan_dev_init(struct net_device *dev)
@@ -564,7 +581,7 @@ static int vlan_dev_init(struct net_device *dev)
dev->needed_headroom = real_dev->needed_headroom;
if (real_dev->features & NETIF_F_HW_VLAN_TX) {
- dev->header_ops = real_dev->header_ops;
+ dev->header_ops = &vlan_passthru_header_ops;
dev->hard_header_len = real_dev->hard_header_len;
} else {
dev->header_ops = &vlan_header_ops;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index ca670d96bae7..cef202a94112 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1744,7 +1744,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
u32 old;
struct net_bridge_mdb_htable *mdb;
- spin_lock(&br->multicast_lock);
+ spin_lock_bh(&br->multicast_lock);
if (!netif_running(br->dev))
goto unlock;
@@ -1776,7 +1776,7 @@ rollback:
}
unlock:
- spin_unlock(&br->multicast_lock);
+ spin_unlock_bh(&br->multicast_lock);
return err;
}
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index b856f87e63d2..1d9a52929bad 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -61,7 +61,6 @@ static struct genl_family net_drop_monitor_family = {
.hdrsize = 0,
.name = "NET_DM",
.version = 2,
- .maxattr = NET_DM_CMD_MAX,
};
static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
diff --git a/net/core/sock.c b/net/core/sock.c
index 561eb57f590c..832cf043a8f7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -795,7 +795,7 @@ set_rcvbuf:
case SO_PEEK_OFF:
if (sock->ops->set_peek_off)
- sock->ops->set_peek_off(sk, val);
+ ret = sock->ops->set_peek_off(sk, val);
else
ret = -EOPNOTSUPP;
break;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index d7b862ad4be4..5fcc8df3f179 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -110,6 +110,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
r->id.idiag_sport = inet->inet_sport;
r->id.idiag_dport = inet->inet_dport;
+
+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
r->id.idiag_src[0] = inet->inet_rcv_saddr;
r->id.idiag_dst[0] = inet->inet_daddr;
@@ -227,12 +231,19 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
r->idiag_family = tw->tw_family;
r->idiag_retrans = 0;
+
r->id.idiag_if = tw->tw_bound_dev_if;
sock_diag_save_cookie(tw, r->id.idiag_cookie);
+
r->id.idiag_sport = tw->tw_sport;
r->id.idiag_dport = tw->tw_dport;
+
+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
r->id.idiag_src[0] = tw->tw_rcv_saddr;
r->id.idiag_dst[0] = tw->tw_daddr;
+
r->idiag_state = tw->tw_substate;
r->idiag_timer = 3;
r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
@@ -714,8 +725,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
r->id.idiag_sport = inet->inet_sport;
r->id.idiag_dport = ireq->rmt_port;
+
+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
r->id.idiag_src[0] = ireq->loc_addr;
r->id.idiag_dst[0] = ireq->rmt_addr;
+
r->idiag_expires = jiffies_to_msecs(tmo);
r->idiag_rqueue = 0;
r->idiag_wqueue = 0;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 6ac8bc29b43e..335b16fccb9d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2114,15 +2114,11 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
{
struct net *net = dev_net(idev->dev);
struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
- net->loopback_dev, 0);
+ net->loopback_dev, DST_NOCOUNT);
int err;
- if (!rt) {
- if (net_ratelimit())
- pr_warning("IPv6: Maximum number of routes reached,"
- " consider increasing route/max_size.\n");
+ if (!rt)
return ERR_PTR(-ENOMEM);
- }
in6_dev_hold(idev);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index df08d7779e1d..1c2dc50c9b44 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -716,7 +716,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
unsigned long cpu_flags;
size_t copied = 0;
u32 peek_seq = 0;
- u32 *seq;
+ u32 *seq, skb_len;
unsigned long used;
int target; /* Read at least this many bytes */
long timeo;
@@ -814,6 +814,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
}
continue;
found_ok_skb:
+ skb_len = skb->len;
/* Ok so how much can we use? */
used = skb->len - offset;
if (len < used)
@@ -846,7 +847,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
}
/* Partial read */
- if (used + offset < skb->len)
+ if (used + offset < skb_len)
continue;
} while (len > 0);
diff --git a/net/rds/ib.c b/net/rds/ib.c
index b4c8b0022fee..ba2dffeff608 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -338,7 +338,8 @@ static int rds_ib_laddr_check(__be32 addr)
ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
/* due to this, we will claim to support iWARP devices unless we
check node_type. */
- if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA)
+ if (ret || !cm_id->device ||
+ cm_id->device->node_type != RDMA_NODE_IB_CA)
ret = -EADDRNOTAVAIL;
rdsdebug("addr %pI4 ret %d node type %d\n",
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index e59094981175..37be6e226d1b 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -552,9 +552,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
&& rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
scat = &rm->data.op_sg[sg];
- ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
- ret = min_t(int, ret, scat->length - conn->c_xmit_data_off);
- return ret;
+ ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
+ return sizeof(struct rds_header) + ret;
}
/* FIXME we may overallocate here */
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index ce5f5b934ea1..bde7d69b440d 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1257,6 +1257,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
if (msg->msg_name) {
struct sockaddr_rose *srose;
+ struct full_sockaddr_rose *full_srose = msg->msg_name;
memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
srose = msg->msg_name;
@@ -1264,18 +1265,9 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
srose->srose_addr = rose->dest_addr;
srose->srose_call = rose->dest_call;
srose->srose_ndigis = rose->dest_ndigis;
- if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
- struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
- for (n = 0 ; n < rose->dest_ndigis ; n++)
- full_srose->srose_digis[n] = rose->dest_digis[n];
- msg->msg_namelen = sizeof(struct full_sockaddr_rose);
- } else {
- if (rose->dest_ndigis >= 1) {
- srose->srose_ndigis = 1;
- srose->srose_digi = rose->dest_digis[0];
- }
- msg->msg_namelen = sizeof(struct sockaddr_rose);
- }
+ for (n = 0 ; n < rose->dest_ndigis ; n++)
+ full_srose->srose_digis[n] = rose->dest_digis[n];
+ msg->msg_namelen = sizeof(struct full_sockaddr_rose);
}
skb_free_datagram(sk, skb);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0540dd9b0387..c0a81a50bd4e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -524,13 +524,17 @@ static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
struct msghdr *, size_t, int);
-static void unix_set_peek_off(struct sock *sk, int val)
+static int unix_set_peek_off(struct sock *sk, int val)
{
struct unix_sock *u = unix_sk(sk);
- mutex_lock(&u->readlock);
+ if (mutex_lock_interruptible(&u->readlock))
+ return -EINTR;
+
sk->sk_peek_off = val;
mutex_unlock(&u->readlock);
+
+ return 0;
}
@@ -708,7 +712,9 @@ static int unix_autobind(struct socket *sock)
int err;
unsigned int retries = 0;
- mutex_lock(&u->readlock);
+ err = mutex_lock_interruptible(&u->readlock);
+ if (err)
+ return err;
err = 0;
if (u->addr)
@@ -841,7 +847,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
addr_len = err;
- mutex_lock(&u->readlock);
+ err = mutex_lock_interruptible(&u->readlock);
+ if (err)
+ goto out;
err = -EINVAL;
if (u->addr)

View file

@@ -0,0 +1,416 @@
diff --git a/Makefile b/Makefile
index bbdd7ab3e0e3..e891990fbf1c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 77
+SUBLEVEL = 78
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index 3b8a2d30d14e..ea34253cb9c6 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -9,6 +9,7 @@
#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/syscore_ops.h>
#include <asm/apic.h>
@@ -209,6 +210,18 @@ out:
return ret;
}
+static void ibs_eilvt_setup(void)
+{
+ /*
+ * Force LVT offset assignment for family 10h: The offsets are
+ * not assigned by the BIOS for this family, so the OS is
+ * responsible for doing it. If the OS assignment fails, fall
+ * back to BIOS settings and try to setup this.
+ */
+ if (boot_cpu_data.x86 == 0x10)
+ force_ibs_eilvt_setup();
+}
+
static inline int get_ibs_lvt_offset(void)
{
u64 val;
@@ -244,6 +257,36 @@ static void clear_APIC_ibs(void *dummy)
setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
+#ifdef CONFIG_PM
+
+static int perf_ibs_suspend(void)
+{
+ clear_APIC_ibs(NULL);
+ return 0;
+}
+
+static void perf_ibs_resume(void)
+{
+ ibs_eilvt_setup();
+ setup_APIC_ibs(NULL);
+}
+
+static struct syscore_ops perf_ibs_syscore_ops = {
+ .resume = perf_ibs_resume,
+ .suspend = perf_ibs_suspend,
+};
+
+static void perf_ibs_pm_init(void)
+{
+ register_syscore_ops(&perf_ibs_syscore_ops);
+}
+
+#else
+
+static inline void perf_ibs_pm_init(void) { }
+
+#endif
+
static int __cpuinit
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
@@ -270,18 +313,12 @@ static __init int amd_ibs_init(void)
if (!caps)
return -ENODEV; /* ibs not supported by the cpu */
- /*
- * Force LVT offset assignment for family 10h: The offsets are
- * not assigned by the BIOS for this family, so the OS is
- * responsible for doing it. If the OS assignment fails, fall
- * back to BIOS settings and try to setup this.
- */
- if (boot_cpu_data.x86 == 0x10)
- force_ibs_eilvt_setup();
+ ibs_eilvt_setup();
if (!ibs_eilvt_valid())
goto out;
+ perf_ibs_pm_init();
get_online_cpus();
ibs_caps = caps;
/* make ibs_caps visible to other cpus: */
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 858432287ab6..2bf03a99c2f9 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1278,14 +1278,12 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
u32 data;
- void *vapic;
if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
return;
- vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
- data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
- kunmap_atomic(vapic);
+ kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+ sizeof(u32));
apic_set_tpr(vcpu->arch.apic, data & 0xff);
}
@@ -1295,7 +1293,6 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
u32 data, tpr;
int max_irr, max_isr;
struct kvm_lapic *apic;
- void *vapic;
if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
return;
@@ -1310,17 +1307,24 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
max_isr = 0;
data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
- vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
- *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
- kunmap_atomic(vapic);
+ kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+ sizeof(u32));
}
-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
+int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
if (!irqchip_in_kernel(vcpu->kvm))
- return;
+ return -EINVAL;
+
+ if (vapic_addr) {
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.apic->vapic_cache,
+ vapic_addr, sizeof(u32)))
+ return -EINVAL;
+ }
vcpu->arch.apic->vapic_addr = vapic_addr;
+ return 0;
}
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 6f4ce2575d09..6aec0714398e 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -15,7 +15,7 @@ struct kvm_lapic {
bool irr_pending;
void *regs;
gpa_t vapic_addr;
- struct page *vapic_page;
+ struct gfn_to_hva_cache vapic_cache;
};
int kvm_create_lapic(struct kvm_vcpu *vcpu);
void kvm_free_lapic(struct kvm_vcpu *vcpu);
@@ -46,7 +46,7 @@ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
+int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3663e0b38976..4b1be290f6e3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2728,8 +2728,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&va, argp, sizeof va))
goto out;
- r = 0;
- kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
+ r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
break;
}
case KVM_X86_SETUP_MCE: {
@@ -5075,33 +5074,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
!kvm_event_needs_reinjection(vcpu);
}
-static void vapic_enter(struct kvm_vcpu *vcpu)
-{
- struct kvm_lapic *apic = vcpu->arch.apic;
- struct page *page;
-
- if (!apic || !apic->vapic_addr)
- return;
-
- page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
-
- vcpu->arch.apic->vapic_page = page;
-}
-
-static void vapic_exit(struct kvm_vcpu *vcpu)
-{
- struct kvm_lapic *apic = vcpu->arch.apic;
- int idx;
-
- if (!apic || !apic->vapic_addr)
- return;
-
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- kvm_release_page_dirty(apic->vapic_page);
- mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
-}
-
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
int max_irr, tpr;
@@ -5385,7 +5357,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
}
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
- vapic_enter(vcpu);
r = 1;
while (r > 0) {
@@ -5442,8 +5413,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
- vapic_exit(vcpu);
-
return r;
}
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 0f52799973d4..fac07d3ae4f3 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -53,7 +53,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES 32 /* Number of Real cores per cpu */
-#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
+#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */
#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 99a102d186ce..67a8393e3f86 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1117,7 +1117,7 @@ read_again:
/* Could not read all from this device, so we will
* need another r10_bio.
*/
- sectors_handled = (r10_bio->sectors + max_sectors
+ sectors_handled = (r10_bio->sector + max_sectors
- bio->bi_sector);
r10_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
@@ -1125,7 +1125,7 @@ read_again:
bio->bi_phys_segments = 2;
else
bio->bi_phys_segments++;
- spin_unlock(&conf->device_lock);
+ spin_unlock_irq(&conf->device_lock);
/* Cannot call generic_make_request directly
* as that will be queued in __generic_make_request
* and subsequent mempool_alloc might block
@@ -2943,10 +2943,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
if (j == conf->copies) {
/* Cannot recover, so abort the recovery or
* record a bad block */
- put_buf(r10_bio);
- if (rb2)
- atomic_dec(&rb2->remaining);
- r10_bio = rb2;
if (any_working) {
/* problem is that there are bad blocks
* on other device(s)
@@ -2978,6 +2974,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
mirror->recovery_disabled
= mddev->recovery_disabled;
}
+ put_buf(r10_bio);
+ if (rb2)
+ atomic_dec(&rb2->remaining);
+ r10_bio = rb2;
break;
}
}
diff --git a/drivers/staging/comedi/drivers/cb_pcidio.c b/drivers/staging/comedi/drivers/cb_pcidio.c
index 8f3215239a15..453ea2d57346 100644
--- a/drivers/staging/comedi/drivers/cb_pcidio.c
+++ b/drivers/staging/comedi/drivers/cb_pcidio.c
@@ -56,10 +56,6 @@ struct pcidio_board {
const char *name; /* name of the board */
int dev_id;
int n_8255; /* number of 8255 chips on board */
-
- /* indices of base address regions */
- int pcicontroler_badrindex;
- int dioregs_badrindex;
};
static const struct pcidio_board pcidio_boards[] = {
@@ -67,22 +63,16 @@ static const struct pcidio_board pcidio_boards[] = {
.name = "pci-dio24",
.dev_id = 0x0028,
.n_8255 = 1,
- .pcicontroler_badrindex = 1,
- .dioregs_badrindex = 2,
},
{
.name = "pci-dio24h",
.dev_id = 0x0014,
.n_8255 = 1,
- .pcicontroler_badrindex = 1,
- .dioregs_badrindex = 2,
},
{
.name = "pci-dio48h",
.dev_id = 0x000b,
.n_8255 = 2,
- .pcicontroler_badrindex = 0,
- .dioregs_badrindex = 1,
},
};
@@ -239,10 +229,15 @@ found:
if (comedi_pci_enable(pcidev, thisboard->name))
return -EIO;
- devpriv->dio_reg_base
- =
+ /*
+ * Use PCI BAR 2 region if non-zero length, else use PCI BAR 1 region.
+ * PCI BAR 1 is only used for older PCI-DIO48H boards. At some point
+ * the PCI-DIO48H was redesigned to use the same PCI interface chip
+ * (and same PCI BAR region) as the other boards.
+ */
+ devpriv->dio_reg_base =
pci_resource_start(devpriv->pci_dev,
- pcidio_boards[index].dioregs_badrindex);
+ (pci_resource_len(pcidev, 2) ? 2 : 1));
/*
* Allocate the subdevice structures. alloc_subdevice() is a
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 88e11fb346b6..d4ca8925f017 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1436,17 +1436,19 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
nilfs_clear_logs(&sci->sc_segbufs);
- err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
- if (unlikely(err))
- return err;
-
if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
sci->sc_freesegs,
sci->sc_nfreesegs,
NULL);
WARN_ON(err); /* do not happen */
+ sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
}
+
+ err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
+ if (unlikely(err))
+ return err;
+
nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
sci->sc_stage = prev_stage;
}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index d86fb2057354..7e95698e4139 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1447,10 +1447,18 @@ static int soft_offline_huge_page(struct page *page, int flags)
return ret;
}
done:
- if (!PageHWPoison(hpage))
- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
- set_page_hwpoison_huge_page(hpage);
- dequeue_hwpoisoned_huge_page(hpage);
+ /* overcommit hugetlb page will be freed to buddy */
+ if (PageHuge(hpage)) {
+ if (!PageHWPoison(hpage))
+ atomic_long_add(1 << compound_trans_order(hpage),
+ &mce_bad_pages);
+ set_page_hwpoison_huge_page(hpage);
+ dequeue_hwpoisoned_huge_page(hpage);
+ } else {
+ SetPageHWPoison(page);
+ atomic_long_inc(&mce_bad_pages);
+ }
+
/* keep elevated page count for bad page */
return ret;
}

File diff suppressed because it is too large

View file

@@ -0,0 +1,939 @@
diff --git a/Makefile b/Makefile
index 7e9c23f19fe4..7b6c9ec4922b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 79
+SUBLEVEL = 80
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index cf02e971467f..0bf5ec2d5818 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -33,6 +33,7 @@
#include <linux/proc_fs.h>
#include <linux/acpi.h>
#include <linux/slab.h>
+#include <linux/regulator/machine.h>
#ifdef CONFIG_X86
#include <asm/mpspec.h>
#endif
@@ -921,6 +922,14 @@ void __init acpi_early_init(void)
goto error0;
}
+ /*
+ * If the system is using ACPI then we can be reasonably
+ * confident that any regulators are managed by the firmware
+ * so tell the regulator core it has everything it needs to
+ * know.
+ */
+ regulator_has_full_constraints();
+
return;
error0:
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 2cbd369e0fcb..7b4cfc54ddb4 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -942,7 +942,10 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
if (track->cb_dirty) {
tmp = track->cb_target_mask;
for (i = 0; i < 8; i++) {
- if ((tmp >> (i * 4)) & 0xF) {
+ u32 format = G_028C70_FORMAT(track->cb_color_info[i]);
+
+ if (format != V_028C70_COLOR_INVALID &&
+ (tmp >> (i * 4)) & 0xF) {
/* at least one component is enabled */
if (track->cb_color_bo[i] == NULL) {
dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 8c403d946acc..49b622904217 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2313,14 +2313,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
+ PACKET3_SH_ACTION_ENA;
+
+ if (rdev->family >= CHIP_RV770)
+ cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
if (rdev->wb.use_event) {
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
/* flush read cache over gart */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
- PACKET3_VC_ACTION_ENA |
- PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
@@ -2334,9 +2337,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
} else {
/* flush read cache over gart */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
- PACKET3_VC_ACTION_ENA |
- PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index b8e12af304a9..3cd9b0e6f5b3 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -747,7 +747,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
if (track->cb_dirty) {
tmp = track->cb_target_mask;
for (i = 0; i < 8; i++) {
- if ((tmp >> (i * 4)) & 0xF) {
+ u32 format = G_0280A0_FORMAT(track->cb_color_info[i]);
+
+ if (format != V_0280A0_COLOR_INVALID &&
+ (tmp >> (i * 4)) & 0xF) {
/* at least one component is enabled */
if (track->cb_color_bo[i] == NULL) {
dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 12ceb829a03e..02bb23821ce2 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -873,6 +873,7 @@
#define PACKET3_INDIRECT_BUFFER 0x32
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */
# define PACKET3_TC_ACTION_ENA (1 << 23)
# define PACKET3_VC_ACTION_ENA (1 << 24)
# define PACKET3_CB_ACTION_ENA (1 << 25)
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index c54d295c6be3..6d0c32b9e8aa 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2785,6 +2785,10 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
/* tell the bios not to handle mode switching */
bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
+ /* clear the vbios dpms state */
+ if (ASIC_IS_DCE4(rdev))
+ bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
+
if (rdev->family >= CHIP_R600) {
WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 6076e85aa5da..19d68c55c7e6 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -1020,6 +1020,9 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
/* Add the default buses */
void radeon_i2c_init(struct radeon_device *rdev)
{
+ if (radeon_hw_i2c)
+ DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
+
if (rdev->is_atom_bios)
radeon_atombios_i2c_init(rdev);
else
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index bf6ca2d8a28b..4c5a7934b3e3 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -587,8 +587,10 @@ void radeon_pm_resume(struct radeon_device *rdev)
rdev->pm.current_clock_mode_index = 0;
rdev->pm.current_sclk = rdev->pm.default_sclk;
rdev->pm.current_mclk = rdev->pm.default_mclk;
- rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
- rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+ if (rdev->pm.power_state) {
+ rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+ rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+ }
if (rdev->pm.pm_method == PM_METHOD_DYNPM
&& rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 828609fa4d28..c0d93d474ccb 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -57,13 +57,20 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
struct qib_sge *sge;
struct ib_wc wc;
u32 length;
+ enum ib_qp_type sqptype, dqptype;
qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
if (!qp) {
ibp->n_pkt_drops++;
return;
}
- if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
+
+ sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
+ IB_QPT_UD : sqp->ibqp.qp_type;
+ dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
+ IB_QPT_UD : qp->ibqp.qp_type;
+
+ if (dqptype != sqptype ||
!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
ibp->n_pkt_drops++;
goto drop;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index abc6ac855598..4e1c6bfc9c8d 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -913,7 +913,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
/* If range covers entire pagetable, free it */
if (!(start_pfn > level_pfn ||
- last_pfn < level_pfn + level_size(level))) {
+ last_pfn < level_pfn + level_size(level) - 1)) {
dma_clear_pte(pte);
domain_flush_cache(domain, pte, sizeof(*pte));
free_pgtable_page(level_pte);
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index 84d2b91e4efb..e0cc5d6a9e46 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -79,6 +79,11 @@ static const struct sysfs_ops dm_sysfs_ops = {
.show = dm_attr_show,
};
+static void dm_kobject_release(struct kobject *kobj)
+{
+ complete(dm_get_completion_from_kobject(kobj));
+}
+
/*
* dm kobject is embedded in mapped_device structure
* no need to define release function here
@@ -86,6 +91,7 @@ static const struct sysfs_ops dm_sysfs_ops = {
static struct kobj_type dm_ktype = {
.sysfs_ops = &dm_sysfs_ops,
.default_attrs = dm_attrs,
+ .release = dm_kobject_release,
};
/*
@@ -104,5 +110,7 @@ int dm_sysfs_init(struct mapped_device *md)
*/
void dm_sysfs_exit(struct mapped_device *md)
{
- kobject_put(dm_kobject(md));
+ struct kobject *kobj = dm_kobject(md);
+ kobject_put(kobj);
+ wait_for_completion(dm_get_completion_from_kobject(kobj));
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 32370ea32e22..d26fddf7c1fb 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -194,6 +194,9 @@ struct mapped_device {
/* sysfs handle */
struct kobject kobj;
+ /* wait until the kobject is released */
+ struct completion kobj_completion;
+
/* zero-length flush that will be cloned and submitted to targets */
struct bio flush_bio;
};
@@ -1891,6 +1894,7 @@ static struct mapped_device *alloc_dev(int minor)
init_waitqueue_head(&md->wait);
INIT_WORK(&md->work, dm_wq_work);
init_waitqueue_head(&md->eventq);
+ init_completion(&md->kobj_completion);
md->disk->major = _major;
md->disk->first_minor = minor;
@@ -2705,6 +2709,13 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
return md;
}
+struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
+{
+ struct mapped_device *md = container_of(kobj, struct mapped_device, kobj);
+
+ return &md->kobj_completion;
+}
+
int dm_suspended_md(struct mapped_device *md)
{
return test_bit(DMF_SUSPENDED, &md->flags);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index b7dacd59d8d7..1174e9654882 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
+#include <linux/completion.h>
/*
* Suspend feature flags
@@ -123,6 +124,7 @@ int dm_sysfs_init(struct mapped_device *md);
void dm_sysfs_exit(struct mapped_device *md);
struct kobject *dm_kobject(struct mapped_device *md);
struct mapped_device *dm_get_from_kobject(struct kobject *kobj);
+struct completion *dm_get_completion_from_kobject(struct kobject *kobj);
/*
* Targets for linear and striped mappings
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index ff3beed6ad2d..79c02d90ce39 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -244,6 +244,10 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
return -EINVAL;
}
+ /*
+ * We need to set this before the dm_tm_new_block() call below.
+ */
+ ll->nr_blocks = nr_blocks;
for (i = old_blocks; i < blocks; i++) {
struct dm_block *b;
struct disk_index_entry idx;
@@ -251,6 +255,7 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);
if (r < 0)
return r;
+
idx.blocknr = cpu_to_le64(dm_block_location(b));
r = dm_tm_unlock(ll->tm, b);
@@ -265,7 +270,6 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
return r;
}
- ll->nr_blocks = nr_blocks;
return 0;
}
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 2a822d9e4684..e6f08d945709 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1022,11 +1022,22 @@ static void atmci_start_request(struct atmel_mci *host,
iflags |= ATMCI_CMDRDY;
cmd = mrq->cmd;
cmdflags = atmci_prepare_command(slot->mmc, cmd);
- atmci_send_command(host, cmd, cmdflags);
+
+ /*
+ * DMA transfer should be started before sending the command to avoid
+ * unexpected errors especially for read operations in SDIO mode.
+ * Unfortunately, in PDC mode, command has to be sent before starting
+ * the transfer.
+ */
+ if (host->submit_data != &atmci_submit_data_dma)
+ atmci_send_command(host, cmd, cmdflags);
if (data)
host->submit_data(host, data);
+ if (host->submit_data == &atmci_submit_data_dma)
+ atmci_send_command(host, cmd, cmdflags);
+
if (mrq->stop) {
host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 6f87c7464eca..a06231f25f06 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -596,7 +596,6 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
ecc_stat >>= 4;
} while (--no_subpages);
- mtd->ecc_stats.corrected += ret;
pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
return ret;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index d08c0d8ec22e..a7f6dcea0d76 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1984,10 +1984,6 @@ void pci_enable_ari(struct pci_dev *dev)
if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
return;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
- if (!pos)
- return;
-
bridge = dev->bus->self;
if (!bridge || !pci_is_pcie(bridge))
return;
@@ -2006,10 +2002,14 @@ void pci_enable_ari(struct pci_dev *dev)
return;
pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
- ctrl |= PCI_EXP_DEVCTL2_ARI;
+ if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
+ ctrl |= PCI_EXP_DEVCTL2_ARI;
+ bridge->ari_enabled = 1;
+ } else {
+ ctrl &= ~PCI_EXP_DEVCTL2_ARI;
+ bridge->ari_enabled = 0;
+ }
pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
-
- bridge->ari_enabled = 1;
}
/**
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 5f8844c1eaad..5f2eddbbff27 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -34,11 +34,11 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
-#include <linux/mod_devicetable.h>
#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/dmi.h>
/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
#include <asm-generic/rtc.h>
@@ -377,6 +377,51 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
return 0;
}
+/*
+ * Do not disable RTC alarm on shutdown - workaround for b0rked BIOSes.
+ */
+static bool alarm_disable_quirk;
+
+static int __init set_alarm_disable_quirk(const struct dmi_system_id *id)
+{
+ alarm_disable_quirk = true;
+ pr_info("rtc-cmos: BIOS has alarm-disable quirk. ");
+ pr_info("RTC alarms disabled\n");
+ return 0;
+}
+
+static const struct dmi_system_id rtc_quirks[] __initconst = {
+ /* https://bugzilla.novell.com/show_bug.cgi?id=805740 */
+ {
+ .callback = set_alarm_disable_quirk,
+ .ident = "IBM Truman",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "4852570"),
+ },
+ },
+ /* https://bugzilla.novell.com/show_bug.cgi?id=812592 */
+ {
+ .callback = set_alarm_disable_quirk,
+ .ident = "Gigabyte GA-990XA-UD3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GA-990XA-UD3"),
+ },
+ },
+ /* http://permalink.gmane.org/gmane.linux.kernel/1604474 */
+ {
+ .callback = set_alarm_disable_quirk,
+ .ident = "Toshiba Satellite L300",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
+ },
+ },
+ {}
+};
+
static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
@@ -385,6 +430,9 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
if (!is_valid_irq(cmos->irq))
return -EINVAL;
+ if (alarm_disable_quirk)
+ return 0;
+
spin_lock_irqsave(&rtc_lock, flags);
if (enabled)
@@ -1166,6 +1214,8 @@ static int __init cmos_init(void)
platform_driver_registered = true;
}
+ dmi_check_system(rtc_quirks);
+
if (retval == 0)
return 0;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 3d8f662e4fe9..b7a034a3259e 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -572,7 +572,9 @@ static void spi_pump_messages(struct kthread_work *work)
ret = master->transfer_one_message(master, master->cur_msg);
if (ret) {
dev_err(&master->dev,
- "failed to transfer one message from queue\n");
+ "failed to transfer one message from queue: %d\n", ret);
+ master->cur_msg->status = ret;
+ spi_finalize_current_message(master);
return;
}
}
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
index 1585db1aa365..a73bc268907f 100644
--- a/fs/exofs/ore.c
+++ b/fs/exofs/ore.c
@@ -103,7 +103,7 @@ int ore_verify_layout(unsigned total_comps, struct ore_layout *layout)
layout->max_io_length =
(BIO_MAX_PAGES_KMALLOC * PAGE_SIZE - layout->stripe_unit) *
- layout->group_width;
+ (layout->group_width - layout->parity);
if (layout->parity) {
unsigned stripe_length =
(layout->group_width - layout->parity) *
@@ -286,7 +286,8 @@ int ore_get_rw_state(struct ore_layout *layout, struct ore_components *oc,
if (length) {
ore_calc_stripe_info(layout, offset, length, &ios->si);
ios->length = ios->si.length;
- ios->nr_pages = (ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
+ ios->nr_pages = ((ios->offset & (PAGE_SIZE - 1)) +
+ ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
if (layout->parity)
_ore_post_alloc_raid_stuff(ios);
}
@@ -536,6 +537,7 @@ void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
u64 H = LmodS - G * T;
u32 N = div_u64(H, U);
+ u32 Nlast;
/* "H - (N * U)" is just "H % U" so it's bound to u32 */
u32 C = (u32)(H - (N * U)) / stripe_unit + G * group_width;
@@ -568,6 +570,10 @@ void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
si->length = T - H;
if (si->length > length)
si->length = length;
+
+ Nlast = div_u64(H + si->length + U - 1, U);
+ si->maxdevUnits = Nlast - N;
+
si->M = M;
}
EXPORT_SYMBOL(ore_calc_stripe_info);
@@ -583,13 +589,16 @@ int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
int ret;
if (per_dev->bio == NULL) {
- unsigned pages_in_stripe = ios->layout->group_width *
- (ios->layout->stripe_unit / PAGE_SIZE);
- unsigned nr_pages = ios->nr_pages * ios->layout->group_width /
- (ios->layout->group_width -
- ios->layout->parity);
- unsigned bio_size = (nr_pages + pages_in_stripe) /
- ios->layout->group_width;
+ unsigned bio_size;
+
+ if (!ios->reading) {
+ bio_size = ios->si.maxdevUnits;
+ } else {
+ bio_size = (ios->si.maxdevUnits + 1) *
+ (ios->layout->group_width - ios->layout->parity) /
+ ios->layout->group_width;
+ }
+ bio_size *= (ios->layout->stripe_unit / PAGE_SIZE);
per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
if (unlikely(!per_dev->bio)) {
@@ -609,8 +618,12 @@ int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
added_len = bio_add_pc_page(q, per_dev->bio, pages[pg],
pglen, pgbase);
if (unlikely(pglen != added_len)) {
- ORE_DBGMSG("Failed bio_add_pc_page bi_vcnt=%u\n",
- per_dev->bio->bi_vcnt);
+ /* If bi_vcnt == bi_max then this is a SW BUG */
+ ORE_DBGMSG("Failed bio_add_pc_page bi_vcnt=0x%x "
+ "bi_max=0x%x BIO_MAX=0x%x cur_len=0x%x\n",
+ per_dev->bio->bi_vcnt,
+ per_dev->bio->bi_max_vecs,
+ BIO_MAX_PAGES_KMALLOC, cur_len);
ret = -ENOMEM;
goto out;
}
@@ -1099,7 +1112,7 @@ int ore_truncate(struct ore_layout *layout, struct ore_components *oc,
size_attr->attr = g_attr_logical_length;
size_attr->attr.val_ptr = &size_attr->newsize;
- ORE_DBGMSG("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
+ ORE_DBGMSG2("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
_LLU(oc->comps->obj.id), _LLU(obj_size), i);
ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
&size_attr->attr);
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 2fa0089a02a8..46549c778001 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -33,25 +33,27 @@ static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence)
if (whence == SEEK_DATA || whence == SEEK_HOLE)
return -EINVAL;
+ mutex_lock(&i->i_mutex);
hpfs_lock(s);
/*printk("dir lseek\n");*/
if (new_off == 0 || new_off == 1 || new_off == 11 || new_off == 12 || new_off == 13) goto ok;
- mutex_lock(&i->i_mutex);
pos = ((loff_t) hpfs_de_as_down_as_possible(s, hpfs_inode->i_dno) << 4) + 1;
while (pos != new_off) {
if (map_pos_dirent(i, &pos, &qbh)) hpfs_brelse4(&qbh);
else goto fail;
if (pos == 12) goto fail;
}
- mutex_unlock(&i->i_mutex);
+ hpfs_add_pos(i, &filp->f_pos);
ok:
+ filp->f_pos = new_off;
hpfs_unlock(s);
- return filp->f_pos = new_off;
-fail:
mutex_unlock(&i->i_mutex);
+ return new_off;
+fail:
/*printk("illegal lseek: %016llx\n", new_off);*/
hpfs_unlock(s);
+ mutex_unlock(&i->i_mutex);
return -ESPIPE;
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index a7ea637bf215..d5faa264ecc2 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -6394,7 +6394,7 @@ nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
switch (err) {
case 0:
case -NFS4ERR_WRONGSEC:
- case -NFS4ERR_NOTSUPP:
+ case -ENOTSUPP:
goto out;
default:
err = nfs4_handle_exception(server, err, &exception);
@@ -6426,7 +6426,7 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
* Fall back on "guess and check" method if
* the server doesn't support SECINFO_NO_NAME
*/
- if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
+ if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
err = nfs4_find_root_sec(server, fhandle, info);
goto out_freepage;
}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index c8ac9a1461c2..8cf722f2e2eb 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -2955,7 +2955,8 @@ out_overflow:
return -EIO;
}
-static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
+static bool __decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected,
+ int *nfs_retval)
{
__be32 *p;
uint32_t opnum;
@@ -2965,19 +2966,32 @@ static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
if (unlikely(!p))
goto out_overflow;
opnum = be32_to_cpup(p++);
- if (opnum != expected) {
- dprintk("nfs: Server returned operation"
- " %d but we issued a request for %d\n",
- opnum, expected);
- return -EIO;
- }
+ if (unlikely(opnum != expected))
+ goto out_bad_operation;
nfserr = be32_to_cpup(p);
- if (nfserr != NFS_OK)
- return nfs4_stat_to_errno(nfserr);
- return 0;
+ if (nfserr == NFS_OK)
+ *nfs_retval = 0;
+ else
+ *nfs_retval = nfs4_stat_to_errno(nfserr);
+ return true;
+out_bad_operation:
+ dprintk("nfs: Server returned operation"
+ " %d but we issued a request for %d\n",
+ opnum, expected);
+ *nfs_retval = -EREMOTEIO;
+ return false;
out_overflow:
print_overflow_msg(__func__, xdr);
- return -EIO;
+ *nfs_retval = -EIO;
+ return false;
+}
+
+static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
+{
+ int retval;
+
+ __decode_op_hdr(xdr, expected, &retval);
+ return retval;
}
/* Dummy routine */
@@ -4680,11 +4694,12 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
uint32_t savewords, bmlen, i;
int status;
- status = decode_op_hdr(xdr, OP_OPEN);
- if (status != -EIO)
- nfs_increment_open_seqid(status, res->seqid);
- if (!status)
- status = decode_stateid(xdr, &res->stateid);
+ if (!__decode_op_hdr(xdr, OP_OPEN, &status))
+ return status;
+ nfs_increment_open_seqid(status, res->seqid);
+ if (status)
+ return status;
+ status = decode_stateid(xdr, &res->stateid);
if (unlikely(status))
return status;
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 4f334d51b38b..acc4ff3702cf 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -487,7 +487,7 @@ static inline void audit_syscall_exit(void *pt_regs)
{
if (unlikely(current->audit_context)) {
int success = is_syscall_success(pt_regs);
- int return_code = regs_return_value(pt_regs);
+ long return_code = regs_return_value(pt_regs);
__audit_syscall_exit(success, return_code);
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e132a2d24740..1c2470de8052 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1237,6 +1237,7 @@ struct sched_entity {
struct sched_rt_entity {
struct list_head run_list;
unsigned long timeout;
+ unsigned long watchdog_stamp;
unsigned int time_slice;
int nr_cpus_allowed;
diff --git a/include/scsi/osd_ore.h b/include/scsi/osd_ore.h
index a5f9b960dfc8..6ca3265a4dca 100644
--- a/include/scsi/osd_ore.h
+++ b/include/scsi/osd_ore.h
@@ -102,6 +102,7 @@ struct ore_striping_info {
unsigned unit_off;
unsigned cur_pg;
unsigned cur_comp;
+ unsigned maxdevUnits;
};
struct ore_io_state;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index eff0b1e96331..32f0cb8f1fe8 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -988,7 +988,8 @@ static void timekeeping_adjust(s64 offset)
*
* Returns the unconsumed cycles.
*/
-static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
+static cycle_t logarithmic_accumulation(cycle_t offset, int shift,
+ unsigned int *clock_set)
{
u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
u64 raw_nsecs;
@@ -1010,7 +1011,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
timekeeper.xtime.tv_sec += leap;
timekeeper.wall_to_monotonic.tv_sec -= leap;
if (leap)
- clock_was_set_delayed();
+ *clock_set = 1;
}
/* Accumulate raw time */
@@ -1042,6 +1043,7 @@ static void update_wall_time(void)
struct clocksource *clock;
cycle_t offset;
int shift = 0, maxshift;
+ unsigned int clock_set = 0;
unsigned long flags;
write_seqlock_irqsave(&timekeeper.lock, flags);
@@ -1077,7 +1079,7 @@ static void update_wall_time(void)
maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
shift = min(shift, maxshift);
while (offset >= timekeeper.cycle_interval) {
- offset = logarithmic_accumulation(offset, shift);
+ offset = logarithmic_accumulation(offset, shift, &clock_set);
if(offset < timekeeper.cycle_interval<<shift)
shift--;
}
@@ -1131,7 +1133,7 @@ static void update_wall_time(void)
timekeeper.xtime.tv_sec += leap;
timekeeper.wall_to_monotonic.tv_sec -= leap;
if (leap)
- clock_was_set_delayed();
+ clock_set = 1;
}
timekeeping_update(false);
@@ -1139,6 +1141,8 @@ static void update_wall_time(void)
out:
write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ if (clock_set)
+ clock_was_set_delayed();
}
/**
diff --git a/mm/slub.c b/mm/slub.c
index 71de9b5685fa..c6f225fa9a87 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4520,7 +4520,13 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
page = c->partial;
if (page) {
- x = page->pobjects;
+ node = page_to_nid(page);
+ if (flags & SO_TOTAL)
+ WARN_ON_ONCE(1);
+ else if (flags & SO_OBJECTS)
+ WARN_ON_ONCE(1);
+ else
+ x = page->pages;
total += x;
nodes[node] += x;
}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index f21486a2ac48..0658fb926983 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1331,9 +1331,13 @@ call_refreshresult(struct rpc_task *task)
task->tk_action = call_refresh;
switch (status) {
case 0:
- if (rpcauth_uptodatecred(task))
+ if (rpcauth_uptodatecred(task)) {
task->tk_action = call_allocate;
- return;
+ return;
+ }
+ /* Use rate-limiting and a max number of retries if refresh
+ * had status 0 but failed to update the cred.
+ */
case -ETIMEDOUT:
rpc_delay(task, 3*HZ);
case -EAGAIN:
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index a7f61d52f05c..1249e17b61ff 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -1914,7 +1914,19 @@ static int filename_trans_read(struct policydb *p, void *fp)
if (rc)
goto out;
- hashtab_insert(p->filename_trans, ft, otype);
+ rc = hashtab_insert(p->filename_trans, ft, otype);
+ if (rc) {
+ /*
+ * Do not return -EEXIST to the caller, or the system
+ * will not boot.
+ */
+ if (rc != -EEXIST)
+ goto out;
+ /* But free memory to avoid memory leak. */
+ kfree(ft);
+ kfree(name);
+ kfree(otype);
+ }
}
hash_eval(p->filename_trans, "filenametr");
return 0;
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index ab2f682fd44c..00772661894c 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -34,6 +34,7 @@
#include <string.h>
#include <ctype.h>
#include <sched.h>
+#include <cpuid.h>
#define MSR_TSC 0x10
#define MSR_NEHALEM_PLATFORM_INFO 0xCE
@@ -932,7 +933,7 @@ void check_cpuid()
eax = ebx = ecx = edx = 0;
- asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0));
+ __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
genuine_intel = 1;
@@ -941,7 +942,7 @@ void check_cpuid()
fprintf(stderr, "%.4s%.4s%.4s ",
(char *)&ebx, (char *)&edx, (char *)&ecx);
- asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
+ __get_cpuid(1, &fms, &ebx, &ecx, &edx);
family = (fms >> 8) & 0xf;
model = (fms >> 4) & 0xf;
stepping = fms & 0xf;
@@ -963,7 +964,7 @@ void check_cpuid()
* This check is valid for both Intel and AMD.
*/
ebx = ecx = edx = 0;
- asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000000));
+ __get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
if (max_level < 0x80000007) {
fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level);
@@ -974,7 +975,7 @@ void check_cpuid()
* Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
* this check is valid for both Intel and AMD
*/
- asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
+ __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
has_invariant_tsc = edx & (1 << 8);
if (!has_invariant_tsc) {
@@ -987,7 +988,7 @@ void check_cpuid()
* this check is valid for both Intel and AMD
*/
- asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
+ __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
has_aperf = ecx & (1 << 0);
if (!has_aperf) {
fprintf(stderr, "No APERF MSR\n");

File diff suppressed because it is too large


@@ -0,0 +1,633 @@
diff --git a/Makefile b/Makefile
index 5e1e1d6e0736..ee80efa38844 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 81
+SUBLEVEL = 82
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 99348c0eaa41..78be2459c9a1 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -61,7 +61,7 @@ ENTRY(startup_continue)
.quad 0 # cr12: tracing off
.quad 0 # cr13: home space segment table
.quad 0xc0000000 # cr14: machine check handling off
- .quad 0 # cr15: linkage stack operations
+ .quad .Llinkage_stack # cr15: linkage stack operations
.Lpcmsk:.quad 0x0000000180000000
.L4malign:.quad 0xffffffffffc00000
.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
@@ -69,12 +69,15 @@ ENTRY(startup_continue)
.Lparmaddr:
.quad PARMAREA
.align 64
-.Lduct: .long 0,0,0,0,.Lduald,0,0,0
+.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
.long 0,0,0,0,0,0,0,0
+.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
.align 128
.Lduald:.rept 8
.long 0x80000000,0,0,0 # invalid access-list entries
.endr
+.Llinkage_stack:
+ .long 0,0,0x89000000,0,0,0,0x8a000000,0
ENTRY(_ehead)
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index a90d45e9dfb0..27c50f4d90cb 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -12,6 +12,8 @@
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/init.h>
+#include <asm/setup.h>
+#include <asm/ipl.h>
#define ESSA_SET_STABLE 1
#define ESSA_SET_UNUSED 2
@@ -41,6 +43,14 @@ void __init cmma_init(void)
if (!cmma_flag)
return;
+ /*
+ * Disable CMM for dump, otherwise the tprot based memory
+ * detection can fail because of unstable pages.
+ */
+ if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) {
+ cmma_flag = 0;
+ return;
+ }
asm volatile(
" .insn rrf,0xb9ab0000,%1,%1,0,0\n"
"0: la %0,0\n"
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 2b461b496a78..36751e211bb8 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -101,6 +101,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
atomic_inc(&bb.done);
submit_bio(type, bio);
+
+ /*
+ * We can loop for a long time in here, if someone does
+ * full device discards (like mkfs). Be nice and allow
+ * us to schedule out to avoid softlocking if preempt
+ * is disabled.
+ */
+ cond_resched();
}
/* Wait for bios in-flight */
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 4e86393a09cf..a81cdd7b9d83 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1303,13 +1303,16 @@ static void blkback_changed(struct xenbus_device *dev,
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
- case XenbusStateClosed:
break;
case XenbusStateConnected:
blkfront_connect(info);
break;
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's Closing state -- fallthrough */
case XenbusStateClosing:
blkfront_closing(info);
break;
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 54a3a6d09819..59596692df3e 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -190,7 +190,7 @@ static int bind_get(int number, dev_t *dev)
struct raw_device_data *rawdev;
struct block_device *bdev;
- if (number <= 0 || number >= MAX_RAW_MINORS)
+ if (number <= 0 || number >= max_raw_minors)
return -EINVAL;
rawdev = &raw_devices[number];
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 060b96064469..d25205309d45 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2279,6 +2279,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
qib_write_kreg(dd, kr_scratch, 0ULL);
+ /* ensure previous Tx parameters are not still forced */
+ qib_write_kreg_port(ppd, krp_tx_deemph_override,
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ reset_tx_deemphasis_override));
+
if (qib_compat_ddr_negotiate) {
ppd->cpspec->ibdeltainprog = 1;
ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3aed841ce84b..17b918d3d6b3 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4678,23 +4678,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
return sectors * (raid_disks - conf->max_degraded);
}
+static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+ safe_put_page(percpu->spare_page);
+ kfree(percpu->scribble);
+ percpu->spare_page = NULL;
+ percpu->scribble = NULL;
+}
+
+static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+ if (conf->level == 6 && !percpu->spare_page)
+ percpu->spare_page = alloc_page(GFP_KERNEL);
+ if (!percpu->scribble)
+ percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+
+ if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
+ free_scratch_buffer(conf, percpu);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static void raid5_free_percpu(struct r5conf *conf)
{
- struct raid5_percpu *percpu;
unsigned long cpu;
if (!conf->percpu)
return;
- get_online_cpus();
- for_each_possible_cpu(cpu) {
- percpu = per_cpu_ptr(conf->percpu, cpu);
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
- }
#ifdef CONFIG_HOTPLUG_CPU
unregister_cpu_notifier(&conf->cpu_notify);
#endif
+
+ get_online_cpus();
+ for_each_possible_cpu(cpu)
+ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
put_online_cpus();
free_percpu(conf->percpu);
@@ -4720,15 +4740,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- if (conf->level == 6 && !percpu->spare_page)
- percpu->spare_page = alloc_page(GFP_KERNEL);
- if (!percpu->scribble)
- percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-
- if (!percpu->scribble ||
- (conf->level == 6 && !percpu->spare_page)) {
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
+ if (alloc_scratch_buffer(conf, percpu)) {
pr_err("%s: failed memory allocation for cpu%ld\n",
__func__, cpu);
return notifier_from_errno(-ENOMEM);
@@ -4736,10 +4748,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
- percpu->spare_page = NULL;
- percpu->scribble = NULL;
+ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
break;
default:
break;
@@ -4751,40 +4760,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
static int raid5_alloc_percpu(struct r5conf *conf)
{
unsigned long cpu;
- struct page *spare_page;
- struct raid5_percpu __percpu *allcpus;
- void *scribble;
- int err;
+ int err = 0;
- allcpus = alloc_percpu(struct raid5_percpu);
- if (!allcpus)
+ conf->percpu = alloc_percpu(struct raid5_percpu);
+ if (!conf->percpu)
return -ENOMEM;
- conf->percpu = allcpus;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ conf->cpu_notify.notifier_call = raid456_cpu_notify;
+ conf->cpu_notify.priority = 0;
+ err = register_cpu_notifier(&conf->cpu_notify);
+ if (err)
+ return err;
+#endif
get_online_cpus();
- err = 0;
for_each_present_cpu(cpu) {
- if (conf->level == 6) {
- spare_page = alloc_page(GFP_KERNEL);
- if (!spare_page) {
- err = -ENOMEM;
- break;
- }
- per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
- }
- scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
- if (!scribble) {
- err = -ENOMEM;
+ err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+ if (err) {
+ pr_err("%s: failed memory allocation for cpu%ld\n",
+ __func__, cpu);
break;
}
- per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
}
-#ifdef CONFIG_HOTPLUG_CPU
- conf->cpu_notify.notifier_call = raid456_cpu_notify;
- conf->cpu_notify.priority = 0;
- if (err == 0)
- err = register_cpu_notifier(&conf->cpu_notify);
-#endif
put_online_cpus();
return err;
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
index c284143cfcd7..ec8b948eea4d 100644
--- a/drivers/power/max17040_battery.c
+++ b/drivers/power/max17040_battery.c
@@ -148,7 +148,7 @@ static void max17040_get_online(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- if (chip->pdata->battery_online)
+ if (chip->pdata && chip->pdata->battery_online)
chip->online = chip->pdata->battery_online();
else
chip->online = 1;
@@ -158,7 +158,8 @@ static void max17040_get_status(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- if (!chip->pdata->charger_online || !chip->pdata->charger_enable) {
+ if (!chip->pdata || !chip->pdata->charger_online
+ || !chip->pdata->charger_enable) {
chip->status = POWER_SUPPLY_STATUS_UNKNOWN;
return;
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b7a034a3259e..3d8f662e4fe9 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -572,9 +572,7 @@ static void spi_pump_messages(struct kthread_work *work)
ret = master->transfer_one_message(master, master->cur_msg);
if (ret) {
dev_err(&master->dev,
- "failed to transfer one message from queue: %d\n", ret);
- master->cur_msg->status = ret;
- spi_finalize_current_message(master);
+ "failed to transfer one message from queue\n");
return;
}
}
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index a8458669350f..0cad46751e00 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -873,7 +873,8 @@ static int __devinit ad799x_probe(struct i2c_client *client,
return 0;
error_free_irq:
- free_irq(client->irq, indio_dev);
+ if (client->irq > 0)
+ free_irq(client->irq, indio_dev);
error_cleanup_ring:
ad799x_ring_cleanup(indio_dev);
error_disable_reg:
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 4a418e44d562..acc0eab58468 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1091,6 +1091,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
{
unsigned int addr = 0;
unsigned int modem = 0;
+ unsigned int brk = 0;
struct gsm_dlci *dlci;
int len = clen;
u8 *dp = data;
@@ -1117,6 +1118,16 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
if (len == 0)
return;
}
+ len--;
+ if (len > 0) {
+ while (gsm_read_ea(&brk, *dp++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ modem <<= 7;
+ modem |= (brk & 0x7f);
+ }
tty = tty_port_tty_get(&dlci->port);
gsm_process_modem(tty, dlci, modem, clen);
if (tty) {
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 904e8341b2c9..84bd4593455e 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -165,6 +165,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -204,6 +205,8 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+ { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_LP101_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_P200X_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index a7019d1e3058..1e2d369df86e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -50,6 +50,7 @@
#define TI_XDS100V2_PID 0xa6d0
#define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
+#define FTDI_EV3CON_PID 0xABB9 /* Mindstorms EV3 Console Adapter */
/* US Interface Navigator (http://www.usinterface.com/) */
#define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */
@@ -363,6 +364,12 @@
/* Sprog II (Andrew Crosland's SprogII DCC interface) */
#define FTDI_SPROG_II 0xF0C8
+/*
+ * Two of the Tagsys RFID Readers
+ */
+#define FTDI_TAGSYS_LP101_PID 0xF0E9 /* Tagsys L-P101 RFID*/
+#define FTDI_TAGSYS_P200X_PID 0xF0EE /* Tagsys Medio P200x RFID*/
+
/* an infrared receiver for user access control with IR tags */
#define FTDI_PIEGROUP_PID 0xF208 /* Product Id */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 9d7e865a518f..abfb45b3940a 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1376,7 +1376,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 685edc872654..144386bab90b 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -19,7 +19,9 @@ config USB_STORAGE
This option depends on 'SCSI' support being enabled, but you
probably also need 'SCSI device support: SCSI disk support'
- (BLK_DEV_SD) for most USB storage devices.
+ (BLK_DEV_SD) for most USB storage devices. Some devices also
+ will require 'Probe all LUNs on each SCSI device'
+ (SCSI_MULTI_LUN).
To compile this driver as a module, choose M here: the
module will be called usb-storage.
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 11418da9bc09..3a7fd6f6af4f 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -78,6 +78,8 @@ static const char* host_info(struct Scsi_Host *host)
static int slave_alloc (struct scsi_device *sdev)
{
+ struct us_data *us = host_to_us(sdev->host);
+
/*
* Set the INQUIRY transfer length to 36. We don't use any of
* the extra data and many devices choke if asked for more or
@@ -102,6 +104,10 @@ static int slave_alloc (struct scsi_device *sdev)
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
+ /* Tell the SCSI layer if we know there is more than one LUN */
+ if (us->protocol == USB_PR_BULK && us->max_lun > 0)
+ sdev->sdev_bflags |= BLIST_FORCELUN;
+
return 0;
}
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index 65a6a75066a8..82e8ed0324e3 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
"Cypress ISD-300LP",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219,
+UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
"Super Top",
"USB 2.0 SATA BRIDGE",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 08d69e5fc143..bbe9adb0eb66 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1441,6 +1441,13 @@ UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
+/* Reported by Moritz Moeller-Herrmann <moritz-kernel@moeller-herrmann.de> */
+UNUSUAL_DEV( 0x0fca, 0x8004, 0x0201, 0x0201,
+ "Research In Motion",
+ "BlackBerry Bold 9000",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 ),
+
/* Reported by Michael Stattmann <michael@stattmann.com> */
UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
"Sony Ericsson",
diff --git a/fs/file.c b/fs/file.c
index ba3f6053025c..d512ca5ea28f 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -47,7 +47,7 @@ static void *alloc_fdmem(size_t size)
* vmalloc() if the allocation size will be considered "large" by the VM.
*/
if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
- void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN);
+ void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY);
if (data != NULL)
return data;
}
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index aff1c616aedf..43ccfd6150d9 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -769,6 +769,7 @@ nlmsvc_grant_blocked(struct nlm_block *block)
struct nlm_file *file = block->b_file;
struct nlm_lock *lock = &block->b_call->a_args.lock;
int error;
+ loff_t fl_start, fl_end;
dprintk("lockd: grant blocked lock %p\n", block);
@@ -786,9 +787,16 @@ nlmsvc_grant_blocked(struct nlm_block *block)
}
/* Try the lock operation again */
+ /* vfs_lock_file() can mangle fl_start and fl_end, but we need
+ * them unchanged for the GRANT_MSG
+ */
lock->fl.fl_flags |= FL_SLEEP;
+ fl_start = lock->fl.fl_start;
+ fl_end = lock->fl.fl_end;
error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
lock->fl.fl_flags &= ~FL_SLEEP;
+ lock->fl.fl_start = fl_start;
+ lock->fl.fl_end = fl_end;
switch (error) {
case 0:
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 192a302d6cfd..8ab8e9390297 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -274,6 +274,7 @@ struct irq_desc *irq_to_desc(unsigned int irq)
{
return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
+EXPORT_SYMBOL(irq_to_desc);
static void free_desc(unsigned int irq)
{
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index a470154e0408..955560e25e05 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -51,7 +51,13 @@
* HZ shrinks, so values greater than 8 overflow 32bits when
* HZ=100.
*/
+#if HZ < 34
+#define JIFFIES_SHIFT 6
+#elif HZ < 67
+#define JIFFIES_SHIFT 7
+#else
#define JIFFIES_SHIFT 8
+#endif
static cycle_t jiffies_read(struct clocksource *cs)
{
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 28667834181e..bd0f1c499e0a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2008,6 +2008,13 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
write &= RB_WRITE_MASK;
tail = write - length;
+ /*
+ * If this is the first commit on the page, then it has the same
+ * timestamp as the page itself.
+ */
+ if (!tail)
+ delta = 0;
+
/* See if we shot pass the end of this buffer page */
if (unlikely(write > BUF_PAGE_SIZE))
return rb_move_tail(cpu_buffer, length, tail,
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index eace7664c805..e4b7188a0572 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -907,7 +907,7 @@ static int ieee80211_fragment(struct ieee80211_tx_data *tx,
}
/* adjust first fragment's length */
- skb->len = hdrlen + per_fragm;
+ skb_trim(skb, hdrlen + per_fragm);
return 0;
}
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 44ddaa542db6..ea243fef9f02 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -186,8 +186,8 @@ static void do_usb_entry(struct usb_device_id *id,
range_lo < 0x9 ? "[%X-9" : "[%X",
range_lo);
sprintf(alias + strlen(alias),
- range_hi > 0xA ? "a-%X]" : "%X]",
- range_lo);
+ range_hi > 0xA ? "A-%X]" : "%X]",
+ range_hi);
}
}
if (bcdDevice_initial_digits < (sizeof(id->bcdDevice_lo) * 2 - 1))
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 88b2fe3ddf42..00d86427af0f 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -154,17 +154,13 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
list_add_tail(&dev->list, &kvm->coalesced_zones);
mutex_unlock(&kvm->slots_lock);
- return ret;
+ return 0;
out_free_dev:
mutex_unlock(&kvm->slots_lock);
-
kfree(dev);
- if (dev == NULL)
- return -ENXIO;
-
- return 0;
+ return ret;
}
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,

File diff suppressed because it is too large


@@ -0,0 +1,946 @@
diff --git a/Makefile b/Makefile
index e677b662f8c5..f75a853f8524 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 83
+SUBLEVEL = 84
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/arm/mach-sa1100/include/mach/collie.h b/arch/arm/mach-sa1100/include/mach/collie.h
index f33679d2d3ee..50e1d850ee2e 100644
--- a/arch/arm/mach-sa1100/include/mach/collie.h
+++ b/arch/arm/mach-sa1100/include/mach/collie.h
@@ -13,6 +13,8 @@
#ifndef __ASM_ARCH_COLLIE_H
#define __ASM_ARCH_COLLIE_H
+#include "hardware.h" /* Gives GPIO_MAX */
+
extern void locomolcd_power(int on);
#define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1)
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
index b47a0e1ab001..c712ecec13ba 100644
--- a/arch/powerpc/kernel/reloc_64.S
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -81,6 +81,7 @@ _GLOBAL(relocate)
6: blr
+.balign 8
p_dyn: .llong __dynamic_start - 0b
p_rela: .llong __rela_dyn_start - 0b
p_st: .llong _stext - 0b
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 03920a15a632..28a3e62fcc50 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -525,7 +525,7 @@ static void __init quirk_amd_nb_node(struct pci_dev *dev)
return;
pci_read_config_dword(nb_ht, 0x60, &val);
- node = val & 7;
+ node = pcibus_to_node(dev->bus) | (val & 7);
/*
* Some hardware may return an invalid node ID,
* so check it first:
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e334389e1c75..b567285efceb 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3007,10 +3007,8 @@ static int cr8_write_interception(struct vcpu_svm *svm)
u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
/* instruction emulation calls kvm_set_cr8() */
r = cr_interception(svm);
- if (irqchip_in_kernel(svm->vcpu.kvm)) {
- clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+ if (irqchip_in_kernel(svm->vcpu.kvm))
return r;
- }
if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
return r;
kvm_run->exit_reason = KVM_EXIT_SET_TPR;
@@ -3566,6 +3564,8 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
return;
+ clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+
if (irr == -1)
return;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 400b8c6eb8eb..f2f37171e21a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4106,6 +4106,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+ { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 04ebeaf8cffe..1026609dc32e 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -878,7 +878,7 @@ static int lookup_existing_device(struct device *dev, void *data)
old->config_rom_retries = 0;
fw_notice(card, "rediscovered device %s\n", dev_name(dev));
- PREPARE_DELAYED_WORK(&old->work, fw_device_update);
+ old->workfn = fw_device_update;
fw_schedule_device_work(old, 0);
if (current_node == card->root_node)
@@ -1040,7 +1040,7 @@ static void fw_device_init(struct work_struct *work)
if (atomic_cmpxchg(&device->state,
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+ device->workfn = fw_device_shutdown;
fw_schedule_device_work(device, SHUTDOWN_DELAY);
} else {
fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
@@ -1172,13 +1172,20 @@ static void fw_device_refresh(struct work_struct *work)
dev_name(&device->device));
gone:
atomic_set(&device->state, FW_DEVICE_GONE);
- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+ device->workfn = fw_device_shutdown;
fw_schedule_device_work(device, SHUTDOWN_DELAY);
out:
if (node_id == card->root_node->node_id)
fw_schedule_bm_work(card, 0);
}
+static void fw_device_workfn(struct work_struct *work)
+{
+ struct fw_device *device = container_of(to_delayed_work(work),
+ struct fw_device, work);
+ device->workfn(work);
+}
+
void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
{
struct fw_device *device;
@@ -1228,7 +1235,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
* power-up after getting plugged in. We schedule the
* first config rom scan half a second after bus reset.
*/
- INIT_DELAYED_WORK(&device->work, fw_device_init);
+ device->workfn = fw_device_init;
+ INIT_DELAYED_WORK(&device->work, fw_device_workfn);
fw_schedule_device_work(device, INITIAL_DELAY);
break;
@@ -1244,7 +1252,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
if (atomic_cmpxchg(&device->state,
FW_DEVICE_RUNNING,
FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
+ device->workfn = fw_device_refresh;
fw_schedule_device_work(device,
device->is_local ? 0 : INITIAL_DELAY);
}
@@ -1259,7 +1267,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
smp_wmb(); /* update node_id before generation */
device->generation = card->generation;
if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_update);
+ device->workfn = fw_device_update;
fw_schedule_device_work(device, 0);
}
break;
@@ -1284,7 +1292,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
device = node->data;
if (atomic_xchg(&device->state,
FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+ device->workfn = fw_device_shutdown;
fw_schedule_device_work(device,
list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
}
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 638e1f71284a..7cff7f7e69e1 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -1014,8 +1014,6 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
if (rcode == RCODE_COMPLETE) {
fwnet_transmit_packet_done(ptask);
} else {
- fwnet_transmit_packet_failed(ptask);
-
if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) {
dev_err(&ptask->dev->netdev->dev,
"fwnet_write_complete failed: %x (skipped %d)\n",
@@ -1023,8 +1021,10 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
errors_skipped = 0;
last_rcode = rcode;
- } else
+ } else {
errors_skipped++;
+ }
+ fwnet_transmit_packet_failed(ptask);
}
}
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index b7e65d7eab64..23a928352498 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -146,6 +146,7 @@ struct sbp2_logical_unit {
*/
int generation;
int retries;
+ work_func_t workfn;
struct delayed_work work;
bool has_sdev;
bool blocked;
@@ -865,7 +866,7 @@ static void sbp2_login(struct work_struct *work)
/* set appropriate retry limit(s) in BUSY_TIMEOUT register */
sbp2_set_busy_timeout(lu);
- PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
+ lu->workfn = sbp2_reconnect;
sbp2_agent_reset(lu);
/* This was a re-login. */
@@ -919,7 +920,7 @@ static void sbp2_login(struct work_struct *work)
* If a bus reset happened, sbp2_update will have requeued
* lu->work already. Reset the work from reconnect to login.
*/
- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+ lu->workfn = sbp2_login;
}
static void sbp2_reconnect(struct work_struct *work)
@@ -953,7 +954,7 @@ static void sbp2_reconnect(struct work_struct *work)
lu->retries++ >= 5) {
dev_err(tgt_dev(tgt), "failed to reconnect\n");
lu->retries = 0;
- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+ lu->workfn = sbp2_login;
}
sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
@@ -973,6 +974,13 @@ static void sbp2_reconnect(struct work_struct *work)
sbp2_conditionally_unblock(lu);
}
+static void sbp2_lu_workfn(struct work_struct *work)
+{
+ struct sbp2_logical_unit *lu = container_of(to_delayed_work(work),
+ struct sbp2_logical_unit, work);
+ lu->workfn(work);
+}
+
static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
{
struct sbp2_logical_unit *lu;
@@ -999,7 +1007,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
lu->blocked = false;
++tgt->dont_block;
INIT_LIST_HEAD(&lu->orb_list);
- INIT_DELAYED_WORK(&lu->work, sbp2_login);
+ lu->workfn = sbp2_login;
+ INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);
list_add_tail(&lu->link, &tgt->lu_list);
return 0;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 6f4627fe24a1..072229dca464 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1048,7 +1048,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}
if (is_dp)
args.v5.ucLaneNum = dp_lane_count;
- else if (radeon_encoder->pixel_clock > 165000)
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v5.ucLaneNum = 8;
else
args.v5.ucLaneNum = 4;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8b73ae84efe7..ceb70dde21ab 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -430,9 +430,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
moved:
if (bo->evicted) {
- ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
- if (ret)
- pr_err("Can not flush read caches\n");
+ if (bdev->driver->invalidate_caches) {
+ ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+ if (ret)
+ pr_err("Can not flush read caches\n");
+ }
bo->evicted = false;
}
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index eb4014a25d3c..75e9233aabaf 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -805,7 +805,7 @@ static int flexcan_open(struct net_device *dev)
err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
if (err)
- goto out_close;
+ goto out_free_irq;
/* start chip and queuing */
err = flexcan_chip_start(dev);
@@ -816,6 +816,8 @@ static int flexcan_open(struct net_device *dev)
return 0;
+ out_free_irq:
+ free_irq(dev->irq, dev);
out_close:
close_candev(dev);
out:
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 558974f466de..6f4604958d63 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -5844,8 +5844,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
work_mask |= opaque_key;
- if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
- (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
+ if (desc->err_vlan & RXD_ERR_MASK) {
drop_it:
tg3_recycle_rx(tnapi, tpr, opaque_key,
desc_idx, *post_ptr);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 93865f899a4f..780604de73f4 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2484,7 +2484,11 @@ struct tg3_rx_buffer_desc {
#define RXD_ERR_TOO_SMALL 0x00400000
#define RXD_ERR_NO_RESOURCES 0x00800000
#define RXD_ERR_HUGE_FRAME 0x01000000
-#define RXD_ERR_MASK 0xffff0000
+
+#define RXD_ERR_MASK (RXD_ERR_BAD_CRC | RXD_ERR_COLLISION | \
+ RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE | \
+ RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL | \
+ RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME)
u32 reserved;
u32 opaque;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bc177b996f4c..efb50d6dfb6c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1084,7 +1084,8 @@ static int virtnet_probe(struct virtio_device *vdev)
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
vi->big_packets = true;
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3f04ba0a5454..d43df93dc83a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1729,11 +1729,20 @@ vmxnet3_netpoll(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
- vmxnet3_disable_all_intrs(adapter);
-
- vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
- vmxnet3_enable_all_intrs(adapter);
+ switch (adapter->intr.type) {
+#ifdef CONFIG_PCI_MSI
+ case VMXNET3_IT_MSIX: {
+ int i;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
+ break;
+ }
+#endif
+ case VMXNET3_IT_MSI:
+ default:
+ vmxnet3_intr(0, adapter->netdev);
+ break;
+ }
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index b6ba1e8149be..15960024d4a0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -55,7 +55,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x33795d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
{0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
{0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
@@ -94,7 +94,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
- {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa},
{0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
};
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index a5e182b5e944..6f882bcf96f1 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -340,8 +340,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
ht_cap->header.len =
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header),
- (u8 *) bss_desc->bcn_ht_cap +
- sizeof(struct ieee_types_header),
+ (u8 *)bss_desc->bcn_ht_cap,
le16_to_cpu(ht_cap->header.len));
mwifiex_fill_cap_info(priv, radio_type, ht_cap);
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index adbad69d1069..15bd1a42f236 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -310,9 +310,8 @@ static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
}
#define for_each_isci_host(id, ihost, pdev) \
- for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
- id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
- ihost = to_pci_info(pdev)->hosts[++id])
+ for (id = 0; id < SCI_MAX_CONTROLLERS && \
+ (ihost = to_pci_info(pdev)->hosts[id]); id++)
static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
{
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 6d1e9544cbe5..b81f34ddc025 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -619,13 +619,6 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
} else {
/* the phy is already the part of the port */
- u32 port_state = iport->sm.current_state_id;
-
- /* if the PORT'S state is resetting then the link up is from
- * port hard reset in this case, we need to tell the port
- * that link up is recieved
- */
- BUG_ON(port_state != SCI_PORT_RESETTING);
port_agent->phy_ready_mask |= 1 << phy_index;
sci_port_link_up(iport, iphy);
}
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 374254ede9d4..2a81e1793e5d 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -1312,7 +1312,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
/* XXX: need to cleanup any ireqs targeting this
* domain_device
*/
- ret = TMF_RESP_FUNC_COMPLETE;
+ ret = -ENODEV;
goto out;
}
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a2443031dbe7..09bedb7f47e9 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2600,8 +2600,7 @@ struct qla_hw_data {
IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
IS_QLA82XX(ha) || IS_QLA83XX(ha))
#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
-#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
- IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
+#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index f9986ccbd80c..446c02379c80 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1131,6 +1131,9 @@ static void storvsc_device_destroy(struct scsi_device *sdevice)
{
struct stor_mem_pools *memp = sdevice->hostdata;
+ if (!memp)
+ return;
+
mempool_destroy(memp->request_mempool);
kmem_cache_destroy(memp->request_pool);
kfree(memp);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 86eff48dab78..503a6bdc1d17 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -995,6 +995,8 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
bytes = min(bytes, working_bytes);
kaddr = kmap_atomic(page_out);
memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
+ if (*pg_index == (vcnt - 1) && *pg_offset == 0)
+ memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
kunmap_atomic(kaddr);
flush_dcache_page(page_out);
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 89af1d269274..16d16e56cc39 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -540,16 +540,19 @@ int nfs_async_inode_return_delegation(struct inode *inode,
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
+ if (delegation == NULL)
+ goto out_enoent;
- if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid)) {
- rcu_read_unlock();
- return -ENOENT;
- }
+ if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid))
+ goto out_enoent;
nfs_mark_return_delegation(server, delegation);
rcu_read_unlock();
nfs_delegation_run_state_manager(clp);
return 0;
+out_enoent:
+ rcu_read_unlock();
+ return -ENOENT;
}
static struct inode *
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 7602783d7f41..8021098ff507 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2389,8 +2389,8 @@ out_dio:
if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
((file->f_flags & O_DIRECT) && !direct_io)) {
- ret = filemap_fdatawrite_range(file->f_mapping, pos,
- pos + count - 1);
+ ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
+ *ppos + count - 1);
if (ret < 0)
written = ret;
@@ -2403,8 +2403,8 @@ out_dio:
}
if (!ret)
- ret = filemap_fdatawait_range(file->f_mapping, pos,
- pos + count - 1);
+ ret = filemap_fdatawait_range(file->f_mapping, *ppos,
+ *ppos + count - 1);
}
/*
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 92fcd575775a..a40e5cedd6d4 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -712,6 +712,12 @@ static int ocfs2_release_dquot(struct dquot *dquot)
*/
if (status < 0)
mlog_errno(status);
+ /*
+ * Clear dq_off so that we search for the structure in quota file next
+ * time we acquire it. The structure might be deleted and reallocated
+ * elsewhere by another node while our dquot structure is on freelist.
+ */
+ dquot->dq_off = 0;
clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_trans:
ocfs2_commit_trans(osb, handle);
@@ -750,16 +756,17 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
status = ocfs2_lock_global_qf(info, 1);
if (status < 0)
goto out;
- if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
- status = ocfs2_qinfo_lock(info, 0);
- if (status < 0)
- goto out_dq;
- status = qtree_read_dquot(&info->dqi_gi, dquot);
- ocfs2_qinfo_unlock(info, 0);
- if (status < 0)
- goto out_dq;
- }
- set_bit(DQ_READ_B, &dquot->dq_flags);
+ status = ocfs2_qinfo_lock(info, 0);
+ if (status < 0)
+ goto out_dq;
+ /*
+ * We always want to read dquot structure from disk because we don't
+ * know what happened with it while it was on freelist.
+ */
+ status = qtree_read_dquot(&info->dqi_gi, dquot);
+ ocfs2_qinfo_unlock(info, 0);
+ if (status < 0)
+ goto out_dq;
OCFS2_DQUOT(dquot)->dq_use_count++;
OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index f100bf70a906..b6cfcf263d7f 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -1300,10 +1300,6 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
out:
- /* Clear the read bit so that next time someone uses this
- * dquot he reads fresh info from disk and allocates local
- * dquot structure */
- clear_bit(DQ_READ_B, &dquot->dq_flags);
return status;
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 9fc77b412ac4..754ac4ddd826 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2092,6 +2092,7 @@ static int proc_map_files_get_link(struct dentry *dentry, struct path *path)
if (rc)
goto out_mmput;
+ rc = -ENOENT;
down_read(&mm->mmap_sem);
vma = find_exact_vma(mm, vm_start, vm_end);
if (vma && vma->vm_file) {
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 4d259fc7722c..66e013b7c9f2 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -186,6 +186,7 @@ struct fw_device {
unsigned irmc:1;
unsigned bc_implemented:2;
+ work_func_t workfn;
struct delayed_work work;
struct fw_attribute_group attribute_group;
};
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 265e2c3cbd1c..f5df3dcfe811 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -106,13 +106,13 @@ static inline u64 get_jiffies_64(void)
#define time_after(a,b) \
(typecheck(unsigned long, a) && \
typecheck(unsigned long, b) && \
- ((long)(b) - (long)(a) < 0))
+ ((long)((b) - (a)) < 0))
#define time_before(a,b) time_after(b,a)
#define time_after_eq(a,b) \
(typecheck(unsigned long, a) && \
typecheck(unsigned long, b) && \
- ((long)(a) - (long)(b) >= 0))
+ ((long)((a) - (b)) >= 0))
#define time_before_eq(a,b) time_after_eq(b,a)
/*
@@ -135,13 +135,13 @@ static inline u64 get_jiffies_64(void)
#define time_after64(a,b) \
(typecheck(__u64, a) && \
typecheck(__u64, b) && \
- ((__s64)(b) - (__s64)(a) < 0))
+ ((__s64)((b) - (a)) < 0))
#define time_before64(a,b) time_after64(b,a)
#define time_after_eq64(a,b) \
(typecheck(__u64, a) && \
typecheck(__u64, b) && \
- ((__s64)(a) - (__s64)(b) >= 0))
+ ((__s64)((a) - (b)) >= 0))
#define time_before_eq64(a,b) time_after_eq64(b,a)
/*
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index bd96ecd0e05c..c49c363f5bfd 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -60,6 +60,12 @@ struct tp_module {
unsigned int num_tracepoints;
struct tracepoint * const *tracepoints_ptrs;
};
+bool trace_module_has_bad_taint(struct module *mod);
+#else
+static inline bool trace_module_has_bad_taint(struct module *mod)
+{
+ return false;
+}
#endif /* CONFIG_MODULES */
struct tracepoint_iter {
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index e372d9495e42..4b843ace285c 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2338,9 +2338,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
task_lock(current);
cs = nearest_hardwall_ancestor(task_cs(current));
+ allowed = node_isset(node, cs->mems_allowed);
task_unlock(current);
- allowed = node_isset(node, cs->mems_allowed);
mutex_unlock(&callback_mutex);
return allowed;
}
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 86a500d7ea59..3d1bbbcc2923 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -777,8 +777,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
static void wake_threads_waitq(struct irq_desc *desc)
{
- if (atomic_dec_and_test(&desc->threads_active) &&
- waitqueue_active(&desc->wait_for_threads))
+ if (atomic_dec_and_test(&desc->threads_active))
wake_up(&desc->wait_for_threads);
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index dd33c9fd8009..d21498e3c503 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5270,15 +5270,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
/*
- * Ensure the task's vruntime is normalized, so that when its
+ * Ensure the task's vruntime is normalized, so that when it's
* switched back to the fair class the enqueue_entity(.flags=0) will
* do the right thing.
*
- * If it was on_rq, then the dequeue_entity(.flags=0) will already
- * have normalized the vruntime, if it was !on_rq, then only when
+ * If it's on_rq, then the dequeue_entity(.flags=0) will already
+ * have normalized the vruntime, if it's !on_rq, then only when
* the task is sleeping will it still have non-normalized vruntime.
*/
- if (!se->on_rq && p->state != TASK_RUNNING) {
+ if (!p->on_rq && p->state != TASK_RUNNING) {
/*
* Fix up our vruntime so that the current sleep doesn't
* cause 'unlimited' sleep bonus.
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 29111da1d100..2f737f536c27 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1354,6 +1354,16 @@ static void trace_module_add_events(struct module *mod)
struct ftrace_module_file_ops *file_ops = NULL;
struct ftrace_event_call **call, **start, **end;
+ if (!mod->num_trace_events)
+ return;
+
+ /* Don't add infrastructure for mods without tracepoints */
+ if (trace_module_has_bad_taint(mod)) {
+ pr_err("%s: module has bad taint, not creating trace events\n",
+ mod->name);
+ return;
+ }
+
start = mod->trace_events;
end = mod->trace_events + mod->num_trace_events;
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index d96ba22dabfa..23d8560dbc56 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -628,6 +628,11 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter)
EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
#ifdef CONFIG_MODULES
+bool trace_module_has_bad_taint(struct module *mod)
+{
+ return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP));
+}
+
static int tracepoint_module_coming(struct module *mod)
{
struct tp_module *tp_mod, *iter;
@@ -638,7 +643,7 @@ static int tracepoint_module_coming(struct module *mod)
* module headers (for forced load), to make sure we don't cause a crash.
* Staging and out-of-tree GPL modules are fine.
*/
- if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
+ if (trace_module_has_bad_taint(mod))
return 0;
mutex_lock(&tracepoints_mutex);
tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index e2e0e0bc6622..569eb2cef3c9 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -242,6 +242,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
return NULL;
spin_lock_init(&sta->lock);
+ spin_lock_init(&sta->ps_lock);
INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
mutex_init(&sta->ampdu_mlme.mtx);
@@ -971,6 +972,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
skb_queue_head_init(&pending);
+ /* sync with ieee80211_tx_h_unicast_ps_buf */
+ spin_lock(&sta->ps_lock);
/* Send all buffered frames to the station */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
int count = skb_queue_len(&pending), tmp;
@@ -990,6 +993,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
}
ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta);
+ spin_unlock(&sta->ps_lock);
local->total_ps_buffered -= buffered;
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index ab0576827baf..249f4d087936 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -227,6 +227,7 @@ struct sta_ampdu_mlme {
* @drv_unblock_wk: used for driver PS unblocking
* @listen_interval: listen interval of this station, when we're acting as AP
* @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly
+ * @ps_lock: used for powersave (when mac80211 is the AP) related locking
* @ps_tx_buf: buffers (per AC) of frames to transmit to this station
* when it leaves power saving state or polls
* @tx_filtered: buffers (per AC) of frames we already tried to
@@ -297,10 +298,8 @@ struct sta_info {
/* use the accessors defined below */
unsigned long _flags;
- /*
- * STA powersave frame queues, no more than the internal
- * locking required.
- */
+ /* STA powersave lock and frame queues */
+ spinlock_t ps_lock;
struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS];
struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS];
unsigned long driver_buffered_tids;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index e4b7188a0572..b7fc3dd4b8ef 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -471,6 +471,20 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
purge_old_ps_buffers(tx->local);
+
+ /* sync with ieee80211_sta_ps_deliver_wakeup */
+ spin_lock(&sta->ps_lock);
+ /*
+ * STA woke up the meantime and all the frames on ps_tx_buf have
+ * been queued to pending queue. No reordering can happen, go
+ * ahead and Tx the packet.
+ */
+ if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
+ !test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
+ spin_unlock(&sta->ps_lock);
+ return TX_CONTINUE;
+ }
+
if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
@@ -487,6 +501,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
info->control.vif = &tx->sdata->vif;
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
+ spin_unlock(&sta->ps_lock);
if (!timer_pending(&local->sta_cleanup))
mod_timer(&local->sta_cleanup,
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index cb1c4303a07a..f131caf06a19 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -747,6 +747,13 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
struct sctp_chunk auth;
sctp_ierror_t ret;
+ /* Make sure that we and the peer are AUTH capable */
+ if (!sctp_auth_enable || !new_asoc->peer.auth_capable) {
+ kfree_skb(chunk->auth_chunk);
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+ }
+
/* set-up our fake chunk so that we can process it */
auth.skb = chunk->auth_chunk;
auth.asoc = chunk->asoc;
diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c
index 793bdf03d7e0..71527467de05 100644
--- a/sound/pci/oxygen/xonar_dg.c
+++ b/sound/pci/oxygen/xonar_dg.c
@@ -294,6 +294,16 @@ static int output_switch_put(struct snd_kcontrol *ctl,
oxygen_write16_masked(chip, OXYGEN_GPIO_DATA,
data->output_sel == 1 ? GPIO_HP_REAR : 0,
GPIO_HP_REAR);
+ oxygen_write8_masked(chip, OXYGEN_PLAY_ROUTING,
+ data->output_sel == 0 ?
+ OXYGEN_PLAY_MUTE01 :
+ OXYGEN_PLAY_MUTE23 |
+ OXYGEN_PLAY_MUTE45 |
+ OXYGEN_PLAY_MUTE67,
+ OXYGEN_PLAY_MUTE01 |
+ OXYGEN_PLAY_MUTE23 |
+ OXYGEN_PLAY_MUTE45 |
+ OXYGEN_PLAY_MUTE67);
}
mutex_unlock(&chip->mutex);
return changed;
@@ -597,7 +607,7 @@ struct oxygen_model model_xonar_dg = {
.model_data_size = sizeof(struct dg),
.device_config = PLAYBACK_0_TO_I2S |
PLAYBACK_1_TO_SPDIF |
- CAPTURE_0_FROM_I2S_2 |
+ CAPTURE_0_FROM_I2S_1 |
CAPTURE_1_FROM_SPDIF,
.dac_channels_pcm = 6,
.dac_channels_mixer = 0,
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 5ca46525d47e..30515583cab3 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -819,6 +819,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
}
break;
+ case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
case USB_ID(0x046d, 0x0808):
case USB_ID(0x046d, 0x0809):
case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */


@@ -0,0 +1,403 @@
diff --git a/Makefile b/Makefile
index f75a853f8524..66ae2984a38c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 84
+SUBLEVEL = 85
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 53426c66352a..2dd513bdca23 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -37,10 +37,10 @@ struct outer_cache_fns {
void (*resume)(void);
};
-#ifdef CONFIG_OUTER_CACHE
-
extern struct outer_cache_fns outer_cache;
+#ifdef CONFIG_OUTER_CACHE
+
static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
{
if (outer_cache.inv_range)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4cb164268846..fd6dec6ffa47 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2451,6 +2451,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
int emulate = 0;
gfn_t pseudo_gfn;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return 0;
+
for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
if (iterator.level == level) {
unsigned pte_access = ACC_ALL;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 90f5c0ed9d2f..617b00b4857b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6281,8 +6281,8 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
free_vpid(vmx);
- free_nested(vmx);
free_loaded_vmcs(vmx->loaded_vmcs);
+ free_nested(vmx);
kfree(vmx->guest_msrs);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vmx);
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 877b9a1b2152..01495755701b 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -140,7 +140,7 @@ bpf_slow_path_byte_msh:
push %r9; \
push SKBDATA; \
/* rsi already has offset */ \
- mov $SIZE,%ecx; /* size */ \
+ mov $SIZE,%edx; /* size */ \
call bpf_internal_load_pointer_neg_helper; \
test %rax,%rax; \
pop SKBDATA; \
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index f4059e9da301..8ba22ebff637 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -962,33 +962,35 @@ static int __devinit i7300_get_devices(struct mem_ctl_info *mci)
/* Attempt to 'get' the MCH register we want */
pdev = NULL;
- while (!pvt->pci_dev_16_1_fsb_addr_map ||
- !pvt->pci_dev_16_2_fsb_err_regs) {
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, pdev);
- if (!pdev) {
- /* End of list, leave */
- i7300_printk(KERN_ERR,
- "'system address,Process Bus' "
- "device not found:"
- "vendor 0x%x device 0x%x ERR funcs "
- "(broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
- goto error;
- }
-
+ while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
+ pdev))) {
/* Store device 16 funcs 1 and 2 */
switch (PCI_FUNC(pdev->devfn)) {
case 1:
- pvt->pci_dev_16_1_fsb_addr_map = pdev;
+ if (!pvt->pci_dev_16_1_fsb_addr_map)
+ pvt->pci_dev_16_1_fsb_addr_map =
+ pci_dev_get(pdev);
break;
case 2:
- pvt->pci_dev_16_2_fsb_err_regs = pdev;
+ if (!pvt->pci_dev_16_2_fsb_err_regs)
+ pvt->pci_dev_16_2_fsb_err_regs =
+ pci_dev_get(pdev);
break;
}
}
+ if (!pvt->pci_dev_16_1_fsb_addr_map ||
+ !pvt->pci_dev_16_2_fsb_err_regs) {
+ /* At least one device was not found */
+ i7300_printk(KERN_ERR,
+ "'system address,Process Bus' device not found:"
+ "vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
+ goto error;
+ }
+
debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n",
pci_name(pvt->pci_dev_16_0_fsb_ctlr),
pvt->pci_dev_16_0_fsb_ctlr->vendor,
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 479011004a11..9bdc3b8597a4 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -486,6 +486,7 @@ static void elantech_input_sync_v4(struct psmouse *psmouse)
unsigned char *packet = psmouse->packet;
input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
+ input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
input_mt_report_pointer_emulation(dev, true);
input_sync(dev);
}
@@ -954,6 +955,44 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
}
/*
+ * Advertise INPUT_PROP_BUTTONPAD for clickpads. The testing of bit 12 in
+ * fw_version for this is based on the following fw_version & caps table:
+ *
+ * Laptop-model: fw_version: caps: buttons:
+ * Acer S3 0x461f00 10, 13, 0e clickpad
+ * Acer S7-392 0x581f01 50, 17, 0d clickpad
+ * Acer V5-131 0x461f02 01, 16, 0c clickpad
+ * Acer V5-551 0x461f00 ? clickpad
+ * Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons
+ * Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons
+ * Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons
+ * Asus UX31 0x361f00 20, 15, 0e clickpad
+ * Asus UX32VD 0x361f02 00, 15, 0e clickpad
+ * Avatar AVIU-145A2 0x361f00 ? clickpad
+ * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
+ * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
+ * Samsung NF210 0x150b00 78, 14, 0a 2 hw buttons
+ * Samsung NP770Z5E 0x575f01 10, 15, 0f clickpad
+ * Samsung NP700Z5B 0x361f06 21, 15, 0f clickpad
+ * Samsung NP900X3E-A02 0x575f03 ? clickpad
+ * Samsung NP-QX410 0x851b00 19, 14, 0c clickpad
+ * Samsung RC512 0x450f00 08, 15, 0c 2 hw buttons
+ * Samsung RF710 0x450f00 ? 2 hw buttons
+ * System76 Pangolin 0x250f01 ? 2 hw buttons
+ * (*) + 3 trackpoint buttons
+ */
+static void elantech_set_buttonpad_prop(struct psmouse *psmouse)
+{
+ struct input_dev *dev = psmouse->dev;
+ struct elantech_data *etd = psmouse->private;
+
+ if (etd->fw_version & 0x001000) {
+ __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
+ __clear_bit(BTN_RIGHT, dev->keybit);
+ }
+}
+
+/*
* Set the appropriate event bits for the input subsystem
*/
static int elantech_set_input_params(struct psmouse *psmouse)
@@ -996,6 +1035,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
__set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
/* fall through */
case 3:
+ if (etd->hw_version == 3)
+ elantech_set_buttonpad_prop(psmouse);
input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
if (etd->reports_pressure) {
@@ -1017,9 +1058,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
*/
psmouse_warn(psmouse, "couldn't query resolution data.\n");
}
- /* v4 is clickpad, with only one button. */
- __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
- __clear_bit(BTN_RIGHT, dev->keybit);
+ elantech_set_buttonpad_prop(psmouse);
__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
/* For X to recognize me as touchpad. */
input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index 91bad2f23842..ba38ace80a49 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -825,14 +825,15 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
trace_idx = 1;
#endif
+ /* map the remaining (adjusted) nocopy/dup fragments */
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
- if (!cmd->len[i])
+ if (!cmdlen[i])
continue;
if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
continue;
phys_addr = dma_map_single(trans->dev,
- (void *)cmd->data[i],
- cmd->len[i], DMA_BIDIRECTIONAL);
+ (void *)cmddata[i],
+ cmdlen[i], DMA_BIDIRECTIONAL);
if (dma_mapping_error(trans->dev, phys_addr)) {
iwlagn_unmap_tfd(trans, out_meta,
&txq->tfds[q->write_ptr],
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index a08a6f0e4dd1..29338190ee27 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -583,7 +583,7 @@ static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb)
chan = priv->curchan;
if (chan) {
struct survey_info *survey = &priv->survey[chan->hw_value];
- survey->noise = clamp_t(s8, priv->noise, -128, 127);
+ survey->noise = clamp(priv->noise, -128, 127);
survey->channel_time = priv->survey_raw.active;
survey->channel_time_tx = priv->survey_raw.tx;
survey->channel_time_busy = priv->survey_raw.tx +
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 8b1c27f93025..496764b60a3b 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -109,6 +109,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
+ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ pdev->device == 0x0015 &&
+ pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
+ pdev->subsystem_device == 0xc0cd)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
if (pdev->vendor == PCI_VENDOR_ID_VIA)
xhci->quirks |= XHCI_RESET_ON_RESUME;
}
diff --git a/ipc/msg.c b/ipc/msg.c
index 7385de25788a..25f1a6139584 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -296,7 +296,9 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
}
atomic_sub(msq->q_cbytes, &ns->msg_bytes);
security_msg_queue_free(msq);
+ ipc_lock_by_ptr(&msq->q_perm);
ipc_rcu_putref(msq);
+ ipc_unlock(&msq->q_perm);
}
/*
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index d8f031a762ae..835d81b0934e 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1269,14 +1269,17 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
*
* Caller should hold map_sem for read.
*/
-static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
+static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
+ bool force_resend_writes)
{
struct ceph_osd_request *req, *nreq;
struct rb_node *p;
int needmap = 0;
int err;
+ bool force_resend_req;
- dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
+ dout("kick_requests %s %s\n", force_resend ? " (force resend)" : "",
+ force_resend_writes ? " (force resend writes)" : "");
mutex_lock(&osdc->request_mutex);
for (p = rb_first(&osdc->requests); p; ) {
req = rb_entry(p, struct ceph_osd_request, r_node);
@@ -1299,7 +1302,10 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
continue;
}
- err = __map_request(osdc, req, force_resend);
+ force_resend_req = force_resend ||
+ (force_resend_writes &&
+ req->r_flags & CEPH_OSD_FLAG_WRITE);
+ err = __map_request(osdc, req, force_resend_req);
if (err < 0)
continue; /* error */
if (req->r_osd == NULL) {
@@ -1319,7 +1325,8 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
r_linger_item) {
dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
- err = __map_request(osdc, req, force_resend);
+ err = __map_request(osdc, req,
+ force_resend || force_resend_writes);
dout("__map_request returned %d\n", err);
if (err == 0)
continue; /* no change and no osd was specified */
@@ -1361,6 +1368,7 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
struct ceph_osdmap *newmap = NULL, *oldmap;
int err;
struct ceph_fsid fsid;
+ bool was_full;
dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
p = msg->front.iov_base;
@@ -1374,6 +1382,8 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
down_write(&osdc->map_sem);
+ was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
+
/* incremental maps */
ceph_decode_32_safe(&p, end, nr_maps, bad);
dout(" %d inc maps\n", nr_maps);
@@ -1398,7 +1408,10 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
ceph_osdmap_destroy(osdc->osdmap);
osdc->osdmap = newmap;
}
- kick_requests(osdc, 0);
+ was_full = was_full ||
+ ceph_osdmap_flag(osdc->osdmap,
+ CEPH_OSDMAP_FULL);
+ kick_requests(osdc, 0, was_full);
} else {
dout("ignoring incremental map %u len %d\n",
epoch, maplen);
@@ -1441,7 +1454,10 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
skipped_map = 1;
ceph_osdmap_destroy(oldmap);
}
- kick_requests(osdc, skipped_map);
+ was_full = was_full ||
+ ceph_osdmap_flag(osdc->osdmap,
+ CEPH_OSDMAP_FULL);
+ kick_requests(osdc, skipped_map, was_full);
}
p += maplen;
nr_maps--;
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index eee5f8ed2493..ed7ccdcd3e6e 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -62,7 +62,7 @@ create_package() {
fi
# Create the package
- dpkg-gencontrol -isp $forcearch -p$pname -P"$pdir"
+ dpkg-gencontrol -isp $forcearch -Vkernel:debarch="${debarch:-$(dpkg --print-architecture)}" -p$pname -P"$pdir"
dpkg --build "$pdir" ..
}
@@ -252,15 +252,14 @@ mkdir -p "$destdir"
(cd $objtree; tar -c -f - -T "$objtree/debian/hdrobjfiles") | (cd $destdir; tar -xf -)
ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build"
rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
-arch=$(dpkg --print-architecture)
cat <<EOF >> debian/control
Package: $kernel_headers_packagename
Provides: linux-headers, linux-headers-2.6
-Architecture: $arch
-Description: Linux kernel headers for $KERNELRELEASE on $arch
- This package provides kernel header files for $KERNELRELEASE on $arch
+Architecture: any
+Description: Linux kernel headers for $KERNELRELEASE on \${kernel:debarch}
+ This package provides kernel header files for $KERNELRELEASE on \${kernel:debarch}
.
This is useful for people who need to build external modules
EOF
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 7895983862b2..1edb2e822074 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -133,7 +133,7 @@ static int snd_compr_open(struct inode *inode, struct file *f)
kfree(data);
}
snd_card_unref(compr->card);
- return 0;
+ return ret;
}
static int snd_compr_free(struct inode *inode, struct file *f)

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,654 @@
diff --git a/Makefile b/Makefile
index 8f45901dd370..b2192326e7f9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 87
+SUBLEVEL = 88
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 76c7ccfb1ebe..37a8daf105eb 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -24,7 +24,7 @@ config SPARC
select HAVE_IRQ_WORK
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
- select HAVE_ARCH_JUMP_LABEL
+ select HAVE_ARCH_JUMP_LABEL if SPARC64
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
select USE_GENERIC_SMP_HELPERS if SMP
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index a1091afb8831..8bc9afdea2d4 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -266,8 +266,8 @@ extern long __strnlen_user(const char __user *, long len);
#define strlen_user __strlen_user
#define strnlen_user __strnlen_user
-#define __copy_to_user_inatomic ___copy_to_user
-#define __copy_from_user_inatomic ___copy_from_user
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
#endif /* __ASSEMBLY__ */
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index fdaf21811670..8c5c9a5675b4 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -486,8 +486,8 @@ static void __devinit apb_fake_ranges(struct pci_dev *dev,
apb_calc_first_last(map, &first, &last);
res = bus->resource[1];
res->flags = IORESOURCE_MEM;
- region.start = (first << 21);
- region.end = (last << 21) + ((1 << 21) - 1);
+ region.start = (first << 29);
+ region.end = (last << 29) + ((1 << 29) - 1);
pcibios_bus_to_resource(dev, res, &region);
}
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index 817187d42777..557212cfa0f2 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -184,7 +184,8 @@ linux_sparc_syscall32:
mov %i0, %l5 ! IEU1
5: call %l7 ! CTI Group brk forced
srl %i5, 0, %o5 ! IEU1
- ba,a,pt %xcc, 3f
+ ba,pt %xcc, 3f
+ sra %o0, 0, %o0
/* Linux native system calls enter here... */
.align 32
@@ -212,7 +213,6 @@ linux_sparc_syscall:
3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
ret_sys_call:
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
- sra %o0, 0, %o0
mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
sllx %g2, 32, %g2
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index a22a7a502740..8156cafad11a 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -352,7 +352,7 @@ static inline void write_all_bytes(struct si_sm_data *bt)
static inline int read_all_bytes(struct si_sm_data *bt)
{
- unsigned char i;
+ unsigned int i;
/*
* length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index 02125e6a9109..5a4da94aefb0 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -518,9 +518,9 @@ static isdnloop_stat isdnloop_cmd_table[] =
static void
isdnloop_fake_err(isdnloop_card *card)
{
- char buf[60];
+ char buf[64];
- sprintf(buf, "E%s", card->omsg);
+ snprintf(buf, sizeof(buf), "E%s", card->omsg);
isdnloop_fake(card, buf, -1);
isdnloop_fake(card, "NAK", -1);
}
@@ -903,6 +903,8 @@ isdnloop_parse_cmd(isdnloop_card *card)
case 7:
/* 0x;EAZ */
p += 3;
+ if (strlen(p) >= sizeof(card->eazlist[0]))
+ break;
strcpy(card->eazlist[ch - 1], p);
break;
case 8:
@@ -1070,6 +1072,12 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
return -EBUSY;
if (copy_from_user((char *) &sdef, (char *) sdefp, sizeof(sdef)))
return -EFAULT;
+
+ for (i = 0; i < 3; i++) {
+ if (!memchr(sdef.num[i], 0, sizeof(sdef.num[i])))
+ return -EINVAL;
+ }
+
spin_lock_irqsave(&card->isdnloop_lock, flags);
switch (sdef.ptype) {
case ISDN_PTYPE_EURO:
@@ -1127,7 +1135,7 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
{
ulong a;
int i;
- char cbuf[60];
+ char cbuf[80];
isdn_ctrl cmd;
isdnloop_cdef cdef;
@@ -1192,7 +1200,6 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
break;
if ((c->arg & 255) < ISDNLOOP_BCH) {
char *p;
- char dial[50];
char dcode[4];
a = c->arg;
@@ -1204,10 +1211,10 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
} else
/* Normal Dial */
strcpy(dcode, "CAL");
- strcpy(dial, p);
- sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
- dcode, dial, c->parm.setup.si1,
- c->parm.setup.si2, c->parm.setup.eazmsn);
+ snprintf(cbuf, sizeof(cbuf),
+ "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
+ dcode, p, c->parm.setup.si1,
+ c->parm.setup.si2, c->parm.setup.eazmsn);
i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
}
break;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index a3e5cdd1cf56..cd4966bcb2d2 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -338,8 +338,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
* into multiple copies tend to give large frags their
* own buffers as before.
*/
- if ((offset + size > MAX_BUFFER_OFFSET) &&
- (size <= MAX_BUFFER_OFFSET) && offset && !head)
+ BUG_ON(size > MAX_BUFFER_OFFSET);
+ if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
return true;
return false;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 50a3cb5b589a..3fd908c5c0cc 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -324,9 +324,13 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
r = -ENOBUFS;
goto err;
}
- d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
+ r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
ARRAY_SIZE(vq->iov) - seg, &out,
&in, log, log_num);
+ if (unlikely(r < 0))
+ goto err;
+
+ d = r;
if (d == vq->num) {
r = 0;
goto err;
@@ -351,6 +355,12 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
*iovcount = seg;
if (unlikely(log))
*log_num = nlogs;
+
+ /* Detect overrun */
+ if (unlikely(datalen > 0)) {
+ r = UIO_MAXIOV + 1;
+ goto err;
+ }
return headcount;
err:
vhost_discard_vq_desc(vq, headcount);
@@ -405,6 +415,14 @@ static void handle_rx(struct vhost_net *net)
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
break;
+ /* On overrun, truncate and discard */
+ if (unlikely(headcount > UIO_MAXIOV)) {
+ msg.msg_iovlen = 1;
+ err = sock->ops->recvmsg(NULL, sock, &msg,
+ 1, MSG_DONTWAIT | MSG_TRUNC);
+ pr_debug("Discarded rx packet: len %zd\n", sock_len);
+ continue;
+ }
/* OK, now we need to know about added descriptors. */
if (!headcount) {
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
index 16a5047903a6..406d9cc84ba8 100644
--- a/fs/jffs2/compr_rtime.c
+++ b/fs/jffs2/compr_rtime.c
@@ -33,7 +33,7 @@ static int jffs2_rtime_compress(unsigned char *data_in,
unsigned char *cpage_out,
uint32_t *sourcelen, uint32_t *dstlen)
{
- short positions[256];
+ unsigned short positions[256];
int outpos = 0;
int pos=0;
@@ -74,7 +74,7 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
unsigned char *cpage_out,
uint32_t srclen, uint32_t destlen)
{
- short positions[256];
+ unsigned short positions[256];
int outpos = 0;
int pos=0;
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
index e4619b00f7c5..fa35ff79ab35 100644
--- a/fs/jffs2/nodelist.h
+++ b/fs/jffs2/nodelist.h
@@ -231,7 +231,7 @@ struct jffs2_tmp_dnode_info
uint32_t version;
uint32_t data_crc;
uint32_t partial_crc;
- uint16_t csize;
+ uint32_t csize;
uint16_t overlapped;
};
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index ffa8f12c7a54..87044bcc4137 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -139,6 +139,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
spin_unlock(&c->erase_completion_lock);
schedule();
+ remove_wait_queue(&c->erase_wait, &wait);
} else
spin_unlock(&c->erase_completion_lock);
} else if (ret)
@@ -169,20 +170,25 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
uint32_t *len, uint32_t sumsize)
{
- int ret = -EAGAIN;
+ int ret;
minsize = PAD(minsize);
jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
- spin_lock(&c->erase_completion_lock);
- while(ret == -EAGAIN) {
+ while (true) {
+ spin_lock(&c->erase_completion_lock);
ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
if (ret) {
jffs2_dbg(1, "%s(): looping, ret is %d\n",
__func__, ret);
}
+ spin_unlock(&c->erase_completion_lock);
+
+ if (ret == -EAGAIN)
+ cond_resched();
+ else
+ break;
}
- spin_unlock(&c->erase_completion_lock);
if (!ret)
ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
diff --git a/kernel/exit.c b/kernel/exit.c
index bfbd85669694..3eb4dcfc658a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -761,9 +761,6 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
struct list_head *dead)
{
list_move_tail(&p->sibling, &p->real_parent->children);
-
- if (p->exit_state == EXIT_DEAD)
- return;
/*
* If this is a threaded reparent there is no need to
* notify anyone anything has happened.
@@ -771,9 +768,19 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
if (same_thread_group(p->real_parent, father))
return;
- /* We don't want people slaying init. */
+ /*
+ * We don't want people slaying init.
+ *
+ * Note: we do this even if it is EXIT_DEAD, wait_task_zombie()
+ * can change ->exit_state to EXIT_ZOMBIE. If this is the final
+ * state, do_notify_parent() was already called and ->exit_signal
+ * doesn't matter.
+ */
p->exit_signal = SIGCHLD;
+ if (p->exit_state == EXIT_DEAD)
+ return;
+
/* If it has exited notify the new parent about this child's death. */
if (!p->ptrace &&
p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 4226dfeb5178..01c67507dc43 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -299,9 +299,15 @@ int nla_memcmp(const struct nlattr *nla, const void *data,
*/
int nla_strcmp(const struct nlattr *nla, const char *str)
{
- int len = strlen(str) + 1;
- int d = nla_len(nla) - len;
+ int len = strlen(str);
+ char *buf = nla_data(nla);
+ int attrlen = nla_len(nla);
+ int d;
+ if (attrlen > 0 && buf[attrlen - 1] == '\0')
+ attrlen--;
+
+ d = attrlen - len;
if (d == 0)
d = memcmp(nla_data(nla), str, len);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index c2029f732126..cf88c2f3b9e3 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -533,6 +533,9 @@ static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
+ if (saddr == NULL)
+ saddr = dev->dev_addr;
+
return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 1e6347cf77f2..21b2de18c3b7 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -3383,7 +3383,13 @@ static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
- if (ltk->type & HCI_SMP_STK) {
+ /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
+ * temporary key used to encrypt a connection following
+ * pairing. It is used during the Encrypted Session Setup to
+ * distribute the keys. Later, security can be re-established
+ * using a distributed LTK.
+ */
+ if (ltk->type == HCI_SMP_STK_SLAVE) {
list_del(&ltk->list);
kfree(ltk);
}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index cef202a94112..0b870d75a542 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1138,6 +1138,12 @@ static int br_ip6_multicast_query(struct net_bridge *br,
br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr));
+ /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
+ if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) {
+ err = -EINVAL;
+ goto out;
+ }
+
if (skb->len == sizeof(*mld)) {
if (!pskb_may_pull(skb, sizeof(*mld))) {
err = -EINVAL;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index abfa0074d59f..f976c3858df0 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -897,8 +897,11 @@ retry:
* Lifetime is greater than REGEN_ADVANCE time units. In particular,
* an implementation must not create a temporary address with a zero
* Preferred Lifetime.
+ * Use age calculation as in addrconf_verify to avoid unnecessary
+ * temporary addresses being generated.
*/
- if (tmp_prefered_lft <= regen_advance) {
+ age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
+ if (tmp_prefered_lft <= regen_advance + age) {
in6_ifa_put(ifp);
in6_dev_put(idev);
ret = -1;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index dbf20f620936..50b124fe9a3a 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -501,7 +501,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
np->tclass, NULL, &fl6, (struct rt6_info*)dst,
MSG_DONTWAIT, np->dontfrag);
if (err) {
- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
+ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
} else {
err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 8cd6854c2cae..5a2d819d8262 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1194,21 +1194,19 @@ static void ip6_append_data_mtu(unsigned int *mtu,
unsigned int fragheaderlen,
struct sk_buff *skb,
struct rt6_info *rt,
- bool pmtuprobe)
+ unsigned int orig_mtu)
{
if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
if (skb == NULL) {
/* first fragment, reserve header_len */
- *mtu = *mtu - rt->dst.header_len;
+ *mtu = orig_mtu - rt->dst.header_len;
} else {
/*
* this fragment is not first, the headers
* space is regarded as data space.
*/
- *mtu = min(*mtu, pmtuprobe ?
- rt->dst.dev->mtu :
- dst_mtu(rt->dst.path));
+ *mtu = orig_mtu;
}
*maxfraglen = ((*mtu - fragheaderlen) & ~7)
+ fragheaderlen - sizeof(struct frag_hdr);
@@ -1225,7 +1223,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_cork *cork;
struct sk_buff *skb, *skb_prev = NULL;
- unsigned int maxfraglen, fragheaderlen, mtu;
+ unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
int exthdrlen;
int dst_exthdrlen;
int hh_len;
@@ -1310,6 +1308,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
dst_exthdrlen = 0;
mtu = cork->fragsize;
}
+ orig_mtu = mtu;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
@@ -1392,8 +1391,7 @@ alloc_new_skb:
if (skb == NULL || skb_prev == NULL)
ip6_append_data_mtu(&mtu, &maxfraglen,
fragheaderlen, skb, rt,
- np->pmtudisc ==
- IPV6_PMTUDISC_PROBE);
+ orig_mtu);
skb_prev = skb;
@@ -1663,8 +1661,8 @@ int ip6_push_pending_frames(struct sock *sk)
if (proto == IPPROTO_ICMPV6) {
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
- ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+ ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
+ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
}
err = ip6_local_out(skb);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 9728df5c569e..e027bf21c6f4 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1430,11 +1430,12 @@ static void mld_sendpack(struct sk_buff *skb)
dst_output);
out:
if (!err) {
- ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT);
- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
- IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
- } else
- IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
+ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
+ IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
+ } else {
+ IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ }
rcu_read_unlock();
return;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 335b16fccb9d..b685f99ffb18 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1302,7 +1302,7 @@ int ip6_route_add(struct fib6_config *cfg)
if (!table)
goto out;
- rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
+ rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
if (!rt) {
err = -ENOMEM;
diff --git a/net/rds/iw.c b/net/rds/iw.c
index 7826d46baa70..589935661d66 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -239,7 +239,8 @@ static int rds_iw_laddr_check(__be32 addr)
ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
/* due to this, we will claim to support IB devices unless we
check node_type. */
- if (ret || cm_id->device->node_type != RDMA_NODE_RNIC)
+ if (ret || !cm_id->device ||
+ cm_id->device->node_type != RDMA_NODE_RNIC)
ret = -EADDRNOTAVAIL;
rdsdebug("addr %pI4 ret %d node type %d\n",
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index a85eeeb55dd0..adb028746270 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1366,8 +1366,8 @@ static void sctp_chunk_destroy(struct sctp_chunk *chunk)
BUG_ON(!list_empty(&chunk->list));
list_del_init(&chunk->transmitted_list);
- /* Free the chunk skb data and the SCTP_chunk stub itself. */
- dev_kfree_skb(chunk->skb);
+ consume_skb(chunk->skb);
+ consume_skb(chunk->auth_chunk);
SCTP_DBG_OBJCNT_DEC(chunk);
kmem_cache_free(sctp_chunk_cachep, chunk);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index f131caf06a19..5ac33b600538 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -749,7 +749,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
/* Make sure that we and the peer are AUTH capable */
if (!sctp_auth_enable || !new_asoc->peer.auth_capable) {
- kfree_skb(chunk->auth_chunk);
sctp_association_free(new_asoc);
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
@@ -764,10 +763,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
auth.transport = chunk->transport;
ret = sctp_sf_authenticate(ep, new_asoc, type, &auth);
-
- /* We can now safely free the auth_chunk clone */
- kfree_skb(chunk->auth_chunk);
-
if (ret != SCTP_IERROR_NO_ERROR) {
sctp_association_free(new_asoc);
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
diff --git a/net/socket.c b/net/socket.c
index 4006452e8475..cc3fc4d4263e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1907,6 +1907,10 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
{
if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
return -EFAULT;
+
+ if (kmsg->msg_namelen < 0)
+ return -EINVAL;
+
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
kmsg->msg_namelen = sizeof(struct sockaddr_storage);
return 0;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c0a81a50bd4e..09a84c9f2f59 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1787,8 +1787,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
goto out;
err = mutex_lock_interruptible(&u->readlock);
- if (err) {
- err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
+ if (unlikely(err)) {
+ /* recvmsg() in non blocking mode is supposed to return -EAGAIN
+ * sk_rcvtimeo is not honored by mutex_lock_interruptible()
+ */
+ err = noblock ? -EAGAIN : -ERESTARTSYS;
goto out;
}
@@ -1910,6 +1913,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
struct unix_sock *u = unix_sk(sk);
struct sockaddr_un *sunaddr = msg->msg_name;
int copied = 0;
+ int noblock = flags & MSG_DONTWAIT;
int check_creds = 0;
int target;
int err = 0;
@@ -1925,7 +1929,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
goto out;
target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
- timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
+ timeo = sock_rcvtimeo(sk, noblock);
/* Lock the socket to prevent queue disordering
* while sleeps in memcpy_tomsg
@@ -1937,8 +1941,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
}
err = mutex_lock_interruptible(&u->readlock);
- if (err) {
- err = sock_intr_errno(timeo);
+ if (unlikely(err)) {
+ /* recvmsg() in non blocking mode is supposed to return -EAGAIN
+ * sk_rcvtimeo is not honored by mutex_lock_interruptible()
+ */
+ err = noblock ? -EAGAIN : -ERESTARTSYS;
goto out;
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,633 @@
diff --git a/Makefile b/Makefile
index aa1001213eb1..16899b9ba84f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 90
+SUBLEVEL = 91
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
index 1c893f05d224..21ecdf5e55f9 100644
--- a/arch/powerpc/lib/crtsavres.S
+++ b/arch/powerpc/lib/crtsavres.S
@@ -230,6 +230,87 @@ _GLOBAL(_rest32gpr_31_x)
mr 1,11
blr
+#ifdef CONFIG_ALTIVEC
+/* Called with r0 pointing just beyond the end of the vector save area. */
+
+_GLOBAL(_savevr_20)
+ li r11,-192
+ stvx vr20,r11,r0
+_GLOBAL(_savevr_21)
+ li r11,-176
+ stvx vr21,r11,r0
+_GLOBAL(_savevr_22)
+ li r11,-160
+ stvx vr22,r11,r0
+_GLOBAL(_savevr_23)
+ li r11,-144
+ stvx vr23,r11,r0
+_GLOBAL(_savevr_24)
+ li r11,-128
+ stvx vr24,r11,r0
+_GLOBAL(_savevr_25)
+ li r11,-112
+ stvx vr25,r11,r0
+_GLOBAL(_savevr_26)
+ li r11,-96
+ stvx vr26,r11,r0
+_GLOBAL(_savevr_27)
+ li r11,-80
+ stvx vr27,r11,r0
+_GLOBAL(_savevr_28)
+ li r11,-64
+ stvx vr28,r11,r0
+_GLOBAL(_savevr_29)
+ li r11,-48
+ stvx vr29,r11,r0
+_GLOBAL(_savevr_30)
+ li r11,-32
+ stvx vr30,r11,r0
+_GLOBAL(_savevr_31)
+ li r11,-16
+ stvx vr31,r11,r0
+ blr
+
+_GLOBAL(_restvr_20)
+ li r11,-192
+ lvx vr20,r11,r0
+_GLOBAL(_restvr_21)
+ li r11,-176
+ lvx vr21,r11,r0
+_GLOBAL(_restvr_22)
+ li r11,-160
+ lvx vr22,r11,r0
+_GLOBAL(_restvr_23)
+ li r11,-144
+ lvx vr23,r11,r0
+_GLOBAL(_restvr_24)
+ li r11,-128
+ lvx vr24,r11,r0
+_GLOBAL(_restvr_25)
+ li r11,-112
+ lvx vr25,r11,r0
+_GLOBAL(_restvr_26)
+ li r11,-96
+ lvx vr26,r11,r0
+_GLOBAL(_restvr_27)
+ li r11,-80
+ lvx vr27,r11,r0
+_GLOBAL(_restvr_28)
+ li r11,-64
+ lvx vr28,r11,r0
+_GLOBAL(_restvr_29)
+ li r11,-48
+ lvx vr29,r11,r0
+_GLOBAL(_restvr_30)
+ li r11,-32
+ lvx vr30,r11,r0
+_GLOBAL(_restvr_31)
+ li r11,-16
+ lvx vr31,r11,r0
+ blr
+
+#endif /* CONFIG_ALTIVEC */
+
#else /* CONFIG_PPC64 */
.globl _savegpr0_14
@@ -353,6 +434,111 @@ _restgpr0_31:
mtlr r0
blr
+#ifdef CONFIG_ALTIVEC
+/* Called with r0 pointing just beyond the end of the vector save area. */
+
+.globl _savevr_20
+_savevr_20:
+ li r12,-192
+ stvx vr20,r12,r0
+.globl _savevr_21
+_savevr_21:
+ li r12,-176
+ stvx vr21,r12,r0
+.globl _savevr_22
+_savevr_22:
+ li r12,-160
+ stvx vr22,r12,r0
+.globl _savevr_23
+_savevr_23:
+ li r12,-144
+ stvx vr23,r12,r0
+.globl _savevr_24
+_savevr_24:
+ li r12,-128
+ stvx vr24,r12,r0
+.globl _savevr_25
+_savevr_25:
+ li r12,-112
+ stvx vr25,r12,r0
+.globl _savevr_26
+_savevr_26:
+ li r12,-96
+ stvx vr26,r12,r0
+.globl _savevr_27
+_savevr_27:
+ li r12,-80
+ stvx vr27,r12,r0
+.globl _savevr_28
+_savevr_28:
+ li r12,-64
+ stvx vr28,r12,r0
+.globl _savevr_29
+_savevr_29:
+ li r12,-48
+ stvx vr29,r12,r0
+.globl _savevr_30
+_savevr_30:
+ li r12,-32
+ stvx vr30,r12,r0
+.globl _savevr_31
+_savevr_31:
+ li r12,-16
+ stvx vr31,r12,r0
+ blr
+
+.globl _restvr_20
+_restvr_20:
+ li r12,-192
+ lvx vr20,r12,r0
+.globl _restvr_21
+_restvr_21:
+ li r12,-176
+ lvx vr21,r12,r0
+.globl _restvr_22
+_restvr_22:
+ li r12,-160
+ lvx vr22,r12,r0
+.globl _restvr_23
+_restvr_23:
+ li r12,-144
+ lvx vr23,r12,r0
+.globl _restvr_24
+_restvr_24:
+ li r12,-128
+ lvx vr24,r12,r0
+.globl _restvr_25
+_restvr_25:
+ li r12,-112
+ lvx vr25,r12,r0
+.globl _restvr_26
+_restvr_26:
+ li r12,-96
+ lvx vr26,r12,r0
+.globl _restvr_27
+_restvr_27:
+ li r12,-80
+ lvx vr27,r12,r0
+.globl _restvr_28
+_restvr_28:
+ li r12,-64
+ lvx vr28,r12,r0
+.globl _restvr_29
+_restvr_29:
+ li r12,-48
+ lvx vr29,r12,r0
+.globl _restvr_30
+_restvr_30:
+ li r12,-32
+ lvx vr30,r12,r0
+.globl _restvr_31
+_restvr_31:
+ li r12,-16
+ lvx vr31,r12,r0
+ blr
+
+#endif /* CONFIG_ALTIVEC */
+
#endif /* CONFIG_PPC64 */
#endif
diff --git a/block/blk-core.c b/block/blk-core.c
index 279f05dcbc87..1175e57104cc 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2104,7 +2104,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
if (!req->bio)
return false;
- trace_block_rq_complete(req->q, req);
+ trace_block_rq_complete(req->q, req, nr_bytes);
/*
* For fs requests, rq is just carrier of independent bio's
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 25506c777381..9bec1717047e 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -486,6 +486,8 @@ mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
pthru32->dataxferaddr = kioc->buf_paddr;
if (kioc->data_dir & UIOC_WR) {
+ if (pthru32->dataxferlen > kioc->xferlen)
+ return -EINVAL;
if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
pthru32->dataxferlen)) {
return (-EFAULT);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 37818fbfbb0e..fa0376b38019 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1996,7 +1996,9 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
tty->ops->flush_chars(tty);
} else {
while (nr > 0) {
+ mutex_lock(&tty->output_lock);
c = tty->ops->write(tty, b, nr);
+ mutex_unlock(&tty->output_lock);
if (c < 0) {
retval = c;
goto break_out;
diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
index ac2cf6dcc598..3b15bcac3766 100644
--- a/drivers/video/tgafb.c
+++ b/drivers/video/tgafb.c
@@ -192,6 +192,8 @@ tgafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
if (var->xres_virtual != var->xres || var->yres_virtual != var->yres)
return -EINVAL;
+ if (var->xres * var->yres * (var->bits_per_pixel >> 3) > info->fix.smem_len)
+ return -EINVAL;
if (var->nonstd)
return -EINVAL;
if (1000000000 / var->pixclock > TGA_PLL_MAX_FREQ)
@@ -272,6 +274,7 @@ tgafb_set_par(struct fb_info *info)
par->yres = info->var.yres;
par->pll_freq = pll_freq = 1000000000 / info->var.pixclock;
par->bits_per_pixel = info->var.bits_per_pixel;
+ info->fix.line_length = par->xres * (par->bits_per_pixel >> 3);
tga_type = par->tga_type;
@@ -1318,6 +1321,7 @@ tgafb_init_fix(struct fb_info *info)
int tga_bus_tc = TGA_BUS_TC(par->dev);
u8 tga_type = par->tga_type;
const char *tga_type_name = NULL;
+ unsigned memory_size;
switch (tga_type) {
case TGA_TYPE_8PLANE:
@@ -1325,21 +1329,25 @@ tgafb_init_fix(struct fb_info *info)
tga_type_name = "Digital ZLXp-E1";
if (tga_bus_tc)
tga_type_name = "Digital ZLX-E1";
+ memory_size = 2097152;
break;
case TGA_TYPE_24PLANE:
if (tga_bus_pci)
tga_type_name = "Digital ZLXp-E2";
if (tga_bus_tc)
tga_type_name = "Digital ZLX-E2";
+ memory_size = 8388608;
break;
case TGA_TYPE_24PLUSZ:
if (tga_bus_pci)
tga_type_name = "Digital ZLXp-E3";
if (tga_bus_tc)
tga_type_name = "Digital ZLX-E3";
+ memory_size = 16777216;
break;
default:
tga_type_name = "Unknown";
+ memory_size = 16777216;
break;
}
@@ -1351,9 +1359,8 @@ tgafb_init_fix(struct fb_info *info)
? FB_VISUAL_PSEUDOCOLOR
: FB_VISUAL_DIRECTCOLOR);
- info->fix.line_length = par->xres * (par->bits_per_pixel >> 3);
info->fix.smem_start = (size_t) par->tga_fb_base;
- info->fix.smem_len = info->fix.line_length * par->yres;
+ info->fix.smem_len = memory_size;
info->fix.mmio_start = (size_t) par->tga_regs_base;
info->fix.mmio_len = 512;
@@ -1478,6 +1485,9 @@ tgafb_register(struct device *dev)
modedb_tga = &modedb_tc;
modedbsize_tga = 1;
}
+
+ tgafb_init_fix(info);
+
ret = fb_find_mode(&info->var, info,
mode_option ? mode_option : mode_option_tga,
modedb_tga, modedbsize_tga, NULL,
@@ -1495,7 +1505,6 @@ tgafb_register(struct device *dev)
}
tgafb_set_par(info);
- tgafb_init_fix(info);
if (register_framebuffer(info) < 0) {
printk(KERN_ERR "tgafb: Could not register framebuffer\n");
diff --git a/include/linux/net.h b/include/linux/net.h
index ff8097592f1d..d40ccb796e8d 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -259,6 +259,29 @@ extern struct socket *sockfd_lookup(int fd, int *err);
#define sockfd_put(sock) fput(sock->file)
extern int net_ratelimit(void);
+#define net_ratelimited_function(function, ...) \
+do { \
+ if (net_ratelimit()) \
+ function(__VA_ARGS__); \
+} while (0)
+
+#define net_emerg_ratelimited(fmt, ...) \
+ net_ratelimited_function(pr_emerg, fmt, ##__VA_ARGS__)
+#define net_alert_ratelimited(fmt, ...) \
+ net_ratelimited_function(pr_alert, fmt, ##__VA_ARGS__)
+#define net_crit_ratelimited(fmt, ...) \
+ net_ratelimited_function(pr_crit, fmt, ##__VA_ARGS__)
+#define net_err_ratelimited(fmt, ...) \
+ net_ratelimited_function(pr_err, fmt, ##__VA_ARGS__)
+#define net_notice_ratelimited(fmt, ...) \
+ net_ratelimited_function(pr_notice, fmt, ##__VA_ARGS__)
+#define net_warn_ratelimited(fmt, ...) \
+ net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
+#define net_info_ratelimited(fmt, ...) \
+ net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
+#define net_dbg_ratelimited(fmt, ...) \
+ net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
+
#define net_random() random32()
#define net_srandom(seed) srandom32((__force u32)seed)
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 96755c3798a5..0066409f86c7 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -37,8 +37,8 @@ enum nf_ct_ext_id {
/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
struct rcu_head rcu;
- u8 offset[NF_CT_EXT_NUM];
- u8 len;
+ u16 offset[NF_CT_EXT_NUM];
+ u16 len;
char data[0];
};
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 05c5e61f0a7c..048e2658d895 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -81,6 +81,7 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
* block_rq_complete - block IO operation completed by device driver
* @q: queue containing the block operation request
* @rq: block operations request
+ * @nr_bytes: number of completed bytes
*
* The block_rq_complete tracepoint event indicates that some portion
* of operation request has been completed by the device driver. If
@@ -88,11 +89,37 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
* do for the request. If @rq->bio is non-NULL then there is
* additional work required to complete the request.
*/
-DEFINE_EVENT(block_rq_with_error, block_rq_complete,
+TRACE_EVENT(block_rq_complete,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request_queue *q, struct request *rq,
+ unsigned int nr_bytes),
- TP_ARGS(q, rq)
+ TP_ARGS(q, rq, nr_bytes),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __field( int, errors )
+ __array( char, rwbs, RWBS_LEN )
+ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->sector = blk_rq_pos(rq);
+ __entry->nr_sector = nr_bytes >> 9;
+ __entry->errors = rq->errors;
+
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
+ blk_dump_cmd(__get_str(cmd), rq);
+ ),
+
+ TP_printk("%d,%d %s (%s) %llu + %u [%d]",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rwbs, __get_str(cmd),
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->errors)
);
DECLARE_EVENT_CLASS(block_rq,
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index c0bd0308741c..b0eeda6a798b 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -685,6 +685,7 @@ void blk_trace_shutdown(struct request_queue *q)
* blk_add_trace_rq - Add a trace for a request oriented action
* @q: queue the io is for
* @rq: the source request
+ * @nr_bytes: number of completed bytes
* @what: the action
*
* Description:
@@ -692,7 +693,7 @@ void blk_trace_shutdown(struct request_queue *q)
*
**/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
- u32 what)
+ unsigned int nr_bytes, u32 what)
{
struct blk_trace *bt = q->blk_trace;
@@ -701,11 +702,11 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
what |= BLK_TC_ACT(BLK_TC_PC);
- __blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags,
+ __blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
what, rq->errors, rq->cmd_len, rq->cmd);
} else {
what |= BLK_TC_ACT(BLK_TC_FS);
- __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
+ __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
rq->cmd_flags, what, rq->errors, 0, NULL);
}
}
@@ -713,33 +714,34 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
static void blk_add_trace_rq_abort(void *ignore,
struct request_queue *q, struct request *rq)
{
- blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT);
}
static void blk_add_trace_rq_insert(void *ignore,
struct request_queue *q, struct request *rq)
{
- blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT);
}
static void blk_add_trace_rq_issue(void *ignore,
struct request_queue *q, struct request *rq)
{
- blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE);
}
static void blk_add_trace_rq_requeue(void *ignore,
struct request_queue *q,
struct request *rq)
{
- blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE);
}
static void blk_add_trace_rq_complete(void *ignore,
struct request_queue *q,
- struct request *rq)
+ struct request *rq,
+ unsigned int nr_bytes)
{
- blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
+ blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE);
}
/**
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 23d8560dbc56..7840b3486db3 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -638,6 +638,9 @@ static int tracepoint_module_coming(struct module *mod)
struct tp_module *tp_mod, *iter;
int ret = 0;
+ if (!mod->num_tracepoints)
+ return 0;
+
/*
* We skip modules that taint the kernel, especially those with different
* module headers (for forced load), to make sure we don't cause a crash.
@@ -681,6 +684,9 @@ static int tracepoint_module_going(struct module *mod)
{
struct tp_module *pos;
+ if (!mod->num_tracepoints)
+ return 0;
+
mutex_lock(&tracepoints_mutex);
tracepoint_update_probe_range(mod->tracepoints_ptrs,
mod->tracepoints_ptrs + mod->num_tracepoints);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 5fe2ff3b01ef..f381fa16bdc9 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1044,10 +1044,9 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
if (repl->num_counters &&
copy_to_user(repl->counters, counterstmp,
repl->num_counters * sizeof(struct ebt_counter))) {
- ret = -EFAULT;
+ /* Silent error, can't fail, new table is already in place */
+ net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
}
- else
- ret = 0;
/* decrease module count and free resources */
EBT_ENTRY_ITERATE(table->entries, table->entries_size,
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index fd7a3f68917f..bcb6e6197595 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1039,8 +1039,10 @@ static int __do_replace(struct net *net, const char *name,
xt_free_table_info(oldinfo);
if (copy_to_user(counters_ptr, counters,
- sizeof(struct xt_counters) * num_counters) != 0)
- ret = -EFAULT;
+ sizeof(struct xt_counters) * num_counters) != 0) {
+ /* Silent error, can't fail, new table is already in place */
+ net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n");
+ }
vfree(counters);
xt_table_unlock(t);
return ret;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 24e556e83a3b..f98a1cf54c5b 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1227,8 +1227,10 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
xt_free_table_info(oldinfo);
if (copy_to_user(counters_ptr, counters,
- sizeof(struct xt_counters) * num_counters) != 0)
- ret = -EFAULT;
+ sizeof(struct xt_counters) * num_counters) != 0) {
+ /* Silent error, can't fail, new table is already in place */
+ net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
+ }
vfree(counters);
xt_table_unlock(t);
return ret;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 9d4e15559319..6fe8ced0068f 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1236,8 +1236,10 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
xt_free_table_info(oldinfo);
if (copy_to_user(counters_ptr, counters,
- sizeof(struct xt_counters) * num_counters) != 0)
- ret = -EFAULT;
+ sizeof(struct xt_counters) * num_counters) != 0) {
+ /* Silent error, can't fail, new table is already in place */
+ net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
+ }
vfree(counters);
xt_table_unlock(t);
return ret;
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index c4e7d1510f9d..62ed15a03515 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -569,12 +569,16 @@ static int ignore_undef_symbol(struct elf_info *info, const char *symname)
if (strncmp(symname, "_restgpr_", sizeof("_restgpr_") - 1) == 0 ||
strncmp(symname, "_savegpr_", sizeof("_savegpr_") - 1) == 0 ||
strncmp(symname, "_rest32gpr_", sizeof("_rest32gpr_") - 1) == 0 ||
- strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0)
+ strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0 ||
+ strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
+ strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
return 1;
if (info->hdr->e_machine == EM_PPC64)
/* Special register function linked on all modules during final link of .ko */
if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 ||
- strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0)
+ strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 ||
+ strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
+ strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
return 1;
/* Do not ignore this symbol */
return 0;
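
Note on the hunk above: it teaches modpost to also ignore the _restvr_*/_savevr_* helpers, which appear to be the VMX/Altivec counterparts of the _restgpr_*/_savegpr_* out-of-line register save/restore functions resolved at final link of the .ko. The check itself is the usual sizeof("literal") - 1 prefix test. A minimal user-space sketch of that idiom follows; has_prefix() is an invented helper name, not part of modpost, and strlen() stands in for the compile-time sizeof trick.

#include <stdio.h>
#include <string.h>

/* Invented helper: modpost open-codes the same test with
 * sizeof("literal") - 1 so the prefix length is a compile-time constant. */
static int has_prefix(const char *sym, const char *prefix)
{
	return strncmp(sym, prefix, strlen(prefix)) == 0;
}

int main(void)
{
	const char *syms[] = { "_savevr_20", "_restvr_31", "printk" };
	unsigned int i;

	for (i = 0; i < sizeof(syms) / sizeof(syms[0]); i++)
		printf("%-12s -> %s\n", syms[i],
		       (has_prefix(syms[i], "_savevr_") ||
			has_prefix(syms[i], "_restvr_")) ? "ignored" : "kept");
	return 0;
}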

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,291 @@
diff --git a/Makefile b/Makefile
index 20f420096dfa..0864af4a683b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 93
+SUBLEVEL = 94
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 6524c6e21896..694aeedcbf88 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -67,9 +67,11 @@ LDFLAGS_vmlinux-y := -Bstatic
LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie
LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y)
+asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
+
CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=no -mcall-aixdesc
CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
-KBUILD_CPPFLAGS += -Iarch/$(ARCH)
+KBUILD_CPPFLAGS += -Iarch/$(ARCH) $(asinstr)
KBUILD_AFLAGS += -Iarch/$(ARCH)
KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
CPP = $(CC) -E $(KBUILD_CFLAGS)
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 50f73aa2ba21..6f5a837431e9 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -294,11 +294,16 @@ n:
* ld rY,ADDROFF(name)(rX)
*/
#ifdef __powerpc64__
+#ifdef HAVE_AS_ATHIGH
+#define __AS_ATHIGH high
+#else
+#define __AS_ATHIGH h
+#endif
#define LOAD_REG_IMMEDIATE(reg,expr) \
lis (reg),(expr)@highest; \
ori (reg),(reg),(expr)@higher; \
rldicr (reg),(reg),32,31; \
- oris (reg),(reg),(expr)@h; \
+ oris (reg),(reg),(expr)@__AS_ATHIGH; \
ori (reg),(reg),(expr)@l;
#define LOAD_REG_ADDR(reg,name) \
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ccd7b6711196..0e87baf8fcc2 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -441,6 +441,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(0x1b4b, 0x9230),
.driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642),
+ .driver_data = board_ahci_yes_fbs },
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 00b81272e314..174b622dcaef 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -55,7 +55,6 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
cq->ring = ring;
cq->is_tx = mode;
- spin_lock_init(&cq->lock);
err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
cq->buf_size, 2 * PAGE_SIZE);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 31b455a49273..467a51171d47 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -370,15 +370,11 @@ static void mlx4_en_netpoll(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_cq *cq;
- unsigned long flags;
int i;
for (i = 0; i < priv->rx_ring_num; i++) {
cq = &priv->rx_cq[i];
- spin_lock_irqsave(&cq->lock, flags);
- napi_synchronize(&cq->napi);
- mlx4_en_process_rx_cq(dev, cq, 0);
- spin_unlock_irqrestore(&cq->lock, flags);
+ napi_schedule(&cq->napi);
}
}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d69fee41f24a..8df3c4be3ff1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -301,7 +301,6 @@ struct mlx4_en_cq {
struct mlx4_cq mcq;
struct mlx4_hwq_resources wqres;
int ring;
- spinlock_t lock;
struct net_device *dev;
struct napi_struct napi;
/* Per-core Tx cq processing support */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index e5f416f8042d..1a7955a39070 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1294,7 +1294,6 @@ struct megasas_instance {
u32 *reply_queue;
dma_addr_t reply_queue_h;
- unsigned long base_addr;
struct megasas_register_set __iomem *reg_set;
struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 79261628d067..618870033dd0 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3445,6 +3445,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
u32 max_sectors_1;
u32 max_sectors_2;
u32 tmp_sectors, msix_enable;
+ resource_size_t base_addr;
struct megasas_register_set __iomem *reg_set;
struct megasas_ctrl_info *ctrl_info;
unsigned long bar_list;
@@ -3453,14 +3454,14 @@ static int megasas_init_fw(struct megasas_instance *instance)
/* Find first memory bar */
bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
- instance->base_addr = pci_resource_start(instance->pdev, instance->bar);
if (pci_request_selected_regions(instance->pdev, instance->bar,
"megasas: LSI")) {
printk(KERN_DEBUG "megasas: IO memory region busy!\n");
return -EBUSY;
}
- instance->reg_set = ioremap_nocache(instance->base_addr, 8192);
+ base_addr = pci_resource_start(instance->pdev, instance->bar);
+ instance->reg_set = ioremap_nocache(base_addr, 8192);
if (!instance->reg_set) {
printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
index 826653fff70e..aafcf0330014 100644
--- a/drivers/staging/zram/zram_sysfs.c
+++ b/drivers/staging/zram/zram_sysfs.c
@@ -99,18 +99,23 @@ static ssize_t reset_store(struct device *dev,
return -ENOMEM;
/* Do not reset an active device! */
- if (bdev->bd_holders)
- return -EBUSY;
+ if (bdev->bd_holders) {
+ ret = -EBUSY;
+ goto out;
+ }
ret = kstrtou16(buf, 10, &do_reset);
if (ret)
- return ret;
+ goto out;
- if (!do_reset)
- return -EINVAL;
+ if (!do_reset) {
+ ret = -EINVAL;
+ goto out;
+ }
/* Make sure all pending I/O is finished */
fsync_bdev(bdev);
+ bdput(bdev);
down_write(&zram->init_lock);
if (zram->init_done)
@@ -118,6 +123,10 @@ static ssize_t reset_store(struct device *dev,
up_write(&zram->init_lock);
return len;
+
+out:
+ bdput(bdev);
+ return ret;
}
static ssize_t num_reads_show(struct device *dev,
@@ -188,8 +197,10 @@ static ssize_t mem_used_total_show(struct device *dev,
u64 val = 0;
struct zram *zram = dev_to_zram(dev);
+ down_read(&zram->init_lock);
if (zram->init_done)
val = zs_get_total_size_bytes(zram->mem_pool);
+ up_read(&zram->init_lock);
return sprintf(buf, "%llu\n", val);
}
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index af1de0f34eae..549071209258 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -868,6 +868,22 @@ static enum audit_state audit_filter_task(struct task_struct *tsk, char **key)
return AUDIT_BUILD_CONTEXT;
}
+static int audit_in_mask(const struct audit_krule *rule, unsigned long val)
+{
+ int word, bit;
+
+ if (val > 0xffffffff)
+ return false;
+
+ word = AUDIT_WORD(val);
+ if (word >= AUDIT_BITMASK_SIZE)
+ return false;
+
+ bit = AUDIT_BIT(val);
+
+ return rule->mask[word] & bit;
+}
+
/* At syscall entry and exit time, this filter is called if the
* audit_state is not low enough that auditing cannot take place, but is
* also not high enough that we already know we have to write an audit
@@ -885,11 +901,8 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk,
rcu_read_lock();
if (!list_empty(list)) {
- int word = AUDIT_WORD(ctx->major);
- int bit = AUDIT_BIT(ctx->major);
-
list_for_each_entry_rcu(e, list, list) {
- if ((e->rule.mask[word] & bit) == bit &&
+ if (audit_in_mask(&e->rule, ctx->major) &&
audit_filter_rules(tsk, &e->rule, ctx, NULL,
&state, false)) {
rcu_read_unlock();
@@ -909,20 +922,16 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk,
static int audit_filter_inode_name(struct task_struct *tsk,
struct audit_names *n,
struct audit_context *ctx) {
- int word, bit;
int h = audit_hash_ino((u32)n->ino);
struct list_head *list = &audit_inode_hash[h];
struct audit_entry *e;
enum audit_state state;
- word = AUDIT_WORD(ctx->major);
- bit = AUDIT_BIT(ctx->major);
-
if (list_empty(list))
return 0;
list_for_each_entry_rcu(e, list, list) {
- if ((e->rule.mask[word] & bit) == bit &&
+ if (audit_in_mask(&e->rule, ctx->major) &&
audit_filter_rules(tsk, &e->rule, ctx, n, &state, false)) {
ctx->current_state = state;
return 1;
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 9bb1b8a37a22..010288fdcc90 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -22,7 +22,6 @@
#endif
#include <net/netfilter/nf_conntrack_zones.h>
-/* Returns new sk_buff, or NULL */
static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
{
int err;
@@ -33,8 +32,10 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
err = ip_defrag(skb, user);
local_bh_enable();
- if (!err)
+ if (!err) {
ip_send_check(ip_hdr(skb));
+ skb->local_df = 1;
+ }
return err;
}
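
This patch also introduces audit_in_mask() (see the kernel/auditsc.c hunk above) so that syscall numbers large enough to index past rule->mask[] are rejected instead of being looked up out of bounds. Below is a minimal user-space sketch of that check; the AUDIT_WORD/AUDIT_BIT/AUDIT_BITMASK_SIZE definitions are assumed to match include/linux/audit.h for this kernel generation, and struct krule / rule_matches_syscall are invented names for illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed to match include/linux/audit.h. */
#define AUDIT_BITMASK_SIZE 64
#define AUDIT_WORD(nr) ((uint32_t)((nr) / 32))
#define AUDIT_BIT(nr)  (1U << ((nr) - AUDIT_WORD(nr) * 32))

struct krule {
	uint32_t mask[AUDIT_BITMASK_SIZE];   /* one bit per syscall number */
};

static bool rule_matches_syscall(const struct krule *rule, unsigned long nr)
{
	uint32_t word;

	if (nr > 0xffffffff)
		return false;                /* not a 32-bit syscall number */

	word = AUDIT_WORD(nr);
	if (word >= AUDIT_BITMASK_SIZE)
		return false;                /* would index past rule->mask[] */

	return rule->mask[word] & AUDIT_BIT(nr);
}

int main(void)
{
	struct krule r = { { 0 } };

	r.mask[AUDIT_WORD(59)] |= AUDIT_BIT(59);   /* e.g. syscall 59 (execve on x86_64) */

	printf("59 -> %d, 60 -> %d, 3000 -> %d\n",
	       rule_matches_syscall(&r, 59),
	       rule_matches_syscall(&r, 60),
	       rule_matches_syscall(&r, 3000));
	return 0;
}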

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,552 @@
diff --git a/Makefile b/Makefile
index e4ecdedbfe27..fdd7c32ea1f7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 96
+SUBLEVEL = 97
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 14ac52c5ae86..884de3433ad7 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -131,7 +131,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
board_bind_eic_interrupt = &msc_bind_eic_interrupt;
- for (; nirq >= 0; nirq--, imp++) {
+ for (; nirq > 0; nirq--, imp++) {
int n = imp->im_irq;
switch (imp->im_type) {
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index e7dba0b2a170..eb6b72faec0d 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -496,7 +496,7 @@ void timer_interrupt(struct pt_regs * regs)
__get_cpu_var(irq_stat).timer_irqs++;
-#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
+#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
#endif
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 9a52349874ee..e3b28e34bd78 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1395,7 +1395,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
regs->gpr[rd] = byterev_4(val);
goto ldst_done;
-#ifdef CONFIG_PPC_CPU
+#ifdef CONFIG_PPC_FPU
case 535: /* lfsx */
case 567: /* lfsux */
if (!(regs->msr & MSR_FP))
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 8a811d98a795..9c9e24512f28 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -319,6 +319,7 @@ static int pseries_eeh_get_state(struct device_node *dn, int *state)
} else {
result = EEH_STATE_NOT_SUPPORT;
}
+ break;
default:
result = EEH_STATE_NOT_SUPPORT;
}
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 19f16ebaf4fa..0b60cd9dcf16 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -286,6 +286,22 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
#define ARCH_HAS_USER_SINGLE_STEP_INFO
+/*
+ * When hitting ptrace_stop(), we cannot return using SYSRET because
+ * that does not restore the full CPU state, only a minimal set. The
+ * ptracer can change arbitrary register values, which is usually okay
+ * because the usual ptrace stops run off the signal delivery path which
+ * forces IRET; however, ptrace_event() stops happen in arbitrary places
+ * in the kernel and don't force IRET path.
+ *
+ * So force IRET path after a ptrace stop.
+ */
+#define arch_ptrace_stop_needed(code, info) \
+({ \
+ set_thread_flag(TIF_NOTIFY_RESUME); \
+ false; \
+})
+
struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index f0d588f8859e..1acb99100556 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -98,7 +98,7 @@ struct ib_umad_port {
struct ib_umad_device {
int start_port, end_port;
- struct kref ref;
+ struct kobject kobj;
struct ib_umad_port port[0];
};
@@ -134,14 +134,18 @@ static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device);
-static void ib_umad_release_dev(struct kref *ref)
+static void ib_umad_release_dev(struct kobject *kobj)
{
struct ib_umad_device *dev =
- container_of(ref, struct ib_umad_device, ref);
+ container_of(kobj, struct ib_umad_device, kobj);
kfree(dev);
}
+static struct kobj_type ib_umad_dev_ktype = {
+ .release = ib_umad_release_dev,
+};
+
static int hdr_size(struct ib_umad_file *file)
{
return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
@@ -780,27 +784,19 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
{
struct ib_umad_port *port;
struct ib_umad_file *file;
- int ret;
+ int ret = -ENXIO;
port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
- if (port)
- kref_get(&port->umad_dev->ref);
- else
- return -ENXIO;
mutex_lock(&port->file_mutex);
- if (!port->ib_dev) {
- ret = -ENXIO;
+ if (!port->ib_dev)
goto out;
- }
+ ret = -ENOMEM;
file = kzalloc(sizeof *file, GFP_KERNEL);
- if (!file) {
- kref_put(&port->umad_dev->ref, ib_umad_release_dev);
- ret = -ENOMEM;
+ if (!file)
goto out;
- }
mutex_init(&file->mutex);
spin_lock_init(&file->send_lock);
@@ -814,6 +810,13 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
list_add_tail(&file->port_list, &port->file_list);
ret = nonseekable_open(inode, filp);
+ if (ret) {
+ list_del(&file->port_list);
+ kfree(file);
+ goto out;
+ }
+
+ kobject_get(&port->umad_dev->kobj);
out:
mutex_unlock(&port->file_mutex);
@@ -852,7 +855,7 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
mutex_unlock(&file->port->file_mutex);
kfree(file);
- kref_put(&dev->ref, ib_umad_release_dev);
+ kobject_put(&dev->kobj);
return 0;
}
@@ -880,10 +883,6 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
int ret;
port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev);
- if (port)
- kref_get(&port->umad_dev->ref);
- else
- return -ENXIO;
if (filp->f_flags & O_NONBLOCK) {
if (down_trylock(&port->sm_sem)) {
@@ -898,17 +897,27 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
}
ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
- if (ret) {
- up(&port->sm_sem);
- goto fail;
- }
+ if (ret)
+ goto err_up_sem;
filp->private_data = port;
- return nonseekable_open(inode, filp);
+ ret = nonseekable_open(inode, filp);
+ if (ret)
+ goto err_clr_sm_cap;
+
+ kobject_get(&port->umad_dev->kobj);
+
+ return 0;
+
+err_clr_sm_cap:
+ swap(props.set_port_cap_mask, props.clr_port_cap_mask);
+ ib_modify_port(port->ib_dev, port->port_num, 0, &props);
+
+err_up_sem:
+ up(&port->sm_sem);
fail:
- kref_put(&port->umad_dev->ref, ib_umad_release_dev);
return ret;
}
@@ -927,7 +936,7 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
up(&port->sm_sem);
- kref_put(&port->umad_dev->ref, ib_umad_release_dev);
+ kobject_put(&port->umad_dev->kobj);
return ret;
}
@@ -995,6 +1004,7 @@ static int find_overflow_devnum(void)
}
static int ib_umad_init_port(struct ib_device *device, int port_num,
+ struct ib_umad_device *umad_dev,
struct ib_umad_port *port)
{
int devnum;
@@ -1027,6 +1037,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
cdev_init(&port->cdev, &umad_fops);
port->cdev.owner = THIS_MODULE;
+ port->cdev.kobj.parent = &umad_dev->kobj;
kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
if (cdev_add(&port->cdev, base, 1))
goto err_cdev;
@@ -1045,6 +1056,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
base += IB_UMAD_MAX_PORTS;
cdev_init(&port->sm_cdev, &umad_sm_fops);
port->sm_cdev.owner = THIS_MODULE;
+ port->sm_cdev.kobj.parent = &umad_dev->kobj;
kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
if (cdev_add(&port->sm_cdev, base, 1))
goto err_sm_cdev;
@@ -1138,7 +1150,7 @@ static void ib_umad_add_one(struct ib_device *device)
if (!umad_dev)
return;
- kref_init(&umad_dev->ref);
+ kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype);
umad_dev->start_port = s;
umad_dev->end_port = e;
@@ -1146,7 +1158,8 @@ static void ib_umad_add_one(struct ib_device *device)
for (i = s; i <= e; ++i) {
umad_dev->port[i - s].umad_dev = umad_dev;
- if (ib_umad_init_port(device, i, &umad_dev->port[i - s]))
+ if (ib_umad_init_port(device, i, umad_dev,
+ &umad_dev->port[i - s]))
goto err;
}
@@ -1158,7 +1171,7 @@ err:
while (--i >= s)
ib_umad_kill_port(&umad_dev->port[i - s]);
- kref_put(&umad_dev->ref, ib_umad_release_dev);
+ kobject_put(&umad_dev->kobj);
}
static void ib_umad_remove_one(struct ib_device *device)
@@ -1172,7 +1185,7 @@ static void ib_umad_remove_one(struct ib_device *device)
for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i)
ib_umad_kill_port(&umad_dev->port[i]);
- kref_put(&umad_dev->ref, ib_umad_release_dev);
+ kobject_put(&umad_dev->kobj);
}
static char *umad_devnode(struct device *dev, umode_t *mode)
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index e2f9a51f4a38..45802e97332e 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -346,6 +346,10 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
ret = -EFAULT;
goto bail;
}
+ dp.len = odp.len;
+ dp.unit = odp.unit;
+ dp.data = odp.data;
+ dp.pbc_wd = 0;
} else {
ret = -EINVAL;
goto bail;
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index c4ff788823b5..14f39298cb97 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -1005,7 +1005,7 @@ static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
event.event = IB_EVENT_PKEY_CHANGE;
event.device = &dd->verbs_dev.ibdev;
- event.element.port_num = 1;
+ event.element.port_num = port;
ib_dispatch_event(&event);
}
return 0;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 922d845f76b0..7fa948d7a867 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1371,6 +1371,12 @@ err_unmap:
err_iu:
srp_put_tx_iu(target, iu, SRP_IU_CMD);
+ /*
+ * Avoid that the loops that iterate over the request ring can
+ * encounter a dangling SCSI command pointer.
+ */
+ req->scmnd = NULL;
+
spin_lock_irqsave(&target->lock, flags);
list_add(&req->list, &target->free_reqs);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 9bdc3b8597a4..d93ea6417d15 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -472,8 +472,15 @@ static void elantech_report_absolute_v3(struct psmouse *psmouse,
input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3);
- input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
- input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+
+ /* For clickpads map both buttons to BTN_LEFT */
+ if (etd->fw_version & 0x001000) {
+ input_report_key(dev, BTN_LEFT, packet[0] & 0x03);
+ } else {
+ input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
+ input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+ }
+
input_report_abs(dev, ABS_PRESSURE, pres);
input_report_abs(dev, ABS_TOOL_WIDTH, width);
@@ -483,10 +490,17 @@ static void elantech_report_absolute_v3(struct psmouse *psmouse,
static void elantech_input_sync_v4(struct psmouse *psmouse)
{
struct input_dev *dev = psmouse->dev;
+ struct elantech_data *etd = psmouse->private;
unsigned char *packet = psmouse->packet;
- input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
- input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+ /* For clickpads map both buttons to BTN_LEFT */
+ if (etd->fw_version & 0x001000) {
+ input_report_key(dev, BTN_LEFT, packet[0] & 0x03);
+ } else {
+ input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
+ input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+ }
+
input_mt_report_pointer_emulation(dev, true);
input_sync(dev);
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a7f6dcea0d76..4cdd2bc21403 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3610,7 +3610,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
u16 cmd;
int rc;
- WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
+ WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
/* ARCH specific VGA enables */
rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 103c95e300fd..61bc33ed1116 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2921,6 +2921,7 @@ static void __devinit disable_igfx_irq(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
struct pci_fixup *end)
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index bbb170e50055..a3b97e0c98df 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -62,7 +62,6 @@
* @adev: amba device structure of wdt
* @status: current status of wdt
* @load_val: load value to be set for current timeout
- * @timeout: current programmed timeout
*/
struct sp805_wdt {
spinlock_t lock;
@@ -73,7 +72,6 @@ struct sp805_wdt {
#define WDT_BUSY 0
#define WDT_CAN_BE_CLOSED 1
unsigned int load_val;
- unsigned int timeout;
};
/* local variables */
@@ -101,7 +99,7 @@ static void wdt_setload(unsigned int timeout)
spin_lock(&wdt->lock);
wdt->load_val = load;
/* roundup timeout to closest positive integer value */
- wdt->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
+ wdd->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
spin_unlock(&wdt->lock);
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 28e5648c9cc4..a4b87c69fbbb 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3485,7 +3485,7 @@ nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
* correspondance, and we have to delete the lockowner when we
* delete the lock stateid:
*/
- unhash_lockowner(lo);
+ release_lockowner(lo);
return nfs_ok;
}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 6eaa2e2335dc..ba7bf4a11855 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2032,8 +2032,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
err = vfs_getattr(exp->ex_path.mnt, dentry, &stat);
if (err)
goto out_nfserr;
- if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL |
- FATTR4_WORD0_MAXNAME)) ||
+ if ((bmval0 & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
+ FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_MAXNAME)) ||
(bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
FATTR4_WORD1_SPACE_TOTAL))) {
err = vfs_statfs(&path, &statfs);
diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
index 9e1d05666fed..e0a7a764a903 100644
--- a/fs/ubifs/shrinker.c
+++ b/fs/ubifs/shrinker.c
@@ -128,7 +128,6 @@ static int shrink_tnc(struct ubifs_info *c, int nr, int age, int *contention)
freed = ubifs_destroy_tnc_subtree(znode);
atomic_long_sub(freed, &ubifs_clean_zn_cnt);
atomic_long_sub(freed, &c->clean_zn_cnt);
- ubifs_assert(atomic_long_read(&c->clean_zn_cnt) >= 0);
total_freed += freed;
znode = zprev;
}
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 5c719627c2aa..ed12724901bc 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -379,6 +379,9 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
* calling arch_ptrace_stop() when it would be superfluous. For example,
* if the thread has not been back to user mode since the last stop, the
* thread state might indicate that nothing needs to be done.
+ *
+ * This is guaranteed to be invoked once before a task stops for ptrace and
+ * may include arch-specific operations necessary prior to a ptrace stop.
*/
#define arch_ptrace_stop_needed(code, info) (0)
#endif
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 31966a4fb8cc..51b72d8a8498 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -4,6 +4,7 @@
#include <linux/tracepoint.h>
#include <linux/unistd.h>
#include <linux/ftrace_event.h>
+#include <linux/thread_info.h>
#include <asm/ptrace.h>
@@ -54,4 +55,18 @@ int perf_sysexit_enable(struct ftrace_event_call *call);
void perf_sysexit_disable(struct ftrace_event_call *call);
#endif
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
+static inline void syscall_tracepoint_update(struct task_struct *p)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ set_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
+ else
+ clear_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
+}
+#else
+static inline void syscall_tracepoint_update(struct task_struct *p)
+{
+}
+#endif
+
#endif /* _TRACE_SYSCALL_H */
diff --git a/kernel/fork.c b/kernel/fork.c
index afac42b8889c..621c547dabb1 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1441,7 +1441,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
total_forks++;
spin_unlock(&current->sighand->siglock);
+ syscall_tracepoint_update(p);
write_unlock_irq(&tasklist_lock);
+
proc_fork_connector(p);
cgroup_post_fork(p);
if (clone_flags & CLONE_THREAD)
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
index 54e35c1e5948..5e29610303b0 100644
--- a/scripts/recordmcount.h
+++ b/scripts/recordmcount.h
@@ -163,11 +163,11 @@ static int mcount_adjust = 0;
static int MIPS_is_fake_mcount(Elf_Rel const *rp)
{
- static Elf_Addr old_r_offset;
+ static Elf_Addr old_r_offset = ~(Elf_Addr)0;
Elf_Addr current_r_offset = _w(rp->r_offset);
int is_fake;
- is_fake = old_r_offset &&
+ is_fake = (old_r_offset != ~(Elf_Addr)0) &&
(current_r_offset - old_r_offset == MIPS_FAKEMCOUNT_OFFSET);
old_r_offset = current_r_offset;
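
The recordmcount change above replaces 0 with ~(Elf_Addr)0 as the "no previous relocation" marker, because 0 is itself a legal r_offset and the old test therefore missed a fake mcount entry that followed a relocation at offset 0. A small stand-alone sketch of the sentinel fix, with simplified types and FAKEMCOUNT_OFFSET assumed to be 4 as in the MIPS case:

#include <stdint.h>
#include <stdio.h>

#define FAKEMCOUNT_OFFSET 4   /* stand-in for MIPS_FAKEMCOUNT_OFFSET */

static int is_fake_mcount(uint64_t r_offset)
{
	/* ~0 is the "no previous relocation" sentinel; 0 is a valid offset. */
	static uint64_t old_r_offset = ~(uint64_t)0;
	int is_fake = (old_r_offset != ~(uint64_t)0) &&
		      (r_offset - old_r_offset == FAKEMCOUNT_OFFSET);

	old_r_offset = r_offset;
	return is_fake;
}

int main(void)
{
	int first, second;

	/* A relocation at offset 0 followed by the fake entry 4 bytes later:
	 * the old "old_r_offset == 0 means unset" test missed this case. */
	first = is_fake_mcount(0);
	second = is_fake_mcount(4);
	printf("first=%d second=%d\n", first, second);
	return 0;
}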

File diff suppressed because it is too large


@@ -0,0 +1,797 @@
diff --git a/Makefile b/Makefile
index d277446ee8ee..ed97caf40f71 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 98
+SUBLEVEL = 99
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 02aee03e713c..1eef567f177a 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -472,7 +472,22 @@ static void power_pmu_read(struct perf_event *event)
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
local64_add(delta, &event->count);
- local64_sub(delta, &event->hw.period_left);
+
+ /*
+ * A number of places program the PMC with (0x80000000 - period_left).
+ * We never want period_left to be less than 1 because we will program
+ * the PMC with a value >= 0x80000000 and an edge detected PMC will
+ * roll around to 0 before taking an exception. We have seen this
+ * on POWER8.
+ *
+ * To fix this, clamp the minimum value of period_left to 1.
+ */
+ do {
+ prev = local64_read(&event->hw.period_left);
+ val = prev - delta;
+ if (val < 1)
+ val = 1;
+ } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
}
/*
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index be1ef574ce9a..dec49d34c048 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -50,6 +50,21 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
return err;
}
+static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+ void *arg)
+{
+ unsigned long i;
+
+ for (i = 0; i < nr_pages; ++i)
+ if (pfn_valid(start_pfn + i) &&
+ !PageReserved(pfn_to_page(start_pfn + i)))
+ return 1;
+
+ WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
+
+ return 0;
+}
+
/*
* Remap an arbitrary physical address space into the kernel virtual
* address space. Needed when the kernel wants to access high addresses
@@ -93,14 +108,11 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
/*
* Don't allow anybody to remap normal RAM that we're using..
*/
+ pfn = phys_addr >> PAGE_SHIFT;
last_pfn = last_addr >> PAGE_SHIFT;
- for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
- int is_ram = page_is_ram(pfn);
-
- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
- return NULL;
- WARN_ON_ONCE(is_ram);
- }
+ if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+ __ioremap_check_ram) == 1)
+ return NULL;
/*
* Mappings have to be page-aligned
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 9bdfcf50a190..93f3034ea85d 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -34,6 +34,7 @@
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/suspend.h>
+#include <linux/delay.h>
#include <asm/unaligned.h>
#ifdef CONFIG_ACPI_PROCFS_POWER
@@ -1055,6 +1056,28 @@ static int battery_notify(struct notifier_block *nb,
return 0;
}
+/*
+ * On some machines (e.g. Lenovo Z480) the EC is not stable
+ * during boot-up, which sometimes makes the battery driver
+ * fail to probe because battery information cannot be read
+ * from the EC. After several retries the operation may
+ * succeed, so add retry code here with a 20ms sleep
+ * between attempts.
+ */
+static int acpi_battery_update_retry(struct acpi_battery *battery)
+{
+ int retry, ret;
+
+ for (retry = 5; retry; retry--) {
+ ret = acpi_battery_update(battery);
+ if (!ret)
+ break;
+
+ msleep(20);
+ }
+ return ret;
+}
+
static int acpi_battery_add(struct acpi_device *device)
{
int result = 0;
@@ -1074,9 +1097,11 @@ static int acpi_battery_add(struct acpi_device *device)
if (ACPI_SUCCESS(acpi_get_handle(battery->device->handle,
"_BIX", &handle)))
set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
- result = acpi_battery_update(battery);
+
+ result = acpi_battery_update_retry(battery);
if (result)
goto fail;
+
#ifdef CONFIG_ACPI_PROCFS_POWER
result = acpi_battery_add_fs(device);
#endif
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 52480010156d..739eb0d0d12d 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -539,8 +539,10 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
return -EINVAL;
}
addr = addr & 0xFFFFFFFFFFFFF000ULL;
- addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
- addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
+ if (addr != rdev->dummy_page.addr)
+ addr |= R600_PTE_VALID | R600_PTE_READABLE |
+ R600_PTE_WRITEABLE;
+ addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
writeq(addr, ptr + (i * 8));
return 0;
}
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index 80cc465d8ac7..f078baef90fe 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -231,6 +231,9 @@ static ssize_t set_fan_div(struct device *dev,
/* Update the value */
reg = (reg & 0x3F) | (val << 6);
+ /* Update the cache */
+ data->fan_div[attr->index] = reg;
+
/* Write value */
i2c_smbus_write_byte_data(client,
ADM1029_REG_FAN_DIV[attr->index], reg);
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index f600fa1f92e3..1d2bace45287 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -715,7 +715,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
get_temp_alarm, NULL, IDX_TEMP1_MAX);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO,
get_temp_alarm, NULL, IDX_TEMP1_CRIT);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO,
get_temp, NULL, IDX_TEMP2_INPUT);
static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp,
set_temp, IDX_TEMP2_MIN);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 3bd9bd49ed9a..ac339570a805 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -159,6 +159,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
{ USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
{ USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ed1650fb910d..89b5664aa53b 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -731,7 +731,8 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
- { USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) },
+ { USB_DEVICE(TESTO_VID, TESTO_1_PID) },
+ { USB_DEVICE(TESTO_VID, TESTO_3_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 500474c48f4b..106cc16cc6ed 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -798,7 +798,8 @@
* Submitted by Colin Leroy
*/
#define TESTO_VID 0x128D
-#define TESTO_USB_INTERFACE_PID 0x0001
+#define TESTO_1_PID 0x0001
+#define TESTO_3_PID 0x0003
/*
* Mobility Electronics products.
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 58fcaa5ae548..e1e05bad2be0 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1501,6 +1501,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 0b0c03c878e5..f0e4e46867f7 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2589,10 +2589,11 @@ static void print_daily_error_info(unsigned long arg)
es = sbi->s_es;
if (es->s_error_count)
- ext4_msg(sb, KERN_NOTICE, "error count: %u",
+ /* fsck newer than v1.41.13 is needed to clean this condition. */
+ ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
le32_to_cpu(es->s_error_count));
if (es->s_first_error_time) {
- printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d",
+ printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
sb->s_id, le32_to_cpu(es->s_first_error_time),
(int) sizeof(es->s_first_error_func),
es->s_first_error_func,
@@ -2606,7 +2607,7 @@ static void print_daily_error_info(unsigned long arg)
printk("\n");
}
if (es->s_last_error_time) {
- printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d",
+ printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
sb->s_id, le32_to_cpu(es->s_last_error_time),
(int) sizeof(es->s_last_error_func),
es->s_last_error_func,
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4b843ace285c..9cb82b9aeb73 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1152,7 +1152,13 @@ done:
int current_cpuset_is_being_rebound(void)
{
- return task_cs(current) == cpuset_being_rebound;
+ int ret;
+
+ rcu_read_lock();
+ ret = task_cs(current) == cpuset_being_rebound;
+ rcu_read_unlock();
+
+ return ret;
}
static int update_relax_domain_level(struct cpuset *cs, s64 val)
diff --git a/kernel/rtmutex-debug.h b/kernel/rtmutex-debug.h
index 14193d596d78..ab29b6a22669 100644
--- a/kernel/rtmutex-debug.h
+++ b/kernel/rtmutex-debug.h
@@ -31,3 +31,8 @@ static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
{
return (waiter != NULL);
}
+
+static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
+{
+ debug_rt_mutex_print_deadlock(w);
+}
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index a242e691c993..375ae48479fa 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -81,6 +81,47 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
owner = *p;
} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
+
+/*
+ * Safe fastpath aware unlock:
+ * 1) Clear the waiters bit
+ * 2) Drop lock->wait_lock
+ * 3) Try to unlock the lock with cmpxchg
+ */
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+ __releases(lock->wait_lock)
+{
+ struct task_struct *owner = rt_mutex_owner(lock);
+
+ clear_rt_mutex_waiters(lock);
+ raw_spin_unlock(&lock->wait_lock);
+ /*
+ * If a new waiter comes in between the unlock and the cmpxchg
+ * we have two situations:
+ *
+ * unlock(wait_lock);
+ * lock(wait_lock);
+ * cmpxchg(p, owner, 0) == owner
+ * mark_rt_mutex_waiters(lock);
+ * acquire(lock);
+ * or:
+ *
+ * unlock(wait_lock);
+ * lock(wait_lock);
+ * mark_rt_mutex_waiters(lock);
+ *
+ * cmpxchg(p, owner, 0) != owner
+ * enqueue_waiter();
+ * unlock(wait_lock);
+ * lock(wait_lock);
+ * wake waiter();
+ * unlock(wait_lock);
+ * lock(wait_lock);
+ * acquire(lock);
+ */
+ return rt_mutex_cmpxchg(lock, owner, NULL);
+}
+
#else
# define rt_mutex_cmpxchg(l,c,n) (0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
@@ -88,6 +129,17 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
lock->owner = (struct task_struct *)
((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
+
+/*
+ * Simple slow path only version: lock->owner is protected by lock->wait_lock.
+ */
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+ __releases(lock->wait_lock)
+{
+ lock->owner = NULL;
+ raw_spin_unlock(&lock->wait_lock);
+ return true;
+}
#endif
/*
@@ -141,6 +193,11 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
*/
int max_lock_depth = 1024;
+static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
+{
+ return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
+}
+
/*
* Adjust the priority chain. Also used for deadlock detection.
* Decreases task's usage by one - may thus free the task.
@@ -149,6 +206,7 @@ int max_lock_depth = 1024;
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
int deadlock_detect,
struct rt_mutex *orig_lock,
+ struct rt_mutex *next_lock,
struct rt_mutex_waiter *orig_waiter,
struct task_struct *top_task)
{
@@ -182,7 +240,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
}
put_task_struct(task);
- return deadlock_detect ? -EDEADLK : 0;
+ return -EDEADLK;
}
retry:
/*
@@ -207,13 +265,32 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
goto out_unlock_pi;
/*
+ * We dropped all locks after taking a refcount on @task, so
+ * the task might have moved on in the lock chain or even left
+ * the chain completely and blocks now on an unrelated lock or
+ * on @orig_lock.
+ *
+ * We stored the lock on which @task was blocked in @next_lock,
+ * so we can detect the chain change.
+ */
+ if (next_lock != waiter->lock)
+ goto out_unlock_pi;
+
+ /*
* Drop out, when the task has no waiters. Note,
* top_waiter can be NULL, when we are in the deboosting
* mode!
*/
- if (top_waiter && (!task_has_pi_waiters(task) ||
- top_waiter != task_top_pi_waiter(task)))
- goto out_unlock_pi;
+ if (top_waiter) {
+ if (!task_has_pi_waiters(task))
+ goto out_unlock_pi;
+ /*
+ * If deadlock detection is off, we stop here if we
+ * are not the top pi waiter of the task.
+ */
+ if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
+ goto out_unlock_pi;
+ }
/*
* When deadlock detection is off then we check, if further
@@ -229,11 +306,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
goto retry;
}
- /* Deadlock detection */
+ /*
+ * Deadlock detection. If the lock is the same as the original
+ * lock which caused us to walk the lock chain or if the
+ * current lock is owned by the task which initiated the chain
+ * walk, we detected a deadlock.
+ */
if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
raw_spin_unlock(&lock->wait_lock);
- ret = deadlock_detect ? -EDEADLK : 0;
+ ret = -EDEADLK;
goto out_unlock_pi;
}
@@ -280,11 +362,26 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
__rt_mutex_adjust_prio(task);
}
+ /*
+ * Check whether the task which owns the current lock is pi
+ * blocked itself. If yes we store a pointer to the lock for
+ * the lock chain change detection above. After we dropped
+ * task->pi_lock next_lock cannot be dereferenced anymore.
+ */
+ next_lock = task_blocked_on_lock(task);
+
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
top_waiter = rt_mutex_top_waiter(lock);
raw_spin_unlock(&lock->wait_lock);
+ /*
+ * We reached the end of the lock chain. Stop right here. No
+ * point to go back just to figure that out.
+ */
+ if (!next_lock)
+ goto out_put_task;
+
if (!detect_deadlock && waiter != top_waiter)
goto out_put_task;
@@ -395,8 +492,21 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
{
struct task_struct *owner = rt_mutex_owner(lock);
struct rt_mutex_waiter *top_waiter = waiter;
- unsigned long flags;
+ struct rt_mutex *next_lock;
int chain_walk = 0, res;
+ unsigned long flags;
+
+ /*
+ * Early deadlock detection. We really don't want the task to
+ * enqueue on itself just to untangle the mess later. It's not
+ * only an optimization. We drop the locks, so another waiter
+ * can come in before the chain walk detects the deadlock. So
+ * the other will detect the deadlock and return -EDEADLOCK,
+ * which is wrong, as the other waiter is not in a deadlock
+ * situation.
+ */
+ if (owner == task)
+ return -EDEADLK;
raw_spin_lock_irqsave(&task->pi_lock, flags);
__rt_mutex_adjust_prio(task);
@@ -417,20 +527,28 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
if (!owner)
return 0;
+ raw_spin_lock_irqsave(&owner->pi_lock, flags);
if (waiter == rt_mutex_top_waiter(lock)) {
- raw_spin_lock_irqsave(&owner->pi_lock, flags);
plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
__rt_mutex_adjust_prio(owner);
if (owner->pi_blocked_on)
chain_walk = 1;
- raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
- }
- else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
+ } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
chain_walk = 1;
+ }
+
+ /* Store the lock on which owner is blocked or NULL */
+ next_lock = task_blocked_on_lock(owner);
- if (!chain_walk)
+ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+ /*
+ * Even if full deadlock detection is on, if the owner is not
+ * blocked itself, we can avoid finding this out in the chain
+ * walk.
+ */
+ if (!chain_walk || !next_lock)
return 0;
/*
@@ -442,8 +560,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
raw_spin_unlock(&lock->wait_lock);
- res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
- task);
+ res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
+ next_lock, waiter, task);
raw_spin_lock(&lock->wait_lock);
@@ -453,7 +571,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
/*
* Wake up the next waiter on the lock.
*
- * Remove the top waiter from the current tasks waiter list and wake it up.
+ * Remove the top waiter from the current tasks pi waiter list and
+ * wake it up.
*
* Called with lock->wait_lock held.
*/
@@ -474,10 +593,23 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
*/
plist_del(&waiter->pi_list_entry, &current->pi_waiters);
- rt_mutex_set_owner(lock, NULL);
+ /*
+ * As we are waking up the top waiter, and the waiter stays
+ * queued on the lock until it gets the lock, this lock
+ * obviously has waiters. Just set the bit here and this has
+ * the added benefit of forcing all new tasks into the
+ * slow path making sure no task of lower priority than
+ * the top waiter can steal this lock.
+ */
+ lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+ /*
+ * It's safe to dereference waiter as it cannot go away as
+ * long as we hold lock->wait_lock. The waiter task needs to
+ * acquire it in order to dequeue the waiter.
+ */
wake_up_process(waiter->task);
}
@@ -492,8 +624,8 @@ static void remove_waiter(struct rt_mutex *lock,
{
int first = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
+ struct rt_mutex *next_lock = NULL;
unsigned long flags;
- int chain_walk = 0;
raw_spin_lock_irqsave(&current->pi_lock, flags);
plist_del(&waiter->list_entry, &lock->wait_list);
@@ -517,15 +649,15 @@ static void remove_waiter(struct rt_mutex *lock,
}
__rt_mutex_adjust_prio(owner);
- if (owner->pi_blocked_on)
- chain_walk = 1;
+ /* Store the lock on which owner is blocked or NULL */
+ next_lock = task_blocked_on_lock(owner);
raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
}
WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
- if (!chain_walk)
+ if (!next_lock)
return;
/* gets dropped in rt_mutex_adjust_prio_chain()! */
@@ -533,7 +665,7 @@ static void remove_waiter(struct rt_mutex *lock,
raw_spin_unlock(&lock->wait_lock);
- rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
+ rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
raw_spin_lock(&lock->wait_lock);
}
@@ -546,6 +678,7 @@ static void remove_waiter(struct rt_mutex *lock,
void rt_mutex_adjust_pi(struct task_struct *task)
{
struct rt_mutex_waiter *waiter;
+ struct rt_mutex *next_lock;
unsigned long flags;
raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -555,12 +688,13 @@ void rt_mutex_adjust_pi(struct task_struct *task)
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return;
}
-
+ next_lock = waiter->lock;
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
/* gets dropped in rt_mutex_adjust_prio_chain()! */
get_task_struct(task);
- rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
+
+ rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
}
/**
@@ -612,6 +746,26 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
return ret;
}
+static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+ struct rt_mutex_waiter *w)
+{
+ /*
+ * If the result is not -EDEADLOCK or the caller requested
+ * deadlock detection, nothing to do here.
+ */
+ if (res != -EDEADLOCK || detect_deadlock)
+ return;
+
+ /*
+ * Yell loudly and stop the task right here.
+ */
+ rt_mutex_print_deadlock(w);
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ }
+}
+
/*
* Slow path lock function:
*/
@@ -649,8 +803,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
set_current_state(TASK_RUNNING);
- if (unlikely(ret))
+ if (unlikely(ret)) {
remove_waiter(lock, &waiter);
+ rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
+ }
/*
* try_to_take_rt_mutex() sets the waiter bit
@@ -706,12 +862,49 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
rt_mutex_deadlock_account_unlock(current);
- if (!rt_mutex_has_waiters(lock)) {
- lock->owner = NULL;
- raw_spin_unlock(&lock->wait_lock);
- return;
+ /*
+ * We must be careful here if the fast path is enabled. If we
+ * have no waiters queued we cannot set owner to NULL here
+ * because of:
+ *
+ * foo->lock->owner = NULL;
+ * rtmutex_lock(foo->lock); <- fast path
+ * free = atomic_dec_and_test(foo->refcnt);
+ * rtmutex_unlock(foo->lock); <- fast path
+ * if (free)
+ * kfree(foo);
+ * raw_spin_unlock(foo->lock->wait_lock);
+ *
+ * So for the fastpath enabled kernel:
+ *
+ * Nothing can set the waiters bit as long as we hold
+ * lock->wait_lock. So we do the following sequence:
+ *
+ * owner = rt_mutex_owner(lock);
+ * clear_rt_mutex_waiters(lock);
+ * raw_spin_unlock(&lock->wait_lock);
+ * if (cmpxchg(&lock->owner, owner, 0) == owner)
+ * return;
+ * goto retry;
+ *
+ * The fastpath disabled variant is simple as all access to
+ * lock->owner is serialized by lock->wait_lock:
+ *
+ * lock->owner = NULL;
+ * raw_spin_unlock(&lock->wait_lock);
+ */
+ while (!rt_mutex_has_waiters(lock)) {
+ /* Drops lock->wait_lock ! */
+ if (unlock_rt_mutex_safe(lock) == true)
+ return;
+ /* Relock the rtmutex and try again */
+ raw_spin_lock(&lock->wait_lock);
}
+ /*
+ * The wakeup next waiter path does not suffer from the above
+ * race. See the comments there.
+ */
wakeup_next_waiter(lock);
raw_spin_unlock(&lock->wait_lock);
@@ -958,7 +1151,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
return 1;
}
- ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
+ /* We enforce deadlock detection for futexes */
+ ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
if (ret && !rt_mutex_owner(lock)) {
/*
diff --git a/kernel/rtmutex.h b/kernel/rtmutex.h
index a1a1dd06421d..f6a1f3c133b1 100644
--- a/kernel/rtmutex.h
+++ b/kernel/rtmutex.h
@@ -24,3 +24,8 @@
#define debug_rt_mutex_print_deadlock(w) do { } while (0)
#define debug_rt_mutex_detect_deadlock(w,d) (d)
#define debug_rt_mutex_reset_waiter(w) do { } while (0)
+
+static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
+{
+ WARN(1, "rtmutex deadlock detected\n");
+}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a494ec317e0a..8897a6f5c408 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1052,7 +1052,6 @@ void tracing_start(void)
arch_spin_unlock(&ftrace_max_lock);
- ftrace_start();
out:
raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}
@@ -1068,7 +1067,6 @@ void tracing_stop(void)
struct ring_buffer *buffer;
unsigned long flags;
- ftrace_stop();
raw_spin_lock_irqsave(&tracing_start_lock, flags);
if (trace_stop_count++)
goto out;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ee50c256fdc3..5cec36b6e838 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1991,7 +1991,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
} else
*new = *old;
- rcu_read_lock();
if (current_cpuset_is_being_rebound()) {
nodemask_t mems = cpuset_mems_allowed(current);
if (new->flags & MPOL_F_REBINDING)
@@ -1999,7 +1998,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
else
mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
}
- rcu_read_unlock();
atomic_set(&new->refcnt, 1);
return new;
}
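
The bulk of this patch is the rtmutex rework; its central trick is unlock_rt_mutex_safe(): clear the waiters bit while wait_lock is still held, drop wait_lock, then cmpxchg the owner to NULL and fall back to the slow path if a new waiter set the bit in the window. The following user-space model with C11 atomics shows only that ordering; the wait_lock, the waiter queue and all names (toy_rtmutex, toy_clear_waiters, toy_try_release) are simplified stand-ins, so this is a shape sketch rather than a usable lock.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HAS_WAITERS 1UL                  /* low bit of the owner word */

struct toy_rtmutex {
	_Atomic uintptr_t owner;         /* fake task pointer | HAS_WAITERS */
};

/* Mirrors clear_rt_mutex_waiters(): done while wait_lock is still held. */
static void toy_clear_waiters(struct toy_rtmutex *m)
{
	atomic_fetch_and(&m->owner, ~HAS_WAITERS);
}

/* Mirrors the final cmpxchg: succeeds only if no waiter re-set the bit
 * after wait_lock was dropped; otherwise the unlocker must retake the
 * slow path and hand the lock over instead of clearing the owner. */
static bool toy_try_release(struct toy_rtmutex *m, uintptr_t me)
{
	uintptr_t expected = me;

	return atomic_compare_exchange_strong(&m->owner, &expected, 0);
}

int main(void)
{
	struct toy_rtmutex m;

	/* Uncontended unlock: bit cleared, cmpxchg owner -> 0 succeeds. */
	atomic_init(&m.owner, 0x1000);
	toy_clear_waiters(&m);
	printf("no race: %s\n",
	       toy_try_release(&m, 0x1000) ? "released" : "retry slow path");

	/* A waiter enqueues in the window after wait_lock was dropped and
	 * sets the bit again (mark_rt_mutex_waiters()): cmpxchg fails. */
	atomic_init(&m.owner, 0x1000);
	toy_clear_waiters(&m);
	atomic_fetch_or(&m.owner, HAS_WAITERS);
	printf("raced:   %s\n",
	       toy_try_release(&m, 0x1000) ? "released" : "retry slow path");
	return 0;
}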


@@ -0,0 +1,929 @@
diff --git a/Makefile b/Makefile
index ed97caf40f71..d6c64eb82525 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 99
+SUBLEVEL = 100
EXTRAVERSION =
NAME = Saber-toothed Squirrel
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 268b2455e7b0..b1cbcff69cdb 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1070,6 +1070,15 @@ again:
intel_pmu_lbr_read();
/*
+ * CondChgd bit 63 doesn't mean any overflow status. Ignore
+ * and clear the bit.
+ */
+ if (__test_and_clear_bit(63, (unsigned long *)&status)) {
+ if (!status)
+ goto done;
+ }
+
+ /*
* PEBS overflow sets bit 62 in the global status register
*/
if (__test_and_clear_bit(62, (unsigned long *)&status)) {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 36e5a8ee0e1e..1ae2e0ea5492 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -14558,38 +14558,40 @@ static struct pcomp_testvec zlib_decomp_tv_template[] = {
static struct comp_testvec lzo_comp_tv_template[] = {
{
.inlen = 70,
- .outlen = 46,
+ .outlen = 57,
.input = "Join us now and share the software "
"Join us now and share the software ",
.output = "\x00\x0d\x4a\x6f\x69\x6e\x20\x75"
- "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
- "\x64\x20\x73\x68\x61\x72\x65\x20"
- "\x74\x68\x65\x20\x73\x6f\x66\x74"
- "\x77\x70\x01\x01\x4a\x6f\x69\x6e"
- "\x3d\x88\x00\x11\x00\x00",
+ "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
+ "\x64\x20\x73\x68\x61\x72\x65\x20"
+ "\x74\x68\x65\x20\x73\x6f\x66\x74"
+ "\x77\x70\x01\x32\x88\x00\x0c\x65"
+ "\x20\x74\x68\x65\x20\x73\x6f\x66"
+ "\x74\x77\x61\x72\x65\x20\x11\x00"
+ "\x00",
}, {
.inlen = 159,
- .outlen = 133,
+ .outlen = 131,
.input = "This document describes a compression method based on the LZO "
"compression algorithm. This document defines the application of "
"the LZO algorithm used in UBIFS.",
- .output = "\x00\x2b\x54\x68\x69\x73\x20\x64"
+ .output = "\x00\x2c\x54\x68\x69\x73\x20\x64"
"\x6f\x63\x75\x6d\x65\x6e\x74\x20"
"\x64\x65\x73\x63\x72\x69\x62\x65"
"\x73\x20\x61\x20\x63\x6f\x6d\x70"
"\x72\x65\x73\x73\x69\x6f\x6e\x20"
"\x6d\x65\x74\x68\x6f\x64\x20\x62"
"\x61\x73\x65\x64\x20\x6f\x6e\x20"
- "\x74\x68\x65\x20\x4c\x5a\x4f\x2b"
- "\x8c\x00\x0d\x61\x6c\x67\x6f\x72"
- "\x69\x74\x68\x6d\x2e\x20\x20\x54"
- "\x68\x69\x73\x2a\x54\x01\x02\x66"
- "\x69\x6e\x65\x73\x94\x06\x05\x61"
- "\x70\x70\x6c\x69\x63\x61\x74\x76"
- "\x0a\x6f\x66\x88\x02\x60\x09\x27"
- "\xf0\x00\x0c\x20\x75\x73\x65\x64"
- "\x20\x69\x6e\x20\x55\x42\x49\x46"
- "\x53\x2e\x11\x00\x00",
+ "\x74\x68\x65\x20\x4c\x5a\x4f\x20"
+ "\x2a\x8c\x00\x09\x61\x6c\x67\x6f"
+ "\x72\x69\x74\x68\x6d\x2e\x20\x20"
+ "\x2e\x54\x01\x03\x66\x69\x6e\x65"
+ "\x73\x20\x74\x06\x05\x61\x70\x70"
+ "\x6c\x69\x63\x61\x74\x76\x0a\x6f"
+ "\x66\x88\x02\x60\x09\x27\xf0\x00"
+ "\x0c\x20\x75\x73\x65\x64\x20\x69"
+ "\x6e\x20\x55\x42\x49\x46\x53\x2e"
+ "\x11\x00\x00",
},
};
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 60404f4b2446..adc9bfd4d82f 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -709,6 +709,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
struct radeon_device *rdev = dev->dev_private;
int ret = 0;
+ /* don't leak the edid if we already fetched it in detect() */
+ if (radeon_connector->edid)
+ goto got_edid;
+
/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
@@ -748,6 +752,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
}
if (radeon_connector->edid) {
+got_edid:
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 97b2e21ac46a..cf065df9bb18 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -582,7 +582,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
int map_size;
- u32 ver;
+ u32 ver, sts;
static int iommu_allocated = 0;
int agaw = 0;
int msagaw = 0;
@@ -652,6 +652,15 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
(unsigned long long)iommu->cap,
(unsigned long long)iommu->ecap);
+ /* Reflect status in gcmd */
+ sts = readl(iommu->reg + DMAR_GSTS_REG);
+ if (sts & DMA_GSTS_IRES)
+ iommu->gcmd |= DMA_GCMD_IRE;
+ if (sts & DMA_GSTS_TES)
+ iommu->gcmd |= DMA_GCMD_TE;
+ if (sts & DMA_GSTS_QIES)
+ iommu->gcmd |= DMA_GCMD_QIE;
+
raw_spin_lock_init(&iommu->register_lock);
drhd->iommu = iommu;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 4e1c6bfc9c8d..dd255c578ad9 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3659,6 +3659,7 @@ static struct notifier_block device_nb = {
int __init intel_iommu_init(void)
{
int ret = 0;
+ struct dmar_drhd_unit *drhd;
/* VT-d is required for a TXT/tboot launch, so enforce that */
force_on = tboot_force_iommu();
@@ -3669,6 +3670,20 @@ int __init intel_iommu_init(void)
return -ENODEV;
}
+ /*
+ * Disable translation if already enabled prior to OS handover.
+ */
+ for_each_drhd_unit(drhd) {
+ struct intel_iommu *iommu;
+
+ if (drhd->ignored)
+ continue;
+
+ iommu = drhd->iommu;
+ if (iommu->gcmd & DMA_GCMD_TE)
+ iommu_disable_translation(iommu);
+ }
+
if (dmar_dev_scope_init() < 0) {
if (force_on)
panic("tboot: Failed to initialize DMAR device scope\n");
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index ef1f9400b967..b2740f12b180 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2411,7 +2411,7 @@ static int be_open(struct net_device *netdev)
for_all_evt_queues(adapter, eqo, i) {
napi_enable(&eqo->napi);
- be_eq_notify(adapter, eqo->q.id, true, false, 0);
+ be_eq_notify(adapter, eqo->q.id, true, true, 0);
}
status = be_cmd_link_status_query(adapter, NULL, NULL,
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 8e2ac643a777..ed6ec513defa 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1086,6 +1086,24 @@ static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac)
return vp;
}
+static void vnet_cleanup(void)
+{
+ struct vnet *vp;
+ struct net_device *dev;
+
+ mutex_lock(&vnet_list_mutex);
+ while (!list_empty(&vnet_list)) {
+ vp = list_first_entry(&vnet_list, struct vnet, list);
+ list_del(&vp->list);
+ dev = vp->dev;
+ /* vio_unregister_driver() should have cleaned up port_list */
+ BUG_ON(!list_empty(&vp->port_list));
+ unregister_netdev(dev);
+ free_netdev(dev);
+ }
+ mutex_unlock(&vnet_list_mutex);
+}
+
static const char *local_mac_prop = "local-mac-address";
static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp,
@@ -1244,7 +1262,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
kfree(port);
- unregister_netdev(vp->dev);
}
return 0;
}
@@ -1272,6 +1289,7 @@ static int __init vnet_init(void)
static void __exit vnet_exit(void)
{
vio_unregister_driver(&vnet_port_driver);
+ vnet_cleanup();
}
module_init(vnet_init);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index bac88c22d990..fbe75a784edb 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -681,7 +681,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
dev->hard_header_len);
- po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr);
+ po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
po->chan.private = sk;
po->chan.ops = &pppoe_chan_ops;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 9d1b3ca6334b..a884c322f3ea 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -457,6 +457,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
tx_info = MWIFIEX_SKB_TXCB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
mwifiex_fill_buffer(skb);
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 19db29f67558..f27d0c8cd9e8 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -185,6 +185,7 @@ void thaw_processes(void)
printk("Restarting tasks ... ");
+ __usermodehelper_set_disable_depth(UMH_FREEZING);
thaw_workqueues();
read_lock(&tasklist_lock);
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 877aa733b961..b7045793bd56 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -569,9 +569,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
struct itimerspec *new_setting,
struct itimerspec *old_setting)
{
+ ktime_t exp;
+
if (!rtcdev)
return -ENOTSUPP;
+ if (flags & ~TIMER_ABSTIME)
+ return -EINVAL;
+
if (old_setting)
alarm_timer_get(timr, old_setting);
@@ -581,8 +586,16 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
/* start the timer */
timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
- alarm_start(&timr->it.alarm.alarmtimer,
- timespec_to_ktime(new_setting->it_value));
+ exp = timespec_to_ktime(new_setting->it_value);
+ /* Convert (if necessary) to absolute time */
+ if (flags != TIMER_ABSTIME) {
+ ktime_t now;
+
+ now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
+ exp = ktime_add(now, exp);
+ }
+
+ alarm_start(&timr->it.alarm.alarmtimer, exp);
return 0;
}
@@ -714,6 +727,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
if (!alarmtimer_get_rtcdev())
return -ENOTSUPP;
+ if (flags & ~TIMER_ABSTIME)
+ return -EINVAL;
+
if (!capable(CAP_WAKE_ALARM))
return -EPERM;
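
The alarmtimer hunks above reject undefined flag bits and, for relative requests, add the current time of the matching clock base before arming the alarm. A hedged userspace sketch of that convert-to-absolute step (plain POSIX clock_gettime(); TIMER_ABSTIME_FLAG is a stand-in constant, not the kernel's TIMER_ABSTIME):

```c
#include <stdint.h>
#include <time.h>

#define TIMER_ABSTIME_FLAG 0x01	/* stand-in for TIMER_ABSTIME */

/* Relative expiries are anchored to "now"; absolute ones pass through. */
static int64_t absolute_expiry_ns(int flags, int64_t value_ns)
{
	struct timespec now;

	if (!(flags & TIMER_ABSTIME_FLAG)) {
		clock_gettime(CLOCK_REALTIME, &now);
		value_ns += (int64_t)now.tv_sec * 1000000000LL + now.tv_nsec;
	}
	return value_ns;
}
```
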
diff --git a/mm/shmem.c b/mm/shmem.c
index 58c4a477be67..4bb5a80dd13b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -76,6 +76,17 @@ static struct vfsmount *shm_mnt;
/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128
+/*
+ * vmtruncate_range() communicates with shmem_fault via
+ * inode->i_private (with i_mutex making sure that it has only one user at
+ * a time): we would prefer not to enlarge the shmem inode just for that.
+ */
+struct shmem_falloc {
+ wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
+ pgoff_t start; /* start of range currently being fallocated */
+ pgoff_t next; /* the next page offset to be fallocated */
+};
+
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
char *name; /* xattr name */
@@ -488,22 +499,19 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
}
index = start;
- for ( ; ; ) {
+ while (index <= end) {
cond_resched();
pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
pvec.pages, indices);
if (!pvec.nr) {
- if (index == start)
+ /* If all gone or hole-punch, we're done */
+ if (index == start || end != -1)
break;
+ /* But if truncating, restart to make sure all gone */
index = start;
continue;
}
- if (index == start && indices[0] > end) {
- shmem_deswap_pagevec(&pvec);
- pagevec_release(&pvec);
- break;
- }
mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
@@ -513,8 +521,12 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
break;
if (radix_tree_exceptional_entry(page)) {
- nr_swaps_freed += !shmem_free_swap(mapping,
- index, page);
+ if (shmem_free_swap(mapping, index, page)) {
+ /* Swap was replaced by page: retry */
+ index--;
+ break;
+ }
+ nr_swaps_freed++;
continue;
}
@@ -522,6 +534,11 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
if (page->mapping == mapping) {
VM_BUG_ON(PageWriteback(page));
truncate_inode_page(mapping, page);
+ } else {
+ /* Page was replaced by swap: retry */
+ unlock_page(page);
+ index--;
+ break;
}
unlock_page(page);
}
@@ -1060,6 +1077,63 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
int error;
int ret = VM_FAULT_LOCKED;
+ /*
+ * Trinity finds that probing a hole which tmpfs is punching can
+ * prevent the hole-punch from ever completing: which in turn
+ * locks writers out with its hold on i_mutex. So refrain from
+ * faulting pages into the hole while it's being punched. Although
+ * shmem_truncate_range() does remove the additions, it may be unable to
+ * keep up, as each new page needs its own unmap_mapping_range() call,
+ * and the i_mmap tree grows ever slower to scan if new vmas are added.
+ *
+ * It does not matter if we sometimes reach this check just before the
+ * hole-punch begins, so that one fault then races with the punch:
+ * we just need to make racing faults a rare case.
+ *
+ * The implementation below would be much simpler if we just used a
+ * standard mutex or completion: but we cannot take i_mutex in fault,
+ * and bloating every shmem inode for this unlikely case would be sad.
+ */
+ if (unlikely(inode->i_private)) {
+ struct shmem_falloc *shmem_falloc;
+
+ spin_lock(&inode->i_lock);
+ shmem_falloc = inode->i_private;
+ if (shmem_falloc &&
+ vmf->pgoff >= shmem_falloc->start &&
+ vmf->pgoff < shmem_falloc->next) {
+ wait_queue_head_t *shmem_falloc_waitq;
+ DEFINE_WAIT(shmem_fault_wait);
+
+ ret = VM_FAULT_NOPAGE;
+ if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
+ !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ /* It's polite to up mmap_sem if we can */
+ up_read(&vma->vm_mm->mmap_sem);
+ ret = VM_FAULT_RETRY;
+ }
+
+ shmem_falloc_waitq = shmem_falloc->waitq;
+ prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+ TASK_UNINTERRUPTIBLE);
+ spin_unlock(&inode->i_lock);
+ schedule();
+
+ /*
+ * shmem_falloc_waitq points into the vmtruncate_range()
+ * stack of the hole-punching task: shmem_falloc_waitq
+ * is usually invalid by the time we reach here, but
+ * finish_wait() does not dereference it in that case;
+ * though i_lock needed lest racing with wake_up_all().
+ */
+ spin_lock(&inode->i_lock);
+ finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+ spin_unlock(&inode->i_lock);
+ return ret;
+ }
+ spin_unlock(&inode->i_lock);
+ }
+
error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
if (error)
return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
@@ -1071,6 +1145,47 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return ret;
}
+int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+{
+ /*
+ * If the underlying filesystem is not going to provide
+ * a way to truncate a range of blocks (punch a hole) -
+ * we should return failure right now.
+ * Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range().
+ */
+ if (inode->i_op->truncate_range != shmem_truncate_range)
+ return -ENOSYS;
+
+ mutex_lock(&inode->i_mutex);
+ {
+ struct shmem_falloc shmem_falloc;
+ struct address_space *mapping = inode->i_mapping;
+ loff_t unmap_start = round_up(lstart, PAGE_SIZE);
+ loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
+
+ shmem_falloc.waitq = &shmem_falloc_waitq;
+ shmem_falloc.start = unmap_start >> PAGE_SHIFT;
+ shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
+ spin_lock(&inode->i_lock);
+ inode->i_private = &shmem_falloc;
+ spin_unlock(&inode->i_lock);
+
+ if ((u64)unmap_end > (u64)unmap_start)
+ unmap_mapping_range(mapping, unmap_start,
+ 1 + unmap_end - unmap_start, 0);
+ shmem_truncate_range(inode, lstart, lend);
+ /* No need to unmap again: hole-punching leaves COWed pages */
+
+ spin_lock(&inode->i_lock);
+ inode->i_private = NULL;
+ wake_up_all(&shmem_falloc_waitq);
+ spin_unlock(&inode->i_lock);
+ }
+ mutex_unlock(&inode->i_mutex);
+ return 0;
+}
+
#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
@@ -2547,6 +2662,12 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
+int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+{
+ /* Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range(). */
+ return -ENOSYS;
+}
+
#define shmem_vm_ops generic_file_vm_ops
#define shmem_file_operations ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
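
The shmem hunks above make faults into a hole that is currently being punched sleep on a wait queue that the punching task publishes through inode->i_private, and the puncher wakes all waiters once the range is gone. A loose, hedged userspace analogy with pthreads (not the kernel code; names and locking granularity are invented):

```c
#include <pthread.h>
#include <stdbool.h>

struct punch_state {
	pthread_mutex_t lock;
	pthread_cond_t  done;		/* waiters sleep here while a punch runs */
	bool            active;
	unsigned long   start, end;	/* page-index range being punched */
};

static struct punch_state punch = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false, 0, 0
};

/* Fault side: refuse to instantiate pages inside the range under punch. */
static void fault_in(unsigned long index)
{
	pthread_mutex_lock(&punch.lock);
	while (punch.active && index >= punch.start && index < punch.end)
		pthread_cond_wait(&punch.done, &punch.lock);
	pthread_mutex_unlock(&punch.lock);
	/* ... safe to bring the page at 'index' back in here ... */
}

/* Punch side: publish the range, clear it, then wake every waiter. */
static void punch_hole(unsigned long start, unsigned long end)
{
	pthread_mutex_lock(&punch.lock);
	punch.active = true;
	punch.start = start;
	punch.end = end;
	pthread_mutex_unlock(&punch.lock);

	/* ... unmap and truncate the range here ... */

	pthread_mutex_lock(&punch.lock);
	punch.active = false;
	pthread_cond_broadcast(&punch.done);
	pthread_mutex_unlock(&punch.lock);
}
```
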
diff --git a/mm/truncate.c b/mm/truncate.c
index 4224627695ba..f38055cb8af6 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -603,31 +603,6 @@ int vmtruncate(struct inode *inode, loff_t newsize)
}
EXPORT_SYMBOL(vmtruncate);
-int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
-{
- struct address_space *mapping = inode->i_mapping;
- loff_t holebegin = round_up(lstart, PAGE_SIZE);
- loff_t holelen = 1 + lend - holebegin;
-
- /*
- * If the underlying filesystem is not going to provide
- * a way to truncate a range of blocks (punch a hole) -
- * we should return failure right now.
- */
- if (!inode->i_op->truncate_range)
- return -ENOSYS;
-
- mutex_lock(&inode->i_mutex);
- inode_dio_wait(inode);
- unmap_mapping_range(mapping, holebegin, holelen, 1);
- inode->i_op->truncate_range(inode, lstart, lend);
- /* unmap again to remove racily COWed private pages */
- unmap_mapping_range(mapping, holebegin, holelen, 1);
- mutex_unlock(&inode->i_mutex);
-
- return 0;
-}
-
/**
* truncate_pagecache_range - unmap and remove pagecache that is hole-punched
* @inode: inode
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 912613c566cb..37c486c019fe 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -96,8 +96,11 @@ EXPORT_SYMBOL(vlan_dev_vlan_id);
static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
- if (skb_cow(skb, skb_headroom(skb)) < 0)
+ if (skb_cow(skb, skb_headroom(skb)) < 0) {
+ kfree_skb(skb);
return NULL;
+ }
+
memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
skb->mac_header += VLAN_HLEN;
return skb;
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 334d4cd7612f..79aaac288afb 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1494,8 +1494,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
goto drop;
/* Queue packet (standard) */
- skb->sk = sock;
-
if (sock_queue_rcv_skb(sock, skb) < 0)
goto drop;
@@ -1649,7 +1647,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
if (!skb)
goto out;
- skb->sk = sk;
skb_reserve(skb, ddp_dl->header_length);
skb_reserve(skb, dev->hard_header_len);
skb->dev = dev;
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index c32be292c7e3..2022b46ab38f 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -150,7 +150,9 @@ int dns_query(const char *type, const char *name, size_t namelen,
if (!*_result)
goto put;
- memcpy(*_result, upayload->data, len + 1);
+ memcpy(*_result, upayload->data, len);
+ (*_result)[len] = '\0';
+
if (_expiry)
*_expiry = rkey->expiry;
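
The dns_query() fix above copies exactly len payload bytes and NUL-terminates the result itself instead of reading one byte past the payload. A minimal standalone version of that pattern (copy_cstring() is an invented helper, plain libc):

```c
#include <stdlib.h>
#include <string.h>

/* Copy 'len' bytes and terminate them ourselves; never read payload[len]. */
static char *copy_cstring(const void *payload, size_t len)
{
	char *result = malloc(len + 1);	/* room for the terminator */

	if (!result)
		return NULL;
	memcpy(result, payload, len);
	result[len] = '\0';
	return result;
}
```
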
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index c8e26992742f..3f0bb3b3819d 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1866,6 +1866,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
rtnl_lock();
in_dev = ip_mc_find_dev(net, imr);
+ if (!in_dev) {
+ ret = -ENODEV;
+ goto out;
+ }
ifindex = imr->imr_ifindex;
for (imlp = &inet->mc_list;
(iml = rtnl_dereference(*imlp)) != NULL;
@@ -1883,16 +1887,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
*imlp = iml->next_rcu;
- if (in_dev)
- ip_mc_dec_group(in_dev, group);
+ ip_mc_dec_group(in_dev, group);
rtnl_unlock();
/* decrease mem now to avoid the memleak warning */
atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
kfree_rcu(iml, rcu);
return 0;
}
- if (!in_dev)
- ret = -ENODEV;
+out:
rtnl_unlock();
return ret;
}
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index b69a3700642b..523541730777 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -279,6 +279,10 @@ int ip_options_compile(struct net *net,
optptr++;
continue;
}
+ if (unlikely(l < 2)) {
+ pp_ptr = optptr;
+ goto error;
+ }
optlen = optptr[1];
if (optlen<2 || optlen>l) {
pp_ptr = optptr;
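
The ip_options_compile() hunk refuses to read the option length byte when fewer than two bytes of the option area remain. A hedged sketch of that kind of type/length/value walk in plain C (parse_options() is an invented helper, not the kernel routine):

```c
#include <stddef.h>

#define OPT_END		0	/* end of option list */
#define OPT_NOOP	1	/* one-byte padding, no length field */

/* Return 0 on success, -1 on malformed options. */
static int parse_options(const unsigned char *opt, size_t left)
{
	while (left > 0) {
		unsigned char type = opt[0];
		size_t optlen;

		if (type == OPT_END)
			return 0;
		if (type == OPT_NOOP) {
			opt++;
			left--;
			continue;
		}
		if (left < 2)			/* length byte out of bounds */
			return -1;
		optlen = opt[1];
		if (optlen < 2 || optlen > left)
			return -1;
		/* ... handle opt[2 .. optlen-1] here ... */
		opt += optlen;
		left -= optlen;
	}
	return 0;
}
```
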
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 99eb909c9d5f..2d3290496a0a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1250,7 +1250,7 @@ static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
}
/* D-SACK for already forgotten data... Do dumb counting. */
- if (dup_sack && tp->undo_marker && tp->undo_retrans &&
+ if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
!after(end_seq_0, prior_snd_una) &&
after(end_seq_0, tp->undo_marker))
tp->undo_retrans--;
@@ -1304,7 +1304,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
unsigned int new_len = (pkt_len / mss) * mss;
if (!in_sack && new_len < pkt_len) {
new_len += mss;
- if (new_len > skb->len)
+ if (new_len >= skb->len)
return 0;
}
pkt_len = new_len;
@@ -1328,7 +1328,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
/* Account D-SACK for retransmitted packet. */
if (dup_sack && (sacked & TCPCB_RETRANS)) {
- if (tp->undo_marker && tp->undo_retrans &&
+ if (tp->undo_marker && tp->undo_retrans > 0 &&
after(end_seq, tp->undo_marker))
tp->undo_retrans--;
if (sacked & TCPCB_SACKED_ACKED)
@@ -2226,7 +2226,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
tp->lost_out = 0;
tp->undo_marker = 0;
- tp->undo_retrans = 0;
+ tp->undo_retrans = -1;
}
void tcp_clear_retrans(struct tcp_sock *tp)
@@ -3165,7 +3165,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
tp->high_seq = tp->snd_nxt;
tp->prior_ssthresh = 0;
tp->undo_marker = tp->snd_una;
- tp->undo_retrans = tp->retrans_out;
+ tp->undo_retrans = tp->retrans_out ? : -1;
if (icsk->icsk_ca_state < TCP_CA_CWR) {
if (!(flag & FLAG_ECE))
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 987f5cc706b4..fd414b61f966 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2194,13 +2194,15 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
if (!tp->retrans_stamp)
tp->retrans_stamp = TCP_SKB_CB(skb)->when;
- tp->undo_retrans += tcp_skb_pcount(skb);
-
/* snd_nxt is stored to detect loss of retransmitted segment,
* see tcp_input.c tcp_sacktag_write_queue().
*/
TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
}
+
+ if (tp->undo_retrans < 0)
+ tp->undo_retrans = 0;
+ tp->undo_retrans += tcp_skb_pcount(skb);
return err;
}
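
Taken together, the tcp_input.c and tcp_output.c hunks turn undo_retrans into a counter with a -1 "not yet initialised" sentinel: tests become "> 0", and the first retransmission replaces the sentinel with 0 before accumulating. A tiny hedged sketch of that sentinel pattern (plain C, invented names):

```c
/* -1 means "nothing recorded yet", so a stale value from an earlier
 * recovery episode can never be mistaken for a live count. */
static int undo_retrans = -1;

static void on_retransmit(int pcount)
{
	if (undo_retrans < 0)
		undo_retrans = 0;
	undo_retrans += pcount;
}

static void on_dsack_for_retransmit(void)
{
	if (undo_retrans > 0)	/* guard: never decrement the sentinel */
		undo_retrans--;
}
```
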
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 8a84017834c2..57da44707eb1 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -373,9 +373,10 @@ fail:
* specification [SCTP] and any extensions for a list of possible
* error formats.
*/
-struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
- const struct sctp_association *asoc, struct sctp_chunk *chunk,
- __u16 flags, gfp_t gfp)
+struct sctp_ulpevent *
+sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk, __u16 flags,
+ gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sctp_remote_error *sre;
@@ -394,8 +395,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
/* Copy the skb to a new skb with room for us to prepend
* notification with.
*/
- skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
- 0, gfp);
+ skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
/* Pull off the rest of the cause TLV from the chunk. */
skb_pull(chunk->skb, elen);
@@ -406,62 +406,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
event = sctp_skb2event(skb);
sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
- sre = (struct sctp_remote_error *)
- skb_push(skb, sizeof(struct sctp_remote_error));
+ sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
/* Trim the buffer to the right length. */
- skb_trim(skb, sizeof(struct sctp_remote_error) + elen);
+ skb_trim(skb, sizeof(*sre) + elen);
- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_type:
- * It should be SCTP_REMOTE_ERROR.
- */
+ /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
+ memset(sre, 0, sizeof(*sre));
sre->sre_type = SCTP_REMOTE_ERROR;
-
- /*
- * Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_flags: 16 bits (unsigned integer)
- * Currently unused.
- */
sre->sre_flags = 0;
-
- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_length: sizeof (__u32)
- *
- * This field is the total length of the notification data,
- * including the notification header.
- */
sre->sre_length = skb->len;
-
- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_error: 16 bits (unsigned integer)
- * This value represents one of the Operational Error causes defined in
- * the SCTP specification, in network byte order.
- */
sre->sre_error = cause;
-
- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_assoc_id: sizeof (sctp_assoc_t)
- *
- * The association id field, holds the identifier for the association.
- * All notifications for a given association have the same association
- * identifier. For TCP style socket, this field is ignored.
- */
sctp_ulpevent_set_owner(event, asoc);
sre->sre_assoc_id = sctp_assoc2id(asoc);
return event;
-
fail:
return NULL;
}
@@ -904,7 +863,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
return notification->sn_header.sn_type;
}
-/* Copy out the sndrcvinfo into a msghdr. */
+/* RFC6458, Section 5.3.2. SCTP Header Information Structure
+ * (SCTP_SNDRCV, DEPRECATED)
+ */
void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
struct msghdr *msghdr)
{
@@ -913,74 +874,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
if (sctp_ulpevent_is_notification(event))
return;
- /* Sockets API Extensions for SCTP
- * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
- *
- * sinfo_stream: 16 bits (unsigned integer)
- *
- * For recvmsg() the SCTP stack places the message's stream number in
- * this value.
- */
+ memset(&sinfo, 0, sizeof(sinfo));
sinfo.sinfo_stream = event->stream;
- /* sinfo_ssn: 16 bits (unsigned integer)
- *
- * For recvmsg() this value contains the stream sequence number that
- * the remote endpoint placed in the DATA chunk. For fragmented
- * messages this is the same number for all deliveries of the message
- * (if more than one recvmsg() is needed to read the message).
- */
sinfo.sinfo_ssn = event->ssn;
- /* sinfo_ppid: 32 bits (unsigned integer)
- *
- * In recvmsg() this value is
- * the same information that was passed by the upper layer in the peer
- * application. Please note that byte order issues are NOT accounted
- * for and this information is passed opaquely by the SCTP stack from
- * one end to the other.
- */
sinfo.sinfo_ppid = event->ppid;
- /* sinfo_flags: 16 bits (unsigned integer)
- *
- * This field may contain any of the following flags and is composed of
- * a bitwise OR of these values.
- *
- * recvmsg() flags:
- *
- * SCTP_UNORDERED - This flag is present when the message was sent
- * non-ordered.
- */
sinfo.sinfo_flags = event->flags;
- /* sinfo_tsn: 32 bit (unsigned integer)
- *
- * For the receiving side, this field holds a TSN that was
- * assigned to one of the SCTP Data Chunks.
- */
sinfo.sinfo_tsn = event->tsn;
- /* sinfo_cumtsn: 32 bit (unsigned integer)
- *
- * This field will hold the current cumulative TSN as
- * known by the underlying SCTP layer. Note this field is
- * ignored when sending and only valid for a receive
- * operation when sinfo_flags are set to SCTP_UNORDERED.
- */
sinfo.sinfo_cumtsn = event->cumtsn;
- /* sinfo_assoc_id: sizeof (sctp_assoc_t)
- *
- * The association handle field, sinfo_assoc_id, holds the identifier
- * for the association announced in the COMMUNICATION_UP notification.
- * All notifications for a given association have the same identifier.
- * Ignored for one-to-one style sockets.
- */
sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
-
- /* context value that is set via SCTP_CONTEXT socket option. */
+ /* Context value that is set via SCTP_CONTEXT socket option. */
sinfo.sinfo_context = event->asoc->default_rcv_context;
-
/* These fields are not used while receiving. */
sinfo.sinfo_timetolive = 0;
put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
- sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
+ sizeof(sinfo), &sinfo);
}
/* Do accounting for bytes received and hold a reference to the association
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index e00441a2092f..9495be3a61e0 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -541,6 +541,7 @@ receive:
buf = node->bclink.deferred_head;
node->bclink.deferred_head = buf->next;
+ buf->next = NULL;
node->bclink.deferred_size--;
goto receive;
}


@@ -0,0 +1,349 @@
diff --git a/Makefile b/Makefile
index d6c64eb82525..a22bcb567348 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 100
+SUBLEVEL = 101
EXTRAVERSION =
NAME = Saber-toothed Squirrel
@@ -592,6 +592,8 @@ KBUILD_CFLAGS += -fomit-frame-pointer
endif
endif
+KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments)
+
ifdef CONFIG_DEBUG_INFO
KBUILD_CFLAGS += -g
KBUILD_AFLAGS += -gdwarf-2
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 02f300fbf070..e0a8707dd137 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -292,7 +292,9 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
* psw and gprs are stored on the stack
*/
if (addr == (addr_t) &dummy->regs.psw.mask &&
- ((data & ~PSW_MASK_USER) != psw_user_bits ||
+ (((data^psw_user_bits) & ~PSW_MASK_USER) ||
+ (((data^psw_user_bits) & PSW_MASK_ASC) &&
+ ((data|psw_user_bits) & PSW_MASK_ASC) == PSW_MASK_ASC) ||
((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
/* Invalid psw mask. */
return -EINVAL;
@@ -595,7 +597,10 @@ static int __poke_user_compat(struct task_struct *child,
*/
if (addr == (addr_t) &dummy32->regs.psw.mask) {
/* Build a 64 bit psw mask from 31 bit mask. */
- if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits)
+ if (((tmp^psw32_user_bits) & ~PSW32_MASK_USER) ||
+ (((tmp^psw32_user_bits) & PSW32_MASK_ASC) &&
+ ((tmp|psw32_user_bits) & PSW32_MASK_ASC)
+ == PSW32_MASK_ASC))
/* Invalid psw mask. */
return -EINVAL;
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index e1e7f9c831da..e36c5cf38fde 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -428,8 +428,8 @@ sysenter_do_call:
cmpl $(NR_syscalls), %eax
jae sysenter_badsys
call *sys_call_table(,%eax,4)
- movl %eax,PT_EAX(%esp)
sysenter_after_call:
+ movl %eax,PT_EAX(%esp)
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
@@ -510,6 +510,7 @@ ENTRY(system_call)
jae syscall_badsys
syscall_call:
call *sys_call_table(,%eax,4)
+syscall_after_call:
movl %eax,PT_EAX(%esp) # store the return value
syscall_exit:
LOCKDEP_SYS_EXIT
@@ -678,12 +679,12 @@ syscall_fault:
END(syscall_fault)
syscall_badsys:
- movl $-ENOSYS,PT_EAX(%esp)
- jmp syscall_exit
+ movl $-ENOSYS,%eax
+ jmp syscall_after_call
END(syscall_badsys)
sysenter_badsys:
- movl $-ENOSYS,PT_EAX(%esp)
+ movl $-ENOSYS,%eax
jmp sysenter_after_call
END(syscall_badsys)
CFI_ENDPROC
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 4af6f5cc1167..f606487bba56 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
EXPORT_SYMBOL(blk_queue_find_tag);
/**
- * __blk_free_tags - release a given set of tag maintenance info
+ * blk_free_tags - release a given set of tag maintenance info
* @bqt: the tag map to free
*
- * Tries to free the specified @bqt. Returns true if it was
- * actually freed and false if there are still references using it
+ * Drop the reference count on @bqt and frees it when the last reference
+ * is dropped.
*/
-static int __blk_free_tags(struct blk_queue_tag *bqt)
+void blk_free_tags(struct blk_queue_tag *bqt)
{
- int retval;
-
- retval = atomic_dec_and_test(&bqt->refcnt);
- if (retval) {
+ if (atomic_dec_and_test(&bqt->refcnt)) {
BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
bqt->max_depth);
@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
kfree(bqt);
}
-
- return retval;
}
+EXPORT_SYMBOL(blk_free_tags);
/**
* __blk_queue_free_tags - release tag maintenance info
@@ -69,28 +65,13 @@ void __blk_queue_free_tags(struct request_queue *q)
if (!bqt)
return;
- __blk_free_tags(bqt);
+ blk_free_tags(bqt);
q->queue_tags = NULL;
queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
/**
- * blk_free_tags - release a given set of tag maintenance info
- * @bqt: the tag map to free
- *
- * For externally managed @bqt frees the map. Callers of this
- * function must guarantee to have released all the queues that
- * might have been using this tag map.
- */
-void blk_free_tags(struct blk_queue_tag *bqt)
-{
- if (unlikely(!__blk_free_tags(bqt)))
- BUG();
-}
-EXPORT_SYMBOL(blk_free_tags);
-
-/**
* blk_queue_free_tags - release tag maintenance info
* @q: the request queue for the device
*
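
The blk-tag.c hunk folds __blk_free_tags() into blk_free_tags(), which now simply drops a reference and frees the map when the count hits zero. A hedged C11 sketch of that put-and-free-on-last-reference pattern (tag_map_put() is an invented name):

```c
#include <stdatomic.h>
#include <stdlib.h>

struct tag_map {
	atomic_int refcnt;
	/* ... tag bookkeeping ... */
};

/* Drop one reference; free only when the last holder lets go, so callers
 * no longer need to know whether they were the final user. */
static void tag_map_put(struct tag_map *map)
{
	if (atomic_fetch_sub(&map->refcnt, 1) == 1)
		free(map);
}
```
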
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 0e87baf8fcc2..0a450eb517e0 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -446,6 +446,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
+ { PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
/* Asmedia */
{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 6e67fdebdada..6b922365d5e9 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4693,6 +4693,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
* ata_qc_new - Request an available ATA command, for queueing
* @ap: target port
*
+ * Some ATA host controllers may implement a queue depth which is less
+ * than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
+ * the hardware limitation.
+ *
* LOCKING:
* None.
*/
@@ -4700,14 +4704,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
struct ata_queued_cmd *qc = NULL;
+ unsigned int max_queue = ap->host->n_tags;
unsigned int i, tag;
/* no command while frozen */
if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
return NULL;
- for (i = 0; i < ATA_MAX_QUEUE; i++) {
- tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
+ for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
+ tag = tag < max_queue ? tag : 0;
/* the last tag is reserved for internal command. */
if (tag == ATA_TAG_INTERNAL)
@@ -5959,6 +5964,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
{
spin_lock_init(&host->lock);
mutex_init(&host->eh_mutex);
+ host->n_tags = ATA_MAX_QUEUE - 1;
host->dev = dev;
host->flags = flags;
host->ops = ops;
@@ -6041,6 +6047,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
int i, rc;
+ host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
+
/* host must have been started */
if (!(host->flags & ATA_HOST_STARTED)) {
dev_err(host->dev, "BUG: trying to register unstarted host\n");
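
The libata hunks bound the tag search by the host's advertised queue depth (n_tags, clamped to can_queue) instead of always walking ATA_MAX_QUEUE. A hedged sketch of that wrapped, bounded search (find_free_tag() and RESERVED_TAG are invented stand-ins):

```c
#define RESERVED_TAG 31	/* stand-in for the internal-command tag */

/* Start just after the last tag handed out, wrap at max_queue, and skip
 * the reserved slot; return -1 when every usable tag is busy. */
static int find_free_tag(const unsigned char *in_use, unsigned int max_queue,
			 unsigned int last_tag)
{
	unsigned int i, tag;

	for (i = 0, tag = last_tag + 1; i < max_queue; i++, tag++) {
		tag = tag < max_queue ? tag : 0;
		if (tag == RESERVED_TAG)
			continue;
		if (!in_use[tag])
			return (int)tag;
	}
	return -1;
}
```
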
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index f04c0961f993..e5206fc76562 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -331,7 +331,7 @@ static int build_snap_context(struct ceph_snap_realm *realm)
/* alloc new snap context */
err = -ENOMEM;
- if (num > (ULONG_MAX - sizeof(*snapc)) / sizeof(u64))
+ if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
goto fail;
snapc = kzalloc(sizeof(*snapc) + num*sizeof(u64), GFP_NOFS);
if (!snapc)
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
index 6bd325fedc87..19a240446fca 100644
--- a/include/drm/drm_mem_util.h
+++ b/include/drm/drm_mem_util.h
@@ -31,7 +31,7 @@
static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
{
- if (size != 0 && nmemb > ULONG_MAX / size)
+ if (size != 0 && nmemb > SIZE_MAX / size)
return NULL;
if (size * nmemb <= PAGE_SIZE)
@@ -44,7 +44,7 @@ static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
{
- if (size != 0 && nmemb > ULONG_MAX / size)
+ if (size != 0 && nmemb > SIZE_MAX / size)
return NULL;
if (size * nmemb <= PAGE_SIZE)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 645231c373c8..b795ee5cd208 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -35,6 +35,7 @@
#define LLONG_MAX ((long long)(~0ULL>>1))
#define LLONG_MIN (-LLONG_MAX - 1)
#define ULLONG_MAX (~0ULL)
+#define SIZE_MAX (~(size_t)0)
#define STACK_MAGIC 0xdeadbeef
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 50d7cb1ee947..dd16deb27dd8 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -539,6 +539,7 @@ struct ata_host {
struct device *dev;
void __iomem * const *iomap;
unsigned int n_ports;
+ unsigned int n_tags; /* nr of NCQ tags */
void *private_data;
struct ata_port_operations *ops;
unsigned long flags;
diff --git a/include/linux/slab.h b/include/linux/slab.h
index a595dce6b0c7..67d5d94b783a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -242,7 +242,7 @@ size_t ksize(const void *);
*/
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
- if (size != 0 && n > ULONG_MAX / size)
+ if (size != 0 && n > SIZE_MAX / size)
return NULL;
return __kmalloc(n * size, flags);
}
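
Several hunks above (drm_calloc_large(), kmalloc_array(), the ceph snap context) change the multiplication-overflow guard from ULONG_MAX to SIZE_MAX so the bound matches the size_t arithmetic being checked. A standalone sketch of the same guard with plain malloc (checked_alloc_array() is an invented helper):

```c
#include <stdint.h>
#include <stdlib.h>

/* Refuse n * size when the product cannot be represented in a size_t. */
static void *checked_alloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return malloc(n * size);
}
```
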
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0d4e0ad97a04..efd682099a0a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2348,6 +2348,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
} else {
if (cow)
huge_ptep_set_wrprotect(src, addr, src_pte);
+ entry = huge_ptep_get(src_pte);
ptepage = pte_page(entry);
get_page(ptepage);
page_dup_rmap(ptepage);
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 45eb6217bf38..ad6ee88a3d48 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -750,7 +750,9 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
}
spin_lock_irqsave(&object->lock, flags);
- if (ptr + size > object->pointer + object->size) {
+ if (size == SIZE_MAX) {
+ size = object->pointer + object->size - ptr;
+ } else if (ptr + size > object->pointer + object->size) {
kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
dump_object_info(object);
kmem_cache_free(scan_area_cache, area);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1196c7728ede..ad9d90064a4b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -349,6 +349,12 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
if (unlikely(!va))
return ERR_PTR(-ENOMEM);
+ /*
+ * Only scan the relevant parts containing pointers to other objects
+ * to avoid false negatives.
+ */
+ kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
+
retry:
spin_lock(&vmap_area_lock);
/*
@@ -1669,11 +1675,11 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
insert_vmalloc_vmlist(area);
/*
- * A ref_count = 3 is needed because the vm_struct and vmap_area
- * structures allocated in the __get_vm_area_node() function contain
- * references to the virtual address of the vmalloc'ed block.
+ * A ref_count = 2 is needed because vm_struct allocated in
+ * __get_vm_area_node() contains a reference to the virtual address of
+ * the vmalloc'ed block.
*/
- kmemleak_alloc(addr, real_size, 3, gfp_mask);
+ kmemleak_alloc(addr, real_size, 2, gfp_mask);
return addr;

(9 file diffs suppressed because they are too large)


@@ -1,5 +1,5 @@
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index eee5f8e..9e08070 100644
index ed7ccdc..4bda948 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -35,15 +35,17 @@ create_package() {
@ -28,7 +28,7 @@ index eee5f8e..9e08070 100644
fi
# Create the package
- dpkg-gencontrol -isp $forcearch -p$pname -P"$pdir"
- dpkg-gencontrol -isp $forcearch -Vkernel:debarch="${debarch:-$(dpkg --print-architecture)}" -p$pname -P"$pdir"
+ dpkg-gencontrol $forcearch -Vkernel:debarch="${debarch:-$(dpkg --print-architecture)}" -p$pname -P"$pdir"
dpkg --build "$pdir" ..
}
@ -169,7 +169,7 @@ index eee5f8e..9e08070 100644
for script in postinst postrm preinst prerm ; do
mkdir -p "$tmpdir$debhookdir/$script.d"
cat <<EOF > "$tmpdir/DEBIAN/$script"
@@ -149,12 +201,38 @@ set -e
@@ -149,12 +201,39 @@ set -e
# Pass maintainer script parameters to hook scripts
export DEB_MAINT_PARAMS="\$*"
@ -189,8 +189,9 @@ index eee5f8e..9e08070 100644
+sed -e "s/exit 0//g" -i $tmpdir/DEBIAN/postinst
+cat >> $tmpdir/DEBIAN/postinst <<EOT
+if [ "\$(grep nand /proc/partitions)" != "" ] && [ "\$(grep mmc /proc/partitions)" = "" ]; then
+mkimage -A arm -O linux -T kernel -C none -a "0x40008000" -e "0x40008000" -n "Linux kernel" -d /$installed_image_path /boot/uImage > /dev/null 2>&1
+mkimage -A arm -O linux -T kernel -C none -a "0x40008000" -e "0x40008000" -n "Linux kernel" -d /$installed_image_path /tmp/uImage > /dev/null 2>&1
+rm -f /$installed_image_path /boot/zImage
+mv /tmp/uImage /boot/uImage
+else
+ln -sf $(basename $installed_image_path) /boot/zImage > /dev/null 2>&1 || mv /$installed_image_path /boot/zImage
+fi
@ -202,14 +203,14 @@ index eee5f8e..9e08070 100644
+##
+sed -e "s/exit 0//g" -i $tmpdir/DEBIAN/preinst
+cat >> $tmpdir/DEBIAN/preinst <<EOT
+rm -f /boot/System.map* /boot/config* /$installed_image_path /boot/zImage
+rm -f /boot/System.map* /boot/config* /boot/vmlinuz* /boot/zImage /boot/uImage /boot/initrd*
+EOT
+echo "exit 0" >> $tmpdir/DEBIAN/preinst
+
# Try to determine maintainer and email values
if [ -n "$DEBEMAIL" ]; then
email=$DEBEMAIL
@@ -242,32 +320,37 @@ EOF
@@ -242,14 +320,17 @@ EOF
fi
@ -232,23 +233,8 @@ index eee5f8e..9e08070 100644
+(cd $objtree; cp $KCONFIG_CONFIG $destdir/.config) # copy .config manually to be where it's expected to be
ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build"
rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
-arch=$(dpkg --print-architecture)
+
+olddir="$(pwd)"; cd "$destdir"; make M=scripts clean; cd $olddir
cat <<EOF >> debian/control
Package: $kernel_headers_packagename
Provides: linux-headers, linux-headers-2.6
-Architecture: $arch
-Description: Linux kernel headers for $KERNELRELEASE on $arch
- This package provides kernel header files for $KERNELRELEASE on $arch
+Architecture: any
+Description: Linux kernel headers for $KERNELRELEASE on \${kernel:debarch}
+ This package provides kernel header files for $KERNELRELEASE on \${kernel:debarch}
.
This is useful for people who need to build external modules
EOF
@@ -266,7 +347,8 @@ EOF
# Do we have firmware? Move it out of the way and build it into a package.
if [ -e "$tmpdir/lib/firmware" ]; then
@ -258,7 +244,7 @@ index eee5f8e..9e08070 100644
cat <<EOF >> debian/control
@@ -293,9 +376,35 @@ EOF
@@ -292,9 +374,35 @@ EOF
if [ "$ARCH" != "um" ]; then
create_package "$kernel_headers_packagename" "$kernel_headers_dir"


@@ -0,0 +1,13 @@
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index ed7ccdc..20a2d0f 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -253,6 +253,8 @@ mkdir -p "$destdir"
ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build"
rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
+olddir="$(pwd)"; cd "$destdir"; make M=scripts clean; cd $olddir
+
cat <<EOF >> debian/control
Package: $kernel_headers_packagename