Mirror of https://github.com/Fishwaldo/build.git, synced 2025-03-27 01:02:19 +00:00
929 lines, 30 KiB (diff)

diff --git a/Makefile b/Makefile
index ed97caf40f71..d6c64eb82525 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 99
+SUBLEVEL = 100
EXTRAVERSION =
NAME = Saber-toothed Squirrel

diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 268b2455e7b0..b1cbcff69cdb 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1070,6 +1070,15 @@ again:
intel_pmu_lbr_read();

/*
+ * CondChgd bit 63 doesn't mean any overflow status. Ignore
+ * and clear the bit.
+ */
+ if (__test_and_clear_bit(63, (unsigned long *)&status)) {
+ if (!status)
+ goto done;
+ }
+
+ /*
* PEBS overflow sets bit 62 in the global status register
*/
if (__test_and_clear_bit(62, (unsigned long *)&status)) {
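
The hunk above filters the CondChgd indicator (bit 63 of GLOBAL_STATUS) out before the remaining bits are treated as real counter overflows. A minimal userspace sketch of that masking order (illustrative only; handle_status() is a hypothetical helper, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Sketch: strip CondChgd (bit 63) first; if nothing else is set,
 * there was no actual overflow and the handler can bail out. */
static int handle_status(uint64_t status)
{
    if (status & (1ULL << 63)) {
        status &= ~(1ULL << 63);    /* CondChgd: not an overflow */
        if (!status)
            return 0;               /* nothing left to handle */
    }
    if (status & (1ULL << 62))
        printf("PEBS overflow\n");  /* bit 62 = PEBS, as in the hunk */
    return 1;
}
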
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 36e5a8ee0e1e..1ae2e0ea5492 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -14558,38 +14558,40 @@ static struct pcomp_testvec zlib_decomp_tv_template[] = {
static struct comp_testvec lzo_comp_tv_template[] = {
{
.inlen = 70,
- .outlen = 46,
+ .outlen = 57,
.input = "Join us now and share the software "
"Join us now and share the software ",
.output = "\x00\x0d\x4a\x6f\x69\x6e\x20\x75"
- "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
- "\x64\x20\x73\x68\x61\x72\x65\x20"
- "\x74\x68\x65\x20\x73\x6f\x66\x74"
- "\x77\x70\x01\x01\x4a\x6f\x69\x6e"
- "\x3d\x88\x00\x11\x00\x00",
+ "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
+ "\x64\x20\x73\x68\x61\x72\x65\x20"
+ "\x74\x68\x65\x20\x73\x6f\x66\x74"
+ "\x77\x70\x01\x32\x88\x00\x0c\x65"
+ "\x20\x74\x68\x65\x20\x73\x6f\x66"
+ "\x74\x77\x61\x72\x65\x20\x11\x00"
+ "\x00",
}, {
.inlen = 159,
- .outlen = 133,
+ .outlen = 131,
.input = "This document describes a compression method based on the LZO "
"compression algorithm. This document defines the application of "
"the LZO algorithm used in UBIFS.",
- .output = "\x00\x2b\x54\x68\x69\x73\x20\x64"
+ .output = "\x00\x2c\x54\x68\x69\x73\x20\x64"
"\x6f\x63\x75\x6d\x65\x6e\x74\x20"
"\x64\x65\x73\x63\x72\x69\x62\x65"
"\x73\x20\x61\x20\x63\x6f\x6d\x70"
"\x72\x65\x73\x73\x69\x6f\x6e\x20"
"\x6d\x65\x74\x68\x6f\x64\x20\x62"
"\x61\x73\x65\x64\x20\x6f\x6e\x20"
- "\x74\x68\x65\x20\x4c\x5a\x4f\x2b"
- "\x8c\x00\x0d\x61\x6c\x67\x6f\x72"
- "\x69\x74\x68\x6d\x2e\x20\x20\x54"
- "\x68\x69\x73\x2a\x54\x01\x02\x66"
- "\x69\x6e\x65\x73\x94\x06\x05\x61"
- "\x70\x70\x6c\x69\x63\x61\x74\x76"
- "\x0a\x6f\x66\x88\x02\x60\x09\x27"
- "\xf0\x00\x0c\x20\x75\x73\x65\x64"
- "\x20\x69\x6e\x20\x55\x42\x49\x46"
- "\x53\x2e\x11\x00\x00",
+ "\x74\x68\x65\x20\x4c\x5a\x4f\x20"
+ "\x2a\x8c\x00\x09\x61\x6c\x67\x6f"
+ "\x72\x69\x74\x68\x6d\x2e\x20\x20"
+ "\x2e\x54\x01\x03\x66\x69\x6e\x65"
+ "\x73\x20\x74\x06\x05\x61\x70\x70"
+ "\x6c\x69\x63\x61\x74\x76\x0a\x6f"
+ "\x66\x88\x02\x60\x09\x27\xf0\x00"
+ "\x0c\x20\x75\x73\x65\x64\x20\x69"
+ "\x6e\x20\x55\x42\x49\x46\x53\x2e"
+ "\x11\x00\x00",
},
};
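
The replacement .output strings are the bytes the updated LZO compressor now emits for the same inputs, which is why .outlen changes with them (46 to 57 and 133 to 131). A hedged sketch of how such vectors can be regenerated with the in-kernel LZO API (lzo1x_1_compress() from <linux/lzo.h>; the dump helper itself is hypothetical):

#include <linux/kernel.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>

/* Sketch: compress a known input and print the bytes that would go
 * into a comp_testvec .output field. Error handling is minimal. */
static void dump_lzo_vector(const u8 *in, size_t in_len)
{
    size_t out_len = lzo1x_worst_compress(in_len);
    u8 *out = vmalloc(out_len);
    void *wrkmem = vmalloc(LZO1X_1_MEM_COMPRESS);
    size_t i;

    if (out && wrkmem &&
        lzo1x_1_compress(in, in_len, out, &out_len, wrkmem) == LZO_E_OK) {
        pr_info("outlen = %zu\n", out_len);
        for (i = 0; i < out_len; i++)
            pr_cont("\\x%02x", out[i]);
    }
    vfree(wrkmem);
    vfree(out);
}
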
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 60404f4b2446..adc9bfd4d82f 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -709,6 +709,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
struct radeon_device *rdev = dev->dev_private;
int ret = 0;

+ /* don't leak the edid if we already fetched it in detect() */
+ if (radeon_connector->edid)
+ goto got_edid;
+
/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
@@ -748,6 +752,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
}
if (radeon_connector->edid) {
+got_edid:
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
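
The radeon fix works because detect() may already have cached an EDID in radeon_connector->edid; fetching again would overwrite, and so leak, that allocation. The shape of the pattern as a hedged sketch (cached_get() and fetch_blob() are hypothetical names):

/* Sketch: reuse a cached object instead of refetching over it. The
 * old code dropped the previous pointer (a leak); the fix branches
 * past the fetch whenever a copy already exists. */
struct edid_cache {
    void *edid;
};

static void *cached_get(struct edid_cache *c, void *(*fetch_blob)(void))
{
    if (!c->edid)            /* only fetch when nothing is cached */
        c->edid = fetch_blob();
    return c->edid;          /* caller must not free this copy */
}
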
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 97b2e21ac46a..cf065df9bb18 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -582,7 +582,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
int map_size;
- u32 ver;
+ u32 ver, sts;
static int iommu_allocated = 0;
int agaw = 0;
int msagaw = 0;
@@ -652,6 +652,15 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
(unsigned long long)iommu->cap,
(unsigned long long)iommu->ecap);

+ /* Reflect status in gcmd */
+ sts = readl(iommu->reg + DMAR_GSTS_REG);
+ if (sts & DMA_GSTS_IRES)
+ iommu->gcmd |= DMA_GCMD_IRE;
+ if (sts & DMA_GSTS_TES)
+ iommu->gcmd |= DMA_GCMD_TE;
+ if (sts & DMA_GSTS_QIES)
+ iommu->gcmd |= DMA_GCMD_QIE;
+
raw_spin_lock_init(&iommu->register_lock);

drhd->iommu = iommu;
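
alloc_iommu() keeps a software shadow (iommu->gcmd) of the global command register, and later updates are read-modify-write against that shadow; seeding it from the live status register keeps features the firmware already enabled from being switched off. A compressed sketch of the seeding (the bit values are illustrative stand-ins for the VT-d status/command pairs):

#include <stddef.h>
#include <stdint.h>

/* Sketch: for each feature that reports enabled in status, set the
 * matching bit in the command shadow before any later write-back. */
static const struct { uint32_t sts_bit, cmd_bit; } mirror[] = {
    { 1u << 25, 1u << 25 },  /* interrupt remapping (IRES -> IRE) */
    { 1u << 31, 1u << 31 },  /* translation (TES -> TE) */
    { 1u << 26, 1u << 26 },  /* queued invalidation (QIES -> QIE) */
};

static uint32_t seed_gcmd(uint32_t sts)
{
    uint32_t gcmd = 0;
    size_t i;

    for (i = 0; i < sizeof(mirror) / sizeof(mirror[0]); i++)
        if (sts & mirror[i].sts_bit)
            gcmd |= mirror[i].cmd_bit;
    return gcmd;
}
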
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 4e1c6bfc9c8d..dd255c578ad9 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3659,6 +3659,7 @@ static struct notifier_block device_nb = {
int __init intel_iommu_init(void)
{
int ret = 0;
+ struct dmar_drhd_unit *drhd;

/* VT-d is required for a TXT/tboot launch, so enforce that */
force_on = tboot_force_iommu();
@@ -3669,6 +3670,20 @@ int __init intel_iommu_init(void)
return -ENODEV;
}

+ /*
+ * Disable translation if already enabled prior to OS handover.
+ */
+ for_each_drhd_unit(drhd) {
+ struct intel_iommu *iommu;
+
+ if (drhd->ignored)
+ continue;
+
+ iommu = drhd->iommu;
+ if (iommu->gcmd & DMA_GCMD_TE)
+ iommu_disable_translation(iommu);
+ }
+
if (dmar_dev_scope_init() < 0) {
if (force_on)
panic("tboot: Failed to initialize DMAR device scope\n");

diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index ef1f9400b967..b2740f12b180 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2411,7 +2411,7 @@ static int be_open(struct net_device *netdev)

for_all_evt_queues(adapter, eqo, i) {
napi_enable(&eqo->napi);
- be_eq_notify(adapter, eqo->q.id, true, false, 0);
+ be_eq_notify(adapter, eqo->q.id, true, true, 0);
}

status = be_cmd_link_status_query(adapter, NULL, NULL,

diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 8e2ac643a777..ed6ec513defa 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1086,6 +1086,24 @@ static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac)
return vp;
}

+static void vnet_cleanup(void)
+{
+ struct vnet *vp;
+ struct net_device *dev;
+
+ mutex_lock(&vnet_list_mutex);
+ while (!list_empty(&vnet_list)) {
+ vp = list_first_entry(&vnet_list, struct vnet, list);
+ list_del(&vp->list);
+ dev = vp->dev;
+ /* vio_unregister_driver() should have cleaned up port_list */
+ BUG_ON(!list_empty(&vp->port_list));
+ unregister_netdev(dev);
+ free_netdev(dev);
+ }
+ mutex_unlock(&vnet_list_mutex);
+}
+
static const char *local_mac_prop = "local-mac-address";

static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp,
@@ -1244,7 +1262,6 @@ static int vnet_port_remove(struct vio_dev *vdev)

kfree(port);

- unregister_netdev(vp->dev);
}
return 0;
}
@@ -1272,6 +1289,7 @@ static int __init vnet_init(void)
static void __exit vnet_exit(void)
{
vio_unregister_driver(&vnet_port_driver);
+ vnet_cleanup();
}

module_init(vnet_init);

diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index bac88c22d990..fbe75a784edb 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -681,7 +681,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
dev->hard_header_len);

- po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr);
+ po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
po->chan.private = sk;
po->chan.ops = &pppoe_chan_ops;
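
The extra "- 2" reserves room for the 16-bit PPP protocol field, which rides inside the PPPoE payload and was previously not counted, letting full-sized PPP frames exceed the Ethernet MTU. The arithmetic, worked for a standard Ethernet device (sizeof(struct pppoe_hdr) is 6 bytes):

#include <stdio.h>

int main(void)
{
    int dev_mtu = 1500;    /* typical Ethernet MTU */
    int pppoe_hdr = 6;     /* ver/type, code, session id, length */
    int ppp_proto = 2;     /* the PPP protocol field the fix accounts for */

    /* chan.mtu = 1500 - 6 - 2 = 1492 */
    printf("chan.mtu = %d\n", dev_mtu - pppoe_hdr - ppp_proto);
    return 0;
}
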
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 9d1b3ca6334b..a884c322f3ea 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -457,6 +457,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}

tx_info = MWIFIEX_SKB_TXCB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
mwifiex_fill_buffer(skb);
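
MWIFIEX_SKB_TXCB() aliases skb->cb, a scratch area every layer the buffer traverses is free to scribble on, so it can hold stale bytes when the driver receives the skb. Zeroing before assigning individual members is the standard defense; a small sketch with a hypothetical control-block struct:

#include <string.h>

struct txinfo {              /* stand-in for the driver's cb struct */
    unsigned bss_num, bss_type, flags;
};

static void prepare_tx(struct txinfo *tx_info, unsigned num, unsigned type)
{
    memset(tx_info, 0, sizeof(*tx_info));  /* clear stale cb contents */
    tx_info->bss_num = num;                /* then set only what we own */
    tx_info->bss_type = type;
    /* tx_info->flags now reliably starts from 0 */
}
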
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 19db29f67558..f27d0c8cd9e8 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -185,6 +185,7 @@ void thaw_processes(void)

printk("Restarting tasks ... ");

+ __usermodehelper_set_disable_depth(UMH_FREEZING);
thaw_workqueues();

read_lock(&tasklist_lock);

diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 877aa733b961..b7045793bd56 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -569,9 +569,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
struct itimerspec *new_setting,
struct itimerspec *old_setting)
{
+ ktime_t exp;
+
if (!rtcdev)
return -ENOTSUPP;

+ if (flags & ~TIMER_ABSTIME)
+ return -EINVAL;
+
if (old_setting)
alarm_timer_get(timr, old_setting);

@@ -581,8 +586,16 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,

/* start the timer */
timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
- alarm_start(&timr->it.alarm.alarmtimer,
- timespec_to_ktime(new_setting->it_value));
+ exp = timespec_to_ktime(new_setting->it_value);
+ /* Convert (if necessary) to absolute time */
+ if (flags != TIMER_ABSTIME) {
+ ktime_t now;
+
+ now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
+ exp = ktime_add(now, exp);
+ }
+
+ alarm_start(&timr->it.alarm.alarmtimer, exp);
return 0;
}

@@ -714,6 +727,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
if (!alarmtimer_get_rtcdev())
return -ENOTSUPP;

+ if (flags & ~TIMER_ABSTIME)
+ return -EINVAL;
+
if (!capable(CAP_WAKE_ALARM))
return -EPERM;
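
Two behaviors change above: unrecognized flag bits now fail with -EINVAL instead of being silently accepted, and a relative it_value is converted to an absolute expiry before alarm_start(), since the alarmtimer queue orders purely by absolute time. Both steps in one sketch, with plain integers standing in for ktime_t:

#define TIMER_ABSTIME 0x1    /* illustrative value */

typedef long long ns_t;

static int set_alarm(int flags, ns_t value, ns_t now, ns_t *expiry)
{
    if (flags & ~TIMER_ABSTIME)    /* any unknown flag bit: reject */
        return -1;                 /* -EINVAL in the kernel */

    if (flags != TIMER_ABSTIME)    /* relative: offset from current time */
        value = now + value;       /* ktime_add() in the kernel */

    *expiry = value;               /* queue works on absolute expiries */
    return 0;
}
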
diff --git a/mm/shmem.c b/mm/shmem.c
index 58c4a477be67..4bb5a80dd13b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -76,6 +76,17 @@ static struct vfsmount *shm_mnt;
/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

+/*
+ * vmtruncate_range() communicates with shmem_fault via
+ * inode->i_private (with i_mutex making sure that it has only one user at
+ * a time): we would prefer not to enlarge the shmem inode just for that.
+ */
+struct shmem_falloc {
+ wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
+ pgoff_t start; /* start of range currently being fallocated */
+ pgoff_t next; /* the next page offset to be fallocated */
+};
+
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
char *name; /* xattr name */
@@ -488,22 +499,19 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
}

index = start;
- for ( ; ; ) {
+ while (index <= end) {
cond_resched();
pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
pvec.pages, indices);
if (!pvec.nr) {
- if (index == start)
+ /* If all gone or hole-punch, we're done */
+ if (index == start || end != -1)
break;
+ /* But if truncating, restart to make sure all gone */
index = start;
continue;
}
- if (index == start && indices[0] > end) {
- shmem_deswap_pagevec(&pvec);
- pagevec_release(&pvec);
- break;
- }
mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
@@ -513,8 +521,12 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
break;

if (radix_tree_exceptional_entry(page)) {
- nr_swaps_freed += !shmem_free_swap(mapping,
- index, page);
+ if (shmem_free_swap(mapping, index, page)) {
+ /* Swap was replaced by page: retry */
+ index--;
+ break;
+ }
+ nr_swaps_freed++;
continue;
}

@@ -522,6 +534,11 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
if (page->mapping == mapping) {
VM_BUG_ON(PageWriteback(page));
truncate_inode_page(mapping, page);
+ } else {
+ /* Page was replaced by swap: retry */
+ unlock_page(page);
+ index--;
+ break;
}
unlock_page(page);
}
@@ -1060,6 +1077,63 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
int error;
int ret = VM_FAULT_LOCKED;

+ /*
+ * Trinity finds that probing a hole which tmpfs is punching can
+ * prevent the hole-punch from ever completing: which in turn
+ * locks writers out with its hold on i_mutex. So refrain from
+ * faulting pages into the hole while it's being punched. Although
+ * shmem_truncate_range() does remove the additions, it may be unable to
+ * keep up, as each new page needs its own unmap_mapping_range() call,
+ * and the i_mmap tree grows ever slower to scan if new vmas are added.
+ *
+ * It does not matter if we sometimes reach this check just before the
+ * hole-punch begins, so that one fault then races with the punch:
+ * we just need to make racing faults a rare case.
+ *
+ * The implementation below would be much simpler if we just used a
+ * standard mutex or completion: but we cannot take i_mutex in fault,
+ * and bloating every shmem inode for this unlikely case would be sad.
+ */
+ if (unlikely(inode->i_private)) {
+ struct shmem_falloc *shmem_falloc;
+
+ spin_lock(&inode->i_lock);
+ shmem_falloc = inode->i_private;
+ if (shmem_falloc &&
+ vmf->pgoff >= shmem_falloc->start &&
+ vmf->pgoff < shmem_falloc->next) {
+ wait_queue_head_t *shmem_falloc_waitq;
+ DEFINE_WAIT(shmem_fault_wait);
+
+ ret = VM_FAULT_NOPAGE;
+ if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
+ !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ /* It's polite to up mmap_sem if we can */
+ up_read(&vma->vm_mm->mmap_sem);
+ ret = VM_FAULT_RETRY;
+ }
+
+ shmem_falloc_waitq = shmem_falloc->waitq;
+ prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+ TASK_UNINTERRUPTIBLE);
+ spin_unlock(&inode->i_lock);
+ schedule();
+
+ /*
+ * shmem_falloc_waitq points into the vmtruncate_range()
+ * stack of the hole-punching task: shmem_falloc_waitq
+ * is usually invalid by the time we reach here, but
+ * finish_wait() does not dereference it in that case;
+ * though i_lock needed lest racing with wake_up_all().
+ */
+ spin_lock(&inode->i_lock);
+ finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+ spin_unlock(&inode->i_lock);
+ return ret;
+ }
+ spin_unlock(&inode->i_lock);
+ }
+
error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
if (error)
return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
@@ -1071,6 +1145,47 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return ret;
}

+int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+{
+ /*
+ * If the underlying filesystem is not going to provide
+ * a way to truncate a range of blocks (punch a hole) -
+ * we should return failure right now.
+ * Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range().
+ */
+ if (inode->i_op->truncate_range != shmem_truncate_range)
+ return -ENOSYS;
+
+ mutex_lock(&inode->i_mutex);
+ {
+ struct shmem_falloc shmem_falloc;
+ struct address_space *mapping = inode->i_mapping;
+ loff_t unmap_start = round_up(lstart, PAGE_SIZE);
+ loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
+
+ shmem_falloc.waitq = &shmem_falloc_waitq;
+ shmem_falloc.start = unmap_start >> PAGE_SHIFT;
+ shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
+ spin_lock(&inode->i_lock);
+ inode->i_private = &shmem_falloc;
+ spin_unlock(&inode->i_lock);
+
+ if ((u64)unmap_end > (u64)unmap_start)
+ unmap_mapping_range(mapping, unmap_start,
+ 1 + unmap_end - unmap_start, 0);
+ shmem_truncate_range(inode, lstart, lend);
+ /* No need to unmap again: hole-punching leaves COWed pages */
+
+ spin_lock(&inode->i_lock);
+ inode->i_private = NULL;
+ wake_up_all(&shmem_falloc_waitq);
+ spin_unlock(&inode->i_lock);
+ }
+ mutex_unlock(&inode->i_mutex);
+ return 0;
+}
+
#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
@@ -2547,6 +2662,12 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

+int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+{
+ /* Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range(). */
+ return -ENOSYS;
+}
+
#define shmem_vm_ops generic_file_vm_ops
#define shmem_file_operations ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
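
The core of the shmem change is a stack-based rendezvous: the hole-punching task publishes the range being punched through inode->i_private, and any fault landing inside that range sleeps on the puncher's on-stack wait queue until wake_up_all(). A much simplified pthread model of the handshake (illustrative; kernel wait queues, i_lock and the VM_FAULT_* retry protocol are all elided):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t punch_done = PTHREAD_COND_INITIALIZER;
static struct { bool active; long start, next; } falloc;  /* i_private stand-in */

/* Hole puncher: publish the range, punch, then wake every waiter. */
static void punch_hole(long start, long next)
{
    pthread_mutex_lock(&lock);
    falloc.active = true;
    falloc.start = start;
    falloc.next = next;
    pthread_mutex_unlock(&lock);

    /* ... unmap and truncate the range here ... */

    pthread_mutex_lock(&lock);
    falloc.active = false;
    pthread_cond_broadcast(&punch_done);   /* wake_up_all() */
    pthread_mutex_unlock(&lock);
}

/* Fault path: an offset inside the published range waits for the
 * punch to finish instead of re-instantiating pages into the hole. */
static void fault(long pgoff)
{
    pthread_mutex_lock(&lock);
    while (falloc.active && pgoff >= falloc.start && pgoff < falloc.next)
        pthread_cond_wait(&punch_done, &lock);
    pthread_mutex_unlock(&lock);
    /* ... safe to instantiate the page now ... */
}
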
diff --git a/mm/truncate.c b/mm/truncate.c
index 4224627695ba..f38055cb8af6 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -603,31 +603,6 @@ int vmtruncate(struct inode *inode, loff_t newsize)
}
EXPORT_SYMBOL(vmtruncate);

-int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
-{
- struct address_space *mapping = inode->i_mapping;
- loff_t holebegin = round_up(lstart, PAGE_SIZE);
- loff_t holelen = 1 + lend - holebegin;
-
- /*
- * If the underlying filesystem is not going to provide
- * a way to truncate a range of blocks (punch a hole) -
- * we should return failure right now.
- */
- if (!inode->i_op->truncate_range)
- return -ENOSYS;
-
- mutex_lock(&inode->i_mutex);
- inode_dio_wait(inode);
- unmap_mapping_range(mapping, holebegin, holelen, 1);
- inode->i_op->truncate_range(inode, lstart, lend);
- /* unmap again to remove racily COWed private pages */
- unmap_mapping_range(mapping, holebegin, holelen, 1);
- mutex_unlock(&inode->i_mutex);
-
- return 0;
-}
-
/**
* truncate_pagecache_range - unmap and remove pagecache that is hole-punched
* @inode: inode

diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 912613c566cb..37c486c019fe 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -96,8 +96,11 @@ EXPORT_SYMBOL(vlan_dev_vlan_id);

static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
- if (skb_cow(skb, skb_headroom(skb)) < 0)
+ if (skb_cow(skb, skb_headroom(skb)) < 0) {
+ kfree_skb(skb);
return NULL;
+ }
+
memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
skb->mac_header += VLAN_HLEN;
return skb;

diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 334d4cd7612f..79aaac288afb 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1494,8 +1494,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
goto drop;

/* Queue packet (standard) */
- skb->sk = sock;
-
if (sock_queue_rcv_skb(sock, skb) < 0)
goto drop;

@@ -1649,7 +1647,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
if (!skb)
goto out;

- skb->sk = sk;
skb_reserve(skb, ddp_dl->header_length);
skb_reserve(skb, dev->hard_header_len);
skb->dev = dev;

diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index c32be292c7e3..2022b46ab38f 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -150,7 +150,9 @@ int dns_query(const char *type, const char *name, size_t namelen,
if (!*_result)
goto put;

- memcpy(*_result, upayload->data, len + 1);
+ memcpy(*_result, upayload->data, len);
+ (*_result)[len] = '\0';
+
if (_expiry)
*_expiry = rkey->expiry;
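
The old memcpy of len + 1 bytes assumed the payload carried its own NUL terminator, reading one byte past the data when it did not. Copying exactly len bytes and terminating explicitly removes the overread:

#include <string.h>

/* dst must have room for len + 1 bytes. Illustrative helper showing
 * the pattern, not the kernel function itself. */
static void copy_result(char *dst, const char *payload, size_t len)
{
    memcpy(dst, payload, len);   /* copy only the payload bytes */
    dst[len] = '\0';             /* terminate explicitly */
}
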
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index c8e26992742f..3f0bb3b3819d 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1866,6 +1866,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)

rtnl_lock();
in_dev = ip_mc_find_dev(net, imr);
+ if (!in_dev) {
+ ret = -ENODEV;
+ goto out;
+ }
ifindex = imr->imr_ifindex;
for (imlp = &inet->mc_list;
(iml = rtnl_dereference(*imlp)) != NULL;
@@ -1883,16 +1887,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)

*imlp = iml->next_rcu;

- if (in_dev)
- ip_mc_dec_group(in_dev, group);
+ ip_mc_dec_group(in_dev, group);
rtnl_unlock();
/* decrease mem now to avoid the memleak warning */
atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
kfree_rcu(iml, rcu);
return 0;
}
- if (!in_dev)
- ret = -ENODEV;
+out:
rtnl_unlock();
return ret;
}

diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index b69a3700642b..523541730777 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -279,6 +279,10 @@ int ip_options_compile(struct net *net,
optptr++;
continue;
}
+ if (unlikely(l < 2)) {
+ pp_ptr = optptr;
+ goto error;
+ }
optlen = optptr[1];
if (optlen<2 || optlen>l) {
pp_ptr = optptr;
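
The new check guards the read of optptr[1]: with a single byte left in the options area there is no room for the length byte, so reading it would run past the buffer. The required ordering as a standalone fragment:

/* 'l' is the number of bytes remaining in the options area.
 * Illustrative fragment of the validation order. */
static int parse_option(const unsigned char *optptr, int l, int *optlen)
{
    if (l < 2)               /* no room for type + length bytes */
        return -1;           /* truncated option */
    *optlen = optptr[1];     /* now safe: optptr[1] is in bounds */
    if (*optlen < 2 || *optlen > l)
        return -1;           /* malformed length field */
    return 0;
}
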
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 99eb909c9d5f..2d3290496a0a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1250,7 +1250,7 @@ static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
}

/* D-SACK for already forgotten data... Do dumb counting. */
- if (dup_sack && tp->undo_marker && tp->undo_retrans &&
+ if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
!after(end_seq_0, prior_snd_una) &&
after(end_seq_0, tp->undo_marker))
tp->undo_retrans--;
@@ -1304,7 +1304,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
unsigned int new_len = (pkt_len / mss) * mss;
if (!in_sack && new_len < pkt_len) {
new_len += mss;
- if (new_len > skb->len)
+ if (new_len >= skb->len)
return 0;
}
pkt_len = new_len;
@@ -1328,7 +1328,7 @@ static u8 tcp_sacktag_one(struct sock *sk,

/* Account D-SACK for retransmitted packet. */
if (dup_sack && (sacked & TCPCB_RETRANS)) {
- if (tp->undo_marker && tp->undo_retrans &&
+ if (tp->undo_marker && tp->undo_retrans > 0 &&
after(end_seq, tp->undo_marker))
tp->undo_retrans--;
if (sacked & TCPCB_SACKED_ACKED)
@@ -2226,7 +2226,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
tp->lost_out = 0;

tp->undo_marker = 0;
- tp->undo_retrans = 0;
+ tp->undo_retrans = -1;
}

void tcp_clear_retrans(struct tcp_sock *tp)
@@ -3165,7 +3165,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
tp->high_seq = tp->snd_nxt;
tp->prior_ssthresh = 0;
tp->undo_marker = tp->snd_una;
- tp->undo_retrans = tp->retrans_out;
+ tp->undo_retrans = tp->retrans_out ? : -1;

if (icsk->icsk_ca_state < TCP_CA_CWR) {
if (!(flag & FLAG_ECE))

diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 987f5cc706b4..fd414b61f966 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2194,13 +2194,15 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
if (!tp->retrans_stamp)
tp->retrans_stamp = TCP_SKB_CB(skb)->when;

- tp->undo_retrans += tcp_skb_pcount(skb);
-
/* snd_nxt is stored to detect loss of retransmitted segment,
* see tcp_input.c tcp_sacktag_write_queue().
*/
TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
}
+
+ if (tp->undo_retrans < 0)
+ tp->undo_retrans = 0;
+ tp->undo_retrans += tcp_skb_pcount(skb);
return err;
}
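
Taken together, the tcp_input.c and tcp_output.c hunks make undo_retrans a signed counter in which -1 means "no retransmissions since undo_marker was set", so forged D-SACKs can no longer walk the counter through zero and trigger a bogus congestion-window undo. The state machine in isolation:

/* Illustrative model of the undo_retrans sentinel. */
static int undo_retrans = -1;      /* sentinel: nothing retransmitted */

static void on_retransmit(int pcount)
{
    if (undo_retrans < 0)
        undo_retrans = 0;          /* first retransmit: start counting */
    undo_retrans += pcount;
}

static void on_dsack(void)
{
    if (undo_retrans > 0)          /* '> 0', never '!= 0': the -1 */
        undo_retrans--;            /* sentinel must not be decremented */
}
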
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 8a84017834c2..57da44707eb1 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -373,9 +373,10 @@ fail:
* specification [SCTP] and any extensions for a list of possible
* error formats.
*/
-struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
- const struct sctp_association *asoc, struct sctp_chunk *chunk,
- __u16 flags, gfp_t gfp)
+struct sctp_ulpevent *
+sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk, __u16 flags,
+ gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sctp_remote_error *sre;
@@ -394,8 +395,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
/* Copy the skb to a new skb with room for us to prepend
* notification with.
*/
- skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
- 0, gfp);
+ skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);

/* Pull off the rest of the cause TLV from the chunk. */
skb_pull(chunk->skb, elen);
@@ -406,62 +406,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
event = sctp_skb2event(skb);
sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);

- sre = (struct sctp_remote_error *)
- skb_push(skb, sizeof(struct sctp_remote_error));
+ sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));

/* Trim the buffer to the right length. */
- skb_trim(skb, sizeof(struct sctp_remote_error) + elen);
+ skb_trim(skb, sizeof(*sre) + elen);

- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_type:
- * It should be SCTP_REMOTE_ERROR.
- */
+ /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
+ memset(sre, 0, sizeof(*sre));
sre->sre_type = SCTP_REMOTE_ERROR;
-
- /*
- * Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_flags: 16 bits (unsigned integer)
- * Currently unused.
- */
sre->sre_flags = 0;
-
- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_length: sizeof (__u32)
- *
- * This field is the total length of the notification data,
- * including the notification header.
- */
sre->sre_length = skb->len;
-
- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_error: 16 bits (unsigned integer)
- * This value represents one of the Operational Error causes defined in
- * the SCTP specification, in network byte order.
- */
sre->sre_error = cause;
-
- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_assoc_id: sizeof (sctp_assoc_t)
- *
- * The association id field, holds the identifier for the association.
- * All notifications for a given association have the same association
- * identifier. For TCP style socket, this field is ignored.
- */
sctp_ulpevent_set_owner(event, asoc);
sre->sre_assoc_id = sctp_assoc2id(asoc);

return event;
-
fail:
return NULL;
}
@@ -904,7 +863,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
return notification->sn_header.sn_type;
}

-/* Copy out the sndrcvinfo into a msghdr. */
+/* RFC6458, Section 5.3.2. SCTP Header Information Structure
+ * (SCTP_SNDRCV, DEPRECATED)
+ */
void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
struct msghdr *msghdr)
{
@@ -913,74 +874,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
if (sctp_ulpevent_is_notification(event))
return;

- /* Sockets API Extensions for SCTP
- * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
- *
- * sinfo_stream: 16 bits (unsigned integer)
- *
- * For recvmsg() the SCTP stack places the message's stream number in
- * this value.
- */
+ memset(&sinfo, 0, sizeof(sinfo));
sinfo.sinfo_stream = event->stream;
- /* sinfo_ssn: 16 bits (unsigned integer)
- *
- * For recvmsg() this value contains the stream sequence number that
- * the remote endpoint placed in the DATA chunk. For fragmented
- * messages this is the same number for all deliveries of the message
- * (if more than one recvmsg() is needed to read the message).
- */
sinfo.sinfo_ssn = event->ssn;
- /* sinfo_ppid: 32 bits (unsigned integer)
- *
- * In recvmsg() this value is
- * the same information that was passed by the upper layer in the peer
- * application. Please note that byte order issues are NOT accounted
- * for and this information is passed opaquely by the SCTP stack from
- * one end to the other.
- */
sinfo.sinfo_ppid = event->ppid;
- /* sinfo_flags: 16 bits (unsigned integer)
- *
- * This field may contain any of the following flags and is composed of
- * a bitwise OR of these values.
- *
- * recvmsg() flags:
- *
- * SCTP_UNORDERED - This flag is present when the message was sent
- * non-ordered.
- */
sinfo.sinfo_flags = event->flags;
- /* sinfo_tsn: 32 bit (unsigned integer)
- *
- * For the receiving side, this field holds a TSN that was
- * assigned to one of the SCTP Data Chunks.
- */
sinfo.sinfo_tsn = event->tsn;
- /* sinfo_cumtsn: 32 bit (unsigned integer)
- *
- * This field will hold the current cumulative TSN as
- * known by the underlying SCTP layer. Note this field is
- * ignored when sending and only valid for a receive
- * operation when sinfo_flags are set to SCTP_UNORDERED.
- */
sinfo.sinfo_cumtsn = event->cumtsn;
- /* sinfo_assoc_id: sizeof (sctp_assoc_t)
- *
- * The association handle field, sinfo_assoc_id, holds the identifier
- * for the association announced in the COMMUNICATION_UP notification.
- * All notifications for a given association have the same identifier.
- * Ignored for one-to-one style sockets.
- */
sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
-
- /* context value that is set via SCTP_CONTEXT socket option. */
+ /* Context value that is set via SCTP_CONTEXT socket option. */
sinfo.sinfo_context = event->asoc->default_rcv_context;
-
/* These fields are not used while receiving. */
sinfo.sinfo_timetolive = 0;

put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
- sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
+ sizeof(sinfo), &sinfo);
}

/* Do accounting for bytes received and hold a reference to the association
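
Beyond the comment trimming, the functional change in both functions is the memset() before the field assignments: these structures reach userspace (through the notification skb and put_cmsg()), and zeroing first keeps padding bytes and unwritten members from leaking stale kernel memory. The pattern in miniature (struct note is hypothetical):

#include <string.h>

struct note {                 /* implicit padding after 'type' */
    unsigned short type;
    unsigned int len;
};

static void fill_note(struct note *n, unsigned short type, unsigned int len)
{
    memset(n, 0, sizeof(*n)); /* zero padding and unset fields first */
    n->type = type;           /* then assign the known members */
    n->len = len;
    /* the whole struct is now safe to copy to userspace */
}
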
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index e00441a2092f..9495be3a61e0 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -541,6 +541,7 @@ receive:

buf = node->bclink.deferred_head;
node->bclink.deferred_head = buf->next;
+ buf->next = NULL;
node->bclink.deferred_size--;
goto receive;
}
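
The one-line TIPC fix clears buf->next after unlinking the head of the deferred queue; otherwise the buffer travels onward still chained to the rest of the list, and freeing it later can cascade into buffers the queue still owns. A generic unlink sketch:

/* Pop the head of a singly linked buffer chain (illustrative). */
struct buf {
    struct buf *next;
};

static struct buf *dequeue(struct buf **head)
{
    struct buf *b = *head;

    if (b) {
        *head = b->next;
        b->next = NULL;   /* detach fully: no stale chain pointer */
    }
    return b;
}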