Mirror of https://github.com/Fishwaldo/build.git, synced 2025-03-30 02:31:46 +00:00
diff --git a/Makefile b/Makefile
index efa1453..0027fbe 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 59
+SUBLEVEL = 60
EXTRAVERSION =
NAME = Saber-toothed Squirrel

diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 017d48a..f8b0260 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -213,6 +213,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
e820_add_region(start, end - start, type);
}

+void xen_ignore_unusable(struct e820entry *list, size_t map_size)
+{
+ struct e820entry *entry;
+ unsigned int i;
+
+ for (i = 0, entry = list; i < map_size; i++, entry++) {
+ if (entry->type == E820_UNUSABLE)
+ entry->type = E820_RAM;
+ }
+}
+
/**
* machine_specific_memory_setup - Hook for machine specific memory setup.
**/
@@ -251,6 +262,17 @@ char * __init xen_memory_setup(void)
}
BUG_ON(rc);

+ /*
+ * Xen won't allow a 1:1 mapping to be created to UNUSABLE
+ * regions, so if we're using the machine memory map leave the
+ * region as RAM as it is in the pseudo-physical map.
+ *
+ * UNUSABLE regions in domUs are not handled and will need
+ * a patch in the future.
+ */
+ if (xen_initial_domain())
+ xen_ignore_unusable(map, memmap.nr_entries);
+
/* Make sure the Xen-supplied memory map is well-ordered. */
sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index f63a588..f5c35be 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)

/* Disable sending Early R_OK.
* With "cached read" HDD testing and multiple ports busy on a SATA
- * host controller, 3726 PMP will very rarely drop a deferred
+ * host controller, 3x26 PMP will very rarely drop a deferred
* R_OK that was intended for the host. Symptom will be all
* 5 drives under test will timeout, get reset, and recover.
*/
- if (vendor == 0x1095 && devid == 0x3726) {
+ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
u32 reg;

err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
if (err_mask) {
rc = -EIO;
- reason = "failed to read Sil3726 Private Register";
+ reason = "failed to read Sil3x26 Private Register";
goto fail;
}
reg &= ~0x1;
err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
if (err_mask) {
rc = -EIO;
- reason = "failed to write Sil3726 Private Register";
+ reason = "failed to write Sil3x26 Private Register";
goto fail;
}
}
@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
u16 devid = sata_pmp_gscr_devid(gscr);
struct ata_link *link;

- if (vendor == 0x1095 && devid == 0x3726) {
- /* sil3726 quirks */
+ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+ /* sil3x26 quirks */
ata_for_each_link(link, ap, EDGE) {
/* link reports offline after LPM */
link->flags |= ATA_LFLAG_NO_LPM;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index dde62bf..d031932 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -502,6 +502,8 @@
will not assert AGPBUSY# and will only
be delivered when out of C3. */
#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
+#define INSTPM_TLB_INVALIDATE (1<<9)
+#define INSTPM_SYNC_FLUSH (1<<5)
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC2 0x020dc
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c17325c..99a9df8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -767,6 +767,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)

I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
POSTING_READ(mmio);
+
+ /* Flush the TLB for this page */
+ if (INTEL_INFO(dev)->gen >= 6) {
+ u32 reg = RING_INSTPM(ring->mmio_base);
+ I915_WRITE(reg,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+ 1000))
+ DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+ ring->name);
+ }
}

static int
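Aside (not part of the patch): the hunk above uses the driver's wait_for() helper to poll the INSTPM register until the hardware clears INSTPM_SYNC_FLUSH or a timeout expires. A minimal userspace sketch of that "kick, then poll with a deadline" pattern follows; the register and the hardware acknowledgement are simulated, and names such as fake_reg and hw_tick are hypothetical.

/* Sketch only: simulated register, not the i915 wait_for() implementation. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t fake_reg;                 /* stands in for an MMIO register */
#define SYNC_FLUSH (1u << 5)

static void hw_tick(void)                 /* simulated hardware progress */
{
	fake_reg &= ~SYNC_FLUSH;          /* hardware acks by clearing the bit */
}

static int wait_for_clear(uint32_t mask, long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (fake_reg & mask) {
		hw_tick();
		clock_gettime(CLOCK_MONOTONIC, &now);
		long elapsed = (now.tv_sec - start.tv_sec) * 1000 +
			       (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed > timeout_ms)
			return -1;        /* timed out, caller logs an error */
	}
	return 0;
}

int main(void)
{
	fake_reg |= SYNC_FLUSH;           /* request the flush */
	puts(wait_for_clear(SYNC_FLUSH, 1000) ? "timed out" : "flush completed");
	return 0;
}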
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 18054d9..dbec2ff 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -522,9 +522,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,

data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);

- memcpy(extra, &addr, sizeof(struct sockaddr) * data->length);
+ memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
data->flags = 1; /* has quality information */
- memcpy(extra + sizeof(struct sockaddr) * data->length, &qual,
+ memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
sizeof(struct iw_quality) * data->length);

kfree(addr);
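Aside (not part of the patch): the bug fixed above is passing &addr and &qual, the addresses of the pointer variables, where addr and qual, the heap buffers they point to, were meant. A hypothetical userspace illustration of the difference, with made-up names:

/* Sketch only: `list` plays the role of the kmalloc'ed addr/qual buffers. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry { int id; };

int main(void)
{
	struct entry *list = calloc(4, sizeof(*list));
	struct entry out[4];

	if (!list)
		return 1;
	for (int i = 0; i < 4; i++)
		list[i].id = i + 1;

	memcpy(out, list, sizeof(out));       /* correct: copies the entries */
	/* memcpy(out, &list, sizeof(out)) would copy the pointer variable
	 * itself plus whatever happens to follow it in memory. */

	printf("%d %d %d %d\n", out[0].id, out[1].id, out[2].id, out[3].id);
	free(list);
	return 0;
}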
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index a66b93b..1662fcc 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
goto exit;

err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
- USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
+ USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
if (err < 0)
goto exit;

+ memcpy(&ret, buf, sizeof(ret));
+
if (ret & 0x80) {
err = -EIO;
goto exit;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 91a375f..17fad3b 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -390,6 +390,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
mem = (unsigned long)
dt_alloc(size + 4, __alignof__(struct device_node));

+ memset((void *)mem, 0, size);
+
((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);

pr_debug(" unflattening %lx...\n", mem);
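Aside (not part of the patch): the allocator behind dt_alloc() does not guarantee zeroed memory, so the hunk above clears the unflatten buffer before code that assumes unset fields read as zero. A hypothetical userspace analogue of that defensive pattern, with made-up names:

/* Sketch only: malloc(), like dt_alloc() here, returns indeterminate bytes. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node {
	const char *name;
	struct node *next;   /* code elsewhere assumes NULL means "no sibling" */
};

int main(void)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return 1;
	memset(n, 0, sizeof(*n));   /* without this, n->next is indeterminate */
	n->name = "root";

	printf("next is %s\n", n->next ? "set" : "NULL as expected");
	free(n);
	return 0;
}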
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index e1b4f80..5c87270 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)

if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
zfcp_erp_action_dismiss(&port->erp_action);
- else
- shost_for_each_device(sdev, port->adapter->scsi_host)
+ else {
+ spin_lock(port->adapter->scsi_host->host_lock);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
zfcp_erp_action_dismiss_lun(sdev);
+ spin_unlock(port->adapter->scsi_host->host_lock);
+ }
}

static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
{
struct scsi_device *sdev;

- shost_for_each_device(sdev, port->adapter->scsi_host)
+ spin_lock(port->adapter->scsi_host->host_lock);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
_zfcp_erp_lun_reopen(sdev, clear, id, 0);
+ spin_unlock(port->adapter->scsi_host->host_lock);
}

static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -1435,8 +1440,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
atomic_set_mask(common_mask, &port->status);
read_unlock_irqrestore(&adapter->port_list_lock, flags);

- shost_for_each_device(sdev, adapter->scsi_host)
+ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, adapter->scsi_host)
atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}

/**
@@ -1470,11 +1477,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);

- shost_for_each_device(sdev, adapter->scsi_host) {
+ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, adapter->scsi_host) {
atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
if (clear_counter)
atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
}
+ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}

/**
@@ -1488,16 +1497,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
{
struct scsi_device *sdev;
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+ unsigned long flags;

atomic_set_mask(mask, &port->status);

if (!common_mask)
return;

- shost_for_each_device(sdev, port->adapter->scsi_host)
+ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
atomic_set_mask(common_mask,
&sdev_to_zfcp(sdev)->status);
+ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}

/**
@@ -1512,6 +1524,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
struct scsi_device *sdev;
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
+ unsigned long flags;

atomic_clear_mask(mask, &port->status);

@@ -1521,13 +1534,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
if (clear_counter)
atomic_set(&port->erp_counter, 0);

- shost_for_each_device(sdev, port->adapter->scsi_host)
+ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port) {
atomic_clear_mask(common_mask,
&sdev_to_zfcp(sdev)->status);
if (clear_counter)
atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
}
+ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}

/**
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index e76d003..52c6b59 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,

static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
- spin_lock_irq(&qdio->req_q_lock);
if (atomic_read(&qdio->req_q_free) ||
!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return 1;
- spin_unlock_irq(&qdio->req_q_lock);
return 0;
}

@@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
long ret;

- spin_unlock_irq(&qdio->req_q_lock);
- ret = wait_event_interruptible_timeout(qdio->req_q_wq,
- zfcp_qdio_sbal_check(qdio), 5 * HZ);
+ ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
+ zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);

if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return -EIO;
@@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
}

- spin_lock_irq(&qdio->req_q_lock);
return -EIO;
}

diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 417c133..33dcad6 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -324,7 +324,7 @@ static void init_evtchn_cpu_bindings(void)

for_each_possible_cpu(i)
memset(per_cpu(cpu_evtchn_mask, i),
- (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
+ (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
}

static inline void clear_evtchn(int port)
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index dc9a913..2d8be51 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -345,8 +345,7 @@ static void nilfs_end_bio_write(struct bio *bio, int err)

if (err == -EOPNOTSUPP) {
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
- bio_put(bio);
- /* to be detected by submit_seg_bio() */
+ /* to be detected by nilfs_segbuf_submit_bio() */
}

if (!uptodate)
@@ -377,12 +376,12 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
bio->bi_private = segbuf;
bio_get(bio);
submit_bio(mode, bio);
+ segbuf->sb_nbio++;
if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
bio_put(bio);
err = -EOPNOTSUPP;
goto failed;
}
- segbuf->sb_nbio++;
bio_put(bio);

wi->bio = NULL;
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 6c6c20e..b305b31 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -530,6 +530,63 @@ do { \
? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))


+#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
+ lock, ret) \
+do { \
+ DEFINE_WAIT(__wait); \
+ \
+ for (;;) { \
+ prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (signal_pending(current)) { \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
+ spin_unlock_irq(&lock); \
+ ret = schedule_timeout(ret); \
+ spin_lock_irq(&lock); \
+ if (!ret) \
+ break; \
+ } \
+ finish_wait(&wq, &__wait); \
+} while (0)
+
+/**
+ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
+ * The condition is checked under the lock. This is expected
+ * to be called with the lock taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before schedule()
+ * and reacquired afterwards.
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or signal is received. The @condition is
+ * checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before going to sleep and is reacquired afterwards.
+ *
+ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
+ * was interrupted by a signal, and the remaining jiffies otherwise
+ * if the condition evaluated to true before the timeout elapsed.
+ */
+#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
+ timeout) \
+({ \
+ int __ret = timeout; \
+ \
+ if (!(condition)) \
+ __wait_event_interruptible_lock_irq_timeout( \
+ wq, condition, lock, __ret); \
+ __ret; \
+})
+

#define __wait_event_killable(wq, condition, ret) \
do { \
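Aside (not part of the patch): the macro added above releases the spinlock around schedule_timeout() and retakes it before rechecking the condition, which is what lets zfcp_qdio_sbal_get() (earlier in this diff) sleep while zfcp_qdio_sbal_check() still runs under req_q_lock. A rough userspace analogue of the same pattern, assuming POSIX threads, is pthread_cond_timedwait(), which atomically drops the mutex while sleeping and holds it again on return. Names such as queue_ready() are hypothetical.

/* Sketch only: a pthread analogue, not the kernel macro. Build with -pthread. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool ready;                      /* the "condition", protected by lock */

static bool queue_ready(void) { return ready; }

/* Call with the lock held; returns with the lock held.
 * Returns 0 if the condition became true, ETIMEDOUT otherwise. */
static int wait_ready_timeout(long timeout_sec)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_sec;

	while (!queue_ready()) {
		/* Drops the lock while sleeping, reacquires it before returning. */
		int err = pthread_cond_timedwait(&cond, &lock, &deadline);
		if (err == ETIMEDOUT)
			return ETIMEDOUT;
	}
	return 0;
}

int main(void)
{
	pthread_mutex_lock(&lock);
	int err = wait_ready_timeout(1);    /* nobody signals, so this times out */
	pthread_mutex_unlock(&lock);

	printf("%s\n", err == ETIMEDOUT ? "timed out" : "condition met");
	return 0;
}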
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a64b94e..575d092 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -128,6 +128,7 @@ struct worker {
};

struct work_struct *current_work; /* L: work being processed */
+ work_func_t current_func; /* L: current_work's fn */
struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
struct list_head scheduled; /* L: scheduled works */
struct task_struct *task; /* I: worker task */
@@ -838,7 +839,8 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
struct hlist_node *tmp;

hlist_for_each_entry(worker, tmp, bwh, hentry)
- if (worker->current_work == work)
+ if (worker->current_work == work &&
+ worker->current_func == work->func)
return worker;
return NULL;
}
@@ -848,9 +850,27 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
* @gcwq: gcwq of interest
* @work: work to find worker for
*
- * Find a worker which is executing @work on @gcwq. This function is
- * identical to __find_worker_executing_work() except that this
- * function calculates @bwh itself.
+ * Find a worker which is executing @work on @gcwq by searching
+ * @gcwq->busy_hash which is keyed by the address of @work. For a worker
+ * to match, its current execution should match the address of @work and
+ * its work function. This is to avoid unwanted dependency between
+ * unrelated work executions through a work item being recycled while still
+ * being executed.
+ *
+ * This is a bit tricky. A work item may be freed once its execution
+ * starts and nothing prevents the freed area from being recycled for
+ * another work item. If the same work item address ends up being reused
+ * before the original execution finishes, workqueue will identify the
+ * recycled work item as currently executing and make it wait until the
+ * current execution finishes, introducing an unwanted dependency.
+ *
+ * This function checks the work item address, work function and workqueue
+ * to avoid false positives. Note that this isn't complete as one may
+ * construct a work function which can introduce dependency onto itself
+ * through a recycled work item. Well, if somebody wants to shoot oneself
+ * in the foot that badly, there's only so much we can do, and if such
+ * deadlock actually occurs, it should be easy to locate the culprit work
+ * function.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
@@ -1721,10 +1741,9 @@ static void move_linked_works(struct work_struct *work, struct list_head *head,
*nextp = n;
}

-static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
+static void cwq_activate_delayed_work(struct work_struct *work)
{
- struct work_struct *work = list_first_entry(&cwq->delayed_works,
- struct work_struct, entry);
+ struct cpu_workqueue_struct *cwq = get_work_cwq(work);
struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);

trace_workqueue_activate_work(work);
@@ -1733,6 +1752,14 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
cwq->nr_active++;
}

+static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
+{
+ struct work_struct *work = list_first_entry(&cwq->delayed_works,
+ struct work_struct, entry);
+
+ cwq_activate_delayed_work(work);
+}
+
/**
* cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
* @cwq: cwq of interest
@@ -1804,7 +1831,6 @@ __acquires(&gcwq->lock)
struct global_cwq *gcwq = cwq->gcwq;
struct hlist_head *bwh = busy_worker_head(gcwq, work);
bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
- work_func_t f = work->func;
int work_color;
struct worker *collision;
#ifdef CONFIG_LOCKDEP
@@ -1833,6 +1859,7 @@ __acquires(&gcwq->lock)
debug_work_deactivate(work);
hlist_add_head(&worker->hentry, bwh);
worker->current_work = work;
+ worker->current_func = work->func;
worker->current_cwq = cwq;
work_color = get_work_color(work);

@@ -1870,7 +1897,7 @@ __acquires(&gcwq->lock)
lock_map_acquire_read(&cwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map);
trace_workqueue_execute_start(work);
- f(work);
+ worker->current_func(work);
/*
* While we must be careful to not use "work" after this, the trace
* point will only record its address.
@@ -1880,11 +1907,10 @@ __acquires(&gcwq->lock)
lock_map_release(&cwq->wq->lockdep_map);

if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
- printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
- "%s/0x%08x/%d\n",
- current->comm, preempt_count(), task_pid_nr(current));
- printk(KERN_ERR " last function: ");
- print_symbol("%s\n", (unsigned long)f);
+ pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
+ " last function: %pf\n",
+ current->comm, preempt_count(), task_pid_nr(current),
+ worker->current_func);
debug_show_held_locks(current);
dump_stack();
}
@@ -1898,6 +1924,7 @@ __acquires(&gcwq->lock)
/* we're done with it, release */
hlist_del_init(&worker->hentry);
worker->current_work = NULL;
+ worker->current_func = NULL;
worker->current_cwq = NULL;
cwq_dec_nr_in_flight(cwq, work_color, false);
}
@@ -2625,6 +2652,18 @@ static int try_to_grab_pending(struct work_struct *work)
smp_rmb();
if (gcwq == get_work_gcwq(work)) {
debug_work_deactivate(work);
+
+ /*
+ * A delayed work item cannot be grabbed directly
+ * because it might have linked NO_COLOR work items
+ * which, if left on the delayed_list, will confuse
+ * cwq->nr_active management later on and cause
+ * stall. Make sure the work item is activated
+ * before grabbing.
+ */
+ if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
+ cwq_activate_delayed_work(work);
+
list_del_init(&work->entry);
cwq_dec_nr_in_flight(get_work_cwq(work),
get_work_color(work),
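Aside (not part of the patch): the workqueue hunks above record worker->current_func and match busy workers on both the work item's address and its function, because a work item may be freed while it is still executing and its memory recycled for an unrelated item at the same address. The hypothetical userspace sketch below shows the false positive that address-only matching produces when a fixed slot is reused; all names (slot, busy_record, flush_cache, write_log) are made up.

/* Sketch only: a reused slot stands in for a recycled work_struct. */
#include <stdbool.h>
#include <stdio.h>

typedef void (*work_func_t)(void);

struct work_item {
	work_func_t func;
};

struct busy_record {              /* what the worker remembers while running */
	const struct work_item *addr;
	work_func_t func;
};

static void flush_cache(void) { }
static void write_log(void)   { }

static bool match_by_addr(const struct busy_record *b, const struct work_item *w)
{
	return b->addr == w;
}

static bool match_by_addr_and_func(const struct busy_record *b,
				   const struct work_item *w)
{
	return b->addr == w && b->func == w->func;
}

int main(void)
{
	static struct work_item slot;                    /* memory that gets reused */

	slot.func = flush_cache;
	struct busy_record busy = { &slot, slot.func };  /* "still executing" */

	slot.func = write_log;                           /* slot recycled for new work */

	printf("address only      : %s\n",
	       match_by_addr(&busy, &slot) ? "false positive" : "no match");
	printf("address + function: %s\n",
	       match_by_addr_and_func(&busy, &slot) ? "false positive" : "no match");
	return 0;
}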