unify flush_work/flush_work_keventd and rename it to cancel_work_sync
flush_work(wq, work) doesn't need the first parameter; we can use cwq->wq (this was possible from the very beginning, I missed this). So we can unify flush_work_keventd and flush_work. Also, rename flush_work() to cancel_work_sync() and fix all callers. Perhaps this is not the best name, but "flush_work" is really bad.

(akpm: this is why the earlier patches bypassed maintainers)

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Tejun Heo <htejun@gmail.com>
Cc: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5830c59021
commit 28e53bddf8
9 changed files with 41 additions and 40 deletions
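As a rough caller-side illustration of the rename (hypothetical driver code, not part of this commit): a work item that previously had to be flushed with the owning workqueue handle, or with flush_work_keventd() when it was scheduled on keventd, is now covered by a single call that takes only the work_struct.

#include <linux/workqueue.h>

/* foo_dev, reset_work and foo_remove are made-up names, for illustration only. */
struct foo_dev {
	struct workqueue_struct *wq;    /* driver-private workqueue */
	struct work_struct reset_work;  /* queued on dev->wq or via schedule_work() */
};

static void foo_remove(struct foo_dev *dev)
{
	/*
	 * Before this patch the caller had to name the queue:
	 *         flush_work(dev->wq, &dev->reset_work);
	 * or, for work queued with schedule_work():
	 *         flush_work_keventd(&dev->reset_work);
	 * Afterwards one call covers both cases; the queue is looked up
	 * internally from the pending work itself (cwq->wq).
	 */
	cancel_work_sync(&dev->reset_work);
	destroy_workqueue(dev->wq);
}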
@@ -3633,7 +3633,7 @@ EXPORT_SYMBOL(kblockd_schedule_work);

 void kblockd_flush_work(struct work_struct *work)
 {
-	flush_work(kblockd_workqueue, work);
+	cancel_work_sync(work);
 }
 EXPORT_SYMBOL(kblockd_flush_work);

@@ -1316,7 +1316,7 @@ void ata_port_flush_task(struct ata_port *ap)
 	spin_unlock_irqrestore(ap->lock, flags);

 	DPRINTK("flush #1\n");
-	flush_work(ata_wq, &ap->port_task.work); /* akpm: seems unneeded */
+	cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */

 	/*
 	 * At this point, if a task is running, it's guaranteed to see
@@ -1327,7 +1327,7 @@ void ata_port_flush_task(struct ata_port *ap)
 		if (ata_msg_ctl(ap))
 			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
 					__FUNCTION__);
-		flush_work(ata_wq, &ap->port_task.work);
+		cancel_work_sync(&ap->port_task.work);
 	}

 	spin_lock_irqsave(ap->lock, flags);
@@ -6475,9 +6475,9 @@ void ata_port_detach(struct ata_port *ap)
 	/* Flush hotplug task. The sequence is similar to
 	 * ata_port_flush_task().
 	 */
-	flush_work(ata_aux_wq, &ap->hotplug_task.work); /* akpm: why? */
+	cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */
 	cancel_delayed_work(&ap->hotplug_task);
-	flush_work(ata_aux_wq, &ap->hotplug_task.work);
+	cancel_work_sync(&ap->hotplug_task.work);

  skip_eh:
 	/* remove the associated SCSI host */
@@ -1214,7 +1214,7 @@ e1000_remove(struct pci_dev *pdev)
 	int i;
 #endif

-	flush_work_keventd(&adapter->reset_task);
+	cancel_work_sync(&adapter->reset_task);

 	e1000_release_manageability(adapter);

@@ -663,9 +663,9 @@ int phy_stop_interrupts(struct phy_device *phydev)

 	/*
 	 * Finish any pending work; we might have been scheduled to be called
-	 * from keventd ourselves, but flush_work_keventd() handles that.
+	 * from keventd ourselves, but cancel_work_sync() handles that.
 	 */
-	flush_work_keventd(&phydev->phy_queue);
+	cancel_work_sync(&phydev->phy_queue);

 	free_irq(phydev->irq, phydev);

@@ -7386,7 +7386,7 @@ static int tg3_close(struct net_device *dev)
 {
 	struct tg3 *tp = netdev_priv(dev);

-	flush_work_keventd(&tp->reset_task);
+	cancel_work_sync(&tp->reset_task);

 	netif_stop_queue(dev);

fs/aio.c
@@ -348,7 +348,7 @@ void fastcall exit_aio(struct mm_struct *mm)
 		/*
 		 * Ensure we don't leave the ctx on the aio_wq
 		 */
-		flush_work(aio_wq, &ctx->wq.work);
+		cancel_work_sync(&ctx->wq.work);

 		if (1 != atomic_read(&ctx->users))
 			printk(KERN_DEBUG
@@ -371,7 +371,7 @@ void fastcall __put_ioctx(struct kioctx *ctx)
 	BUG_ON(ctx->reqs_active);

 	cancel_delayed_work(&ctx->wq);
-	flush_work(aio_wq, &ctx->wq.work);
+	cancel_work_sync(&ctx->wq.work);
 	aio_free_ring(ctx);
 	mmdrop(ctx->mm);
 	ctx->mm = NULL;
@@ -128,30 +128,33 @@ extern struct workqueue_struct *__create_workqueue(const char *name,
 extern void destroy_workqueue(struct workqueue_struct *wq);

 extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
-extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay));
+extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq,
+			struct delayed_work *work, unsigned long delay));
 extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			struct delayed_work *work, unsigned long delay);

 extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
-extern void flush_work(struct workqueue_struct *wq, struct work_struct *work);
-extern void flush_work_keventd(struct work_struct *work);
+extern void flush_scheduled_work(void);

 extern int FASTCALL(schedule_work(struct work_struct *work));
-extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
-extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
+extern int FASTCALL(schedule_delayed_work(struct delayed_work *work,
+					unsigned long delay));
+extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
+					unsigned long delay);
 extern int schedule_on_each_cpu(work_func_t func);
-extern void flush_scheduled_work(void);
 extern int current_is_keventd(void);
 extern int keventd_up(void);

 extern void init_workqueues(void);
 int execute_in_process_context(work_func_t fn, struct execute_work *);

+extern void cancel_work_sync(struct work_struct *work);
+
 /*
  * Kill off a pending schedule_delayed_work(). Note that the work callback
  * function may still be running on return from cancel_delayed_work(), unless
  * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
- * flush_work() or cancel_work_sync() to wait on it.
+ * cancel_work_sync() to wait on it.
  */
 static inline int cancel_delayed_work(struct delayed_work *work)
 {
@@ -413,23 +413,23 @@ static void wait_on_work(struct cpu_workqueue_struct *cwq,
 }

 /**
- * flush_work - block until a work_struct's callback has terminated
- * @wq: the workqueue on which the work is queued
+ * cancel_work_sync - block until a work_struct's callback has terminated
  * @work: the work which is to be flushed
  *
- * flush_work() will attempt to cancel the work if it is queued. If the work's
- * callback appears to be running, flush_work() will block until it has
- * completed.
+ * cancel_work_sync() will attempt to cancel the work if it is queued. If the
+ * work's callback appears to be running, cancel_work_sync() will block until
+ * it has completed.
  *
- * flush_work() is designed to be used when the caller is tearing down data
- * structures which the callback function operates upon. It is expected that,
- * prior to calling flush_work(), the caller has arranged for the work to not
- * be requeued.
+ * cancel_work_sync() is designed to be used when the caller is tearing down
+ * data structures which the callback function operates upon. It is expected
+ * that, prior to calling cancel_work_sync(), the caller has arranged for the
+ * work to not be requeued.
  */
-void flush_work(struct workqueue_struct *wq, struct work_struct *work)
+void cancel_work_sync(struct work_struct *work)
 {
-	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	struct cpu_workqueue_struct *cwq;
+	struct workqueue_struct *wq;
+	const cpumask_t *cpu_map;
 	int cpu;

 	might_sleep();
@@ -448,10 +448,13 @@ void flush_work(struct workqueue_struct *wq, struct work_struct *work)
 	work_clear_pending(work);
 	spin_unlock_irq(&cwq->lock);

+	wq = cwq->wq;
+	cpu_map = wq_cpu_map(wq);
+
 	for_each_cpu_mask(cpu, *cpu_map)
 		wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
-EXPORT_SYMBOL_GPL(flush_work);
+EXPORT_SYMBOL_GPL(cancel_work_sync);


 static struct workqueue_struct *keventd_wq;
@@ -540,18 +543,13 @@ void flush_scheduled_work(void)
 }
 EXPORT_SYMBOL(flush_scheduled_work);

-void flush_work_keventd(struct work_struct *work)
-{
-	flush_work(keventd_wq, work);
-}
-EXPORT_SYMBOL(flush_work_keventd);
-
 /**
  * cancel_rearming_delayed_work - kill off a delayed work whose handler rearms the delayed work.
  * @dwork: the delayed work struct
  *
  * Note that the work callback function may still be running on return from
- * cancel_delayed_work(). Run flush_workqueue() or flush_work() to wait on it.
+ * cancel_delayed_work(). Run flush_workqueue() or cancel_work_sync() to wait
+ * on it.
  */
 void cancel_rearming_delayed_work(struct delayed_work *dwork)
 {
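The rewritten kernel-doc above spells out the caller contract: arrange for the work not to be requeued, then call cancel_work_sync(); the owning workqueue is recovered internally via cwq->wq, which is why the wq argument could be dropped. A minimal sketch of that tear-down ordering for a delayed work whose handler does not rearm itself, mirroring the fs/aio.c hunks (foo_ctx and foo_ctx_teardown are hypothetical names); self-rearming handlers go through cancel_rearming_delayed_work() first, as in the ip_vs hunk below.

#include <linux/workqueue.h>

struct foo_ctx {
	struct delayed_work dwork;	/* handler does not rearm itself */
};

static void foo_ctx_teardown(struct foo_ctx *ctx)
{
	/* Step 1: kill the pending timer so the work cannot become queued later. */
	cancel_delayed_work(&ctx->dwork);
	/* Step 2: cancel the queued work, or wait for a running callback to finish. */
	cancel_work_sync(&ctx->dwork.work);
	/* Only now is it safe to free anything the callback dereferences. */
}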
@@ -2387,7 +2387,7 @@ void ip_vs_control_cleanup(void)
 	EnterFunction(2);
 	ip_vs_trash_cleanup();
 	cancel_rearming_delayed_work(&defense_work);
-	flush_work_keventd(&defense_work.work);
+	cancel_work_sync(&defense_work.work);
 	ip_vs_kill_estimator(&ip_vs_stats);
 	unregister_sysctl_table(sysctl_header);
 	proc_net_remove("ip_vs_stats");