workqueue: implement flush[_delayed]_work_sync()
Implement flush[_delayed]_work_sync(). These are flush functions
which also make sure no CPU is still executing the target work from
earlier queueing instances. These are similar to
cancel[_delayed]_work_sync() except that the target work item is
flushed instead of cancelled.

Signed-off-by: Tejun Heo <tj@kernel.org>
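As an illustrative usage sketch (not part of this commit; the driver,
struct, and field names below are hypothetical), a teardown path that
wants queued work to run to completion rather than be cancelled could
use the new primitives like this:

    #include <linux/workqueue.h>

    /* hypothetical driver state; names are for illustration only */
    struct mydev {
            struct work_struct io_work;
            struct delayed_work poll_work;
    };

    static void mydev_teardown(struct mydev *dev)
    {
            /*
             * Unlike cancel_work_sync(), this lets queued instances
             * run to completion and also waits for any CPU still
             * executing io_work from earlier queueing instances.
             */
            flush_work_sync(&dev->io_work);

            /*
             * Same semantics for delayed work: the timer is cancelled,
             * pending work is queued immediately, then flushed.
             */
            flush_delayed_work_sync(&dev->poll_work);
    }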
parent baf59022c3
commit 09383498c5

2 changed files with 58 additions and 0 deletions
include/linux/workqueue.h

@@ -355,9 +355,11 @@ extern int keventd_up(void);
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
 extern bool flush_work(struct work_struct *work);
+extern bool flush_work_sync(struct work_struct *work);
 extern bool cancel_work_sync(struct work_struct *work);
 
 extern bool flush_delayed_work(struct delayed_work *dwork);
+extern bool flush_delayed_work_sync(struct delayed_work *work);
 extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
kernel/workqueue.c

@@ -2435,6 +2435,41 @@ static bool wait_on_work(struct work_struct *work)
 	return ret;
 }
 
+/**
+ * flush_work_sync - wait until a work has finished execution
+ * @work: the work to flush
+ *
+ * Wait until @work has finished execution. On return, it's
+ * guaranteed that all queueing instances of @work which happened
+ * before this function is called are finished. In other words, if
+ * @work hasn't been requeued since this function was called, @work is
+ * guaranteed to be idle on return.
+ *
+ * RETURNS:
+ * %true if flush_work_sync() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_work_sync(struct work_struct *work)
+{
+	struct wq_barrier barr;
+	bool pending, waited;
+
+	/* we'll wait for executions separately, queue barr only if pending */
+	pending = start_flush_work(work, &barr, false);
+
+	/* wait for executions to finish */
+	waited = wait_on_work(work);
+
+	/* wait for the pending one */
+	if (pending) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+	}
+
+	return pending || waited;
+}
+EXPORT_SYMBOL_GPL(flush_work_sync);
+
 /*
  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
  * so this work can't be re-armed in any way.
@@ -2538,6 +2573,27 @@ bool flush_delayed_work(struct delayed_work *dwork)
 }
 EXPORT_SYMBOL(flush_delayed_work);
 
+/**
+ * flush_delayed_work_sync - wait for a dwork to finish
+ * @dwork: the delayed work to flush
+ *
+ * Delayed timer is cancelled and the pending work is queued for
+ * execution immediately. Other than timer handling, its behavior
+ * is identical to flush_work_sync().
+ *
+ * RETURNS:
+ * %true if flush_work_sync() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_delayed_work_sync(struct delayed_work *dwork)
+{
+	if (del_timer_sync(&dwork->timer))
+		__queue_work(raw_smp_processor_id(),
+			     get_work_cwq(&dwork->work)->wq, &dwork->work);
+	return flush_work_sync(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work_sync);
+
 /**
  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
  * @dwork: the delayed work cancel
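A caveat worth spelling out (illustrative, not part of the commit): the
idle-on-return guarantee in the flush_work_sync() kerneldoc above only
holds if @work is not requeued after the flush is called. For a
self-requeueing work item, the caller has to stop the re-arming first;
a minimal sketch, with a hypothetical stop flag whose synchronization
is elided:

    #include <linux/workqueue.h>

    static struct work_struct poll_work;
    static bool poll_stop;  /* illustrative; real code needs proper ordering */

    static void poll_fn(struct work_struct *work)
    {
            if (!poll_stop)
                    schedule_work(&poll_work);      /* work re-arms itself */
    }

    static void poll_quiesce(void)
    {
            poll_stop = true;               /* stop the re-arming first... */
            flush_work_sync(&poll_work);    /* ...then the flush leaves it idle */
    }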