mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-23 15:11:16 +00:00
workqueue: avoid recursion in run_workqueue()
1) lockdep will complain when run_workqueue() performs recursion. 2) The recursive implementation of run_workqueue() means that flush_workqueue() and its documentation are inconsistent. This may hide deadlocks and other bugs. 3) The recursion in run_workqueue() will poison cwq->current_work, but flush_work() and __cancel_work_timer(), etcetera need a reliable cwq->current_work. Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com> Acked-by: Oleg Nesterov <oleg@redhat.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Eric Dumazet <dada1@cosmosbay.com> Cc: Rusty Russell <rusty@rustcorp.com.au> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
1ee1184485
commit
2355b70fd5
1 changed file with 11 additions and 30 deletions
|
@ -48,8 +48,6 @@ struct cpu_workqueue_struct {
|
||||||
|
|
||||||
struct workqueue_struct *wq;
|
struct workqueue_struct *wq;
|
||||||
struct task_struct *thread;
|
struct task_struct *thread;
|
||||||
|
|
||||||
int run_depth; /* Detect run_workqueue() recursion depth */
|
|
||||||
} ____cacheline_aligned;
|
} ____cacheline_aligned;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -262,13 +260,6 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on);
|
||||||
static void run_workqueue(struct cpu_workqueue_struct *cwq)
|
static void run_workqueue(struct cpu_workqueue_struct *cwq)
|
||||||
{
|
{
|
||||||
spin_lock_irq(&cwq->lock);
|
spin_lock_irq(&cwq->lock);
|
||||||
cwq->run_depth++;
|
|
||||||
if (cwq->run_depth > 3) {
|
|
||||||
/* morton gets to eat his hat */
|
|
||||||
printk("%s: recursion depth exceeded: %d\n",
|
|
||||||
__func__, cwq->run_depth);
|
|
||||||
dump_stack();
|
|
||||||
}
|
|
||||||
while (!list_empty(&cwq->worklist)) {
|
while (!list_empty(&cwq->worklist)) {
|
||||||
struct work_struct *work = list_entry(cwq->worklist.next,
|
struct work_struct *work = list_entry(cwq->worklist.next,
|
||||||
struct work_struct, entry);
|
struct work_struct, entry);
|
||||||
|
@ -311,7 +302,6 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
|
||||||
spin_lock_irq(&cwq->lock);
|
spin_lock_irq(&cwq->lock);
|
||||||
cwq->current_work = NULL;
|
cwq->current_work = NULL;
|
||||||
}
|
}
|
||||||
cwq->run_depth--;
|
|
||||||
spin_unlock_irq(&cwq->lock);
|
spin_unlock_irq(&cwq->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -368,19 +358,11 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
|
||||||
|
|
||||||
static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
|
static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
|
||||||
{
|
{
|
||||||
int active;
|
int active = 0;
|
||||||
|
|
||||||
if (cwq->thread == current) {
|
|
||||||
/*
|
|
||||||
* Probably keventd trying to flush its own queue. So simply run
|
|
||||||
* it by hand rather than deadlocking.
|
|
||||||
*/
|
|
||||||
run_workqueue(cwq);
|
|
||||||
active = 1;
|
|
||||||
} else {
|
|
||||||
struct wq_barrier barr;
|
struct wq_barrier barr;
|
||||||
|
|
||||||
active = 0;
|
WARN_ON(cwq->thread == current);
|
||||||
|
|
||||||
spin_lock_irq(&cwq->lock);
|
spin_lock_irq(&cwq->lock);
|
||||||
if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
|
if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
|
||||||
insert_wq_barrier(cwq, &barr, &cwq->worklist);
|
insert_wq_barrier(cwq, &barr, &cwq->worklist);
|
||||||
|
@ -390,7 +372,6 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
|
||||||
|
|
||||||
if (active)
|
if (active)
|
||||||
wait_for_completion(&barr.done);
|
wait_for_completion(&barr.done);
|
||||||
}
|
|
||||||
|
|
||||||
return active;
|
return active;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue