mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-23 15:11:16 +00:00
workqueue: don't use WQ_HIGHPRI for unbound workqueues
Unbound wqs aren't concurrency-managed and try to execute work items as soon as possible. This is currently achieved by implicitly setting %WQ_HIGHPRI on all unbound workqueues; however, the WQ_HIGHPRI implementation is about to be restructured and this usage won't be valid anymore. Add an explicit chain-wakeup path for unbound workqueues in process_one_work() instead of piggybacking on %WQ_HIGHPRI. Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
parent
918227bb1b
commit
974271c485
1 changed file with 11 additions and 7 deletions
|
@@ -580,6 +580,10 @@ static bool __need_more_worker(struct global_cwq *gcwq)
 /*
  * Need to wake up a worker?  Called from anything but currently
  * running workers.
+ *
+ * Note that, because unbound workers never contribute to nr_running, this
+ * function will always return %true for unbound gcwq as long as the
+ * worklist isn't empty.
  */
 static bool need_more_worker(struct global_cwq *gcwq)
 {
@@ -1867,6 +1871,13 @@ __acquires(&gcwq->lock)
 	if (unlikely(cpu_intensive))
 		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
 
+	/*
+	 * Unbound gcwq isn't concurrency managed and work items should be
+	 * executed ASAP.  Wake up another worker if necessary.
+	 */
+	if ((worker->flags & WORKER_UNBOUND) && need_more_worker(gcwq))
+		wake_up_worker(gcwq);
+
 	spin_unlock_irq(&gcwq->lock);
 
 	work_clear_pending(work);
@@ -2984,13 +2995,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	if (flags & WQ_MEM_RECLAIM)
 		flags |= WQ_RESCUER;
 
-	/*
-	 * Unbound workqueues aren't concurrency managed and should be
-	 * dispatched to workers immediately.
-	 */
-	if (flags & WQ_UNBOUND)
-		flags |= WQ_HIGHPRI;
-
 	max_active = max_active ?: WQ_DFL_ACTIVE;
 	max_active = wq_clamp_max_active(max_active, flags, wq->name);
Loading…
Add table
Add a link
Reference in a new issue