Mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-07-06 14:31:46 +00:00)
drm/i915/gt: Flush submission tasklet before waiting/retiring
A common bane of ours is arbitrary delays in ksoftirqd processing our
submission tasklet. Give the submission tasklet a kick before we wait to
avoid those delays eating into a tight timeout.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Stuart Summers <stuart.summers@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191008105655.13256-1-chris@chris-wilson.co.uk
parent 23b9e41a3d
commit d99f7b079c
3 changed files with 34 additions and 14 deletions
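The idea behind the patch, sketched here as generic kernel code for clarity (the helper and its name are illustrative, not part of the commit; it uses the <linux/interrupt.h> tasklet API and the pre-5.9 t->func(t->data) calling convention that the patch itself relies on): if the work we are about to wait on is completed from a tasklet, running an already-scheduled tasklet inline removes ksoftirqd's scheduling latency from a tight timeout.

/*
 * Illustrative sketch only (not part of the patch): run an already-scheduled
 * tasklet ourselves so a subsequent short wait measures the hardware, not
 * ksoftirqd latency.
 */
#include <linux/interrupt.h>

static void kick_tasklet_before_wait(struct tasklet_struct *t)
{
	if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
		local_bh_disable();	/* run in a bh-safe context, as ksoftirqd would */
		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count))	/* skip if the tasklet is disabled */
				t->func(t->data);	/* pre-5.9 calling convention */
			tasklet_unlock(t);
		}
		local_bh_enable();
	}

	/* If it is running concurrently on another CPU, wait for it to finish. */
	tasklet_unlock_wait(t);
}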
drivers/gpu/drm/i915/gt/intel_engine.h
@@ -407,8 +407,9 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,
 	engine->serial++; /* contexts lost */
 }
 
-bool intel_engine_is_idle(struct intel_engine_cs *engine);
 bool intel_engines_are_idle(struct intel_gt *gt);
+bool intel_engine_is_idle(struct intel_engine_cs *engine);
+void intel_engine_flush_submission(struct intel_engine_cs *engine);
 
 void intel_engines_reset_default_submission(struct intel_gt *gt);
 
drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1040,6 +1040,25 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
 	return idle;
 }
 
+void intel_engine_flush_submission(struct intel_engine_cs *engine)
+{
+	struct tasklet_struct *t = &engine->execlists.tasklet;
+
+	if (__tasklet_is_scheduled(t)) {
+		local_bh_disable();
+		if (tasklet_trylock(t)) {
+			/* Must wait for any GPU reset in progress. */
+			if (__tasklet_is_enabled(t))
+				t->func(t->data);
+			tasklet_unlock(t);
+		}
+		local_bh_enable();
+	}
+
+	/* Otherwise flush the tasklet if it was running on another cpu */
+	tasklet_unlock_wait(t);
+}
+
 /**
  * intel_engine_is_idle() - Report if the engine has finished process all work
  * @engine: the intel_engine_cs
@@ -1058,21 +1077,9 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
 
 	/* Waiting to drain ELSP? */
 	if (execlists_active(&engine->execlists)) {
-		struct tasklet_struct *t = &engine->execlists.tasklet;
-
 		synchronize_hardirq(engine->i915->drm.pdev->irq);
 
-		local_bh_disable();
-		if (tasklet_trylock(t)) {
-			/* Must wait for any GPU reset in progress. */
-			if (__tasklet_is_enabled(t))
-				t->func(t->data);
-			tasklet_unlock(t);
-		}
-		local_bh_enable();
-
-		/* Otherwise flush the tasklet if it was on another cpu */
-		tasklet_unlock_wait(t);
+		intel_engine_flush_submission(engine);
 
 		if (execlists_active(&engine->execlists))
 			return false;
drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -4,6 +4,7 @@
  * Copyright © 2019 Intel Corporation
  */
 
+#include "i915_drv.h" /* for_each_engine() */
 #include "i915_request.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
@@ -19,6 +20,15 @@ static void retire_requests(struct intel_timeline *tl)
 			break;
 }
 
+static void flush_submission(struct intel_gt *gt)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	for_each_engine(engine, gt->i915, id)
+		intel_engine_flush_submission(engine);
+}
+
 long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 {
 	struct intel_gt_timelines *timelines = &gt->timelines;
@@ -32,6 +42,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 	if (unlikely(timeout < 0))
 		timeout = -timeout, interruptible = false;
 
+	flush_submission(gt); /* kick the ksoftirqd tasklets */
+
 	spin_lock_irqsave(&timelines->lock, flags);
 	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
 		if (!mutex_trylock(&tl->mutex)) {
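A hypothetical caller sketch (not part of the commit; the function name and the 200ms budget are invented for illustration) showing the intended usage pattern: flush the submission tasklet first so a bounded wait measures the GPU, not ksoftirqd.

/* Hypothetical caller (not in this commit): flush before a short bounded wait. */
static int wait_for_request_briefly(struct intel_engine_cs *engine,
				    struct i915_request *rq)
{
	/* Run any pending submission work now instead of inside the wait. */
	intel_engine_flush_submission(engine);

	if (i915_request_wait(rq, 0, HZ / 5) < 0)	/* ~200ms budget */
		return -ETIME;

	return 0;
}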