Mirror of https://github.com/Fishwaldo/Star64_linux.git, synced 2025-07-23 07:12:09 +00:00
drm/i915: Pull scheduling under standalone lock
Currently, the backend scheduling code abuses struct_mutex in order to have a global lock to manipulate a temporary list (without widespread allocation) and to protect against list modifications. This is an extraneous coupling to struct_mutex and, further, cannot extend beyond the local device.

Pull all the code that needs to be under the one true lock into i915_scheduler.c, and make it so.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181001144755.7978-2-chris@chris-wilson.co.uk
parent b16c765122
commit e2f3496e93
8 changed files with 411 additions and 361 deletions
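The commit message describes replacing an abuse of the device-global struct_mutex with a lock owned by the scheduler itself. Before the diff, here is a rough illustration of that pattern. This is a minimal sketch: the lock name matches the one this series introduces, but the trimmed-down types and the helper function are simplified assumptions, not the kernel source.

```c
#include <linux/list.h>
#include <linux/spinlock.h>

/* i915_scheduler.c gains one private lock for all scheduling state. */
static DEFINE_SPINLOCK(schedule_lock);

/* Trimmed-down stand-in for the real i915_sched_node. */
struct i915_sched_node {
	struct list_head signalers_list;	/* nodes we must wait on */
	struct list_head waiters_list;		/* nodes waiting on us */
};

struct i915_dependency {
	struct i915_sched_node *signaler;
	struct list_head signal_link;
	struct list_head wait_link;
};

/*
 * All dependency-list surgery happens under schedule_lock rather than
 * under dev->struct_mutex, so it no longer serialises against
 * unrelated GEM work and is not tied to a single device.
 */
static void sched_link_dependency(struct i915_sched_node *node,
				  struct i915_sched_node *signal,
				  struct i915_dependency *dep)
{
	spin_lock(&schedule_lock);
	list_add(&dep->wait_link, &signal->waiters_list);
	list_add(&dep->signal_link, &node->signalers_list);
	dep->signaler = signal;
	spin_unlock(&schedule_lock);
}
```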
```diff
@@ -111,91 +111,6 @@ i915_request_remove_from_client(struct i915_request *request)
 	spin_unlock(&file_priv->mm.lock);
 }
 
-static struct i915_dependency *
-i915_dependency_alloc(struct drm_i915_private *i915)
-{
-	return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
-}
-
-static void
-i915_dependency_free(struct drm_i915_private *i915,
-		     struct i915_dependency *dep)
-{
-	kmem_cache_free(i915->dependencies, dep);
-}
-
-static void
-__i915_sched_node_add_dependency(struct i915_sched_node *node,
-				 struct i915_sched_node *signal,
-				 struct i915_dependency *dep,
-				 unsigned long flags)
-{
-	INIT_LIST_HEAD(&dep->dfs_link);
-	list_add(&dep->wait_link, &signal->waiters_list);
-	list_add(&dep->signal_link, &node->signalers_list);
-	dep->signaler = signal;
-	dep->flags = flags;
-}
-
-static int
-i915_sched_node_add_dependency(struct drm_i915_private *i915,
-			       struct i915_sched_node *node,
-			       struct i915_sched_node *signal)
-{
-	struct i915_dependency *dep;
-
-	dep = i915_dependency_alloc(i915);
-	if (!dep)
-		return -ENOMEM;
-
-	__i915_sched_node_add_dependency(node, signal, dep,
-					 I915_DEPENDENCY_ALLOC);
-	return 0;
-}
-
-static void
-i915_sched_node_fini(struct drm_i915_private *i915,
-		     struct i915_sched_node *node)
-{
-	struct i915_dependency *dep, *tmp;
-
-	GEM_BUG_ON(!list_empty(&node->link));
-
-	/*
-	 * Everyone we depended upon (the fences we wait to be signaled)
-	 * should retire before us and remove themselves from our list.
-	 * However, retirement is run independently on each timeline and
-	 * so we may be called out-of-order.
-	 */
-	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
-		GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler));
-		GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
-		list_del(&dep->wait_link);
-		if (dep->flags & I915_DEPENDENCY_ALLOC)
-			i915_dependency_free(i915, dep);
-	}
-
-	/* Remove ourselves from everyone who depends upon us */
-	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
-		GEM_BUG_ON(dep->signaler != node);
-		GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
-		list_del(&dep->signal_link);
-		if (dep->flags & I915_DEPENDENCY_ALLOC)
-			i915_dependency_free(i915, dep);
-	}
-}
-
-static void
-i915_sched_node_init(struct i915_sched_node *node)
-{
-	INIT_LIST_HEAD(&node->signalers_list);
-	INIT_LIST_HEAD(&node->waiters_list);
-	INIT_LIST_HEAD(&node->link);
-	node->attr.priority = I915_PRIORITY_INVALID;
-}
-
 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 {
 	struct intel_engine_cs *engine;
```
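The helpers removed above are re-added in i915_scheduler.c and run with the new lock held. As a hedged sketch of the destination shape, reconstructed from the commit description rather than quoted from the new file, the add-dependency path becomes roughly:

```c
/*
 * Sketch only: i915_dependency_alloc() and
 * __i915_sched_node_add_dependency() are the helpers shown in the
 * hunk above, now living in i915_scheduler.c.
 */
static int
i915_sched_node_add_dependency(struct drm_i915_private *i915,
			       struct i915_sched_node *node,
			       struct i915_sched_node *signal)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc(i915);
	if (!dep)
		return -ENOMEM;

	/* List manipulation is serialised by the standalone lock. */
	spin_lock(&schedule_lock);
	__i915_sched_node_add_dependency(node, signal, dep,
					 I915_DEPENDENCY_ALLOC);
	spin_unlock(&schedule_lock);

	return 0;
}
```

A dedicated lock with a tight critical section also sidesteps the drawback called out in the message: struct_mutex belongs to one drm_i915_private, whereas a lock private to the scheduler can cover scheduling state beyond the local device.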