mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-07 15:18:15 +00:00
drm/amdgpu: fix preemption unit test
Remove signaled jobs from the job list and ensure the job was indeed preempted.

Signed-off-by: Jack Xiao <Jack.Xiao@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
7d65a577bb
commit
d845a2051b
1 changed file with 15 additions and 5 deletions
|
@ -1295,27 +1295,37 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
|
||||||
static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
|
static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
|
||||||
{
|
{
|
||||||
struct amdgpu_job *job;
|
struct amdgpu_job *job;
|
||||||
struct drm_sched_job *s_job;
|
struct drm_sched_job *s_job, *tmp;
|
||||||
uint32_t preempt_seq;
|
uint32_t preempt_seq;
|
||||||
struct dma_fence *fence, **ptr;
|
struct dma_fence *fence, **ptr;
|
||||||
struct amdgpu_fence_driver *drv = &ring->fence_drv;
|
struct amdgpu_fence_driver *drv = &ring->fence_drv;
|
||||||
struct drm_gpu_scheduler *sched = &ring->sched;
|
struct drm_gpu_scheduler *sched = &ring->sched;
|
||||||
|
bool preempted = true;
|
||||||
|
|
||||||
if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
|
if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
|
preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
|
||||||
if (preempt_seq <= atomic_read(&drv->last_seq))
|
if (preempt_seq <= atomic_read(&drv->last_seq)) {
|
||||||
return;
|
preempted = false;
|
||||||
|
goto no_preempt;
|
||||||
|
}
|
||||||
|
|
||||||
preempt_seq &= drv->num_fences_mask;
|
preempt_seq &= drv->num_fences_mask;
|
||||||
ptr = &drv->fences[preempt_seq];
|
ptr = &drv->fences[preempt_seq];
|
||||||
fence = rcu_dereference_protected(*ptr, 1);
|
fence = rcu_dereference_protected(*ptr, 1);
|
||||||
|
|
||||||
|
no_preempt:
|
||||||
spin_lock(&sched->job_list_lock);
|
spin_lock(&sched->job_list_lock);
|
||||||
list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
|
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
|
||||||
|
if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
|
||||||
|
/* remove job from ring_mirror_list */
|
||||||
|
list_del_init(&s_job->node);
|
||||||
|
sched->ops->free_job(s_job);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
job = to_amdgpu_job(s_job);
|
job = to_amdgpu_job(s_job);
|
||||||
if (job->fence == fence)
|
if (preempted && job->fence == fence)
|
||||||
/* mark the job as preempted */
|
/* mark the job as preempted */
|
||||||
job->preemption_status |= AMDGPU_IB_PREEMPTED;
|
job->preemption_status |= AMDGPU_IB_PREEMPTED;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Reference in a new issue