mirror of https://github.com/Fishwaldo/Star64_linux.git, synced 2025-07-03 21:01:50 +00:00
drm/amdgpu: add amdgpu_job_submit_direct helper
Make sure that we properly initialize at least the sched member.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 3320b8d2ac
commit ee913fd9e1
8 changed files with 48 additions and 70 deletions
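The helper factors out the direct-submission sequence that each caller below previously open-coded. As a rough before/after sketch of the caller-side change, condensed from the hunks that follow (ring, ib, f, and the err label stand in for each call site's local names):

	/* Before: every direct-submission call site repeated this. */
	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;
	amdgpu_job_free(job);

	/* After: one call; the helper also sets job->base.sched. */
	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;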
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

@@ -140,6 +140,21 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 	return 0;
 }
 
+int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+			     struct dma_fence **fence)
+{
+	int r;
+
+	job->base.sched = &ring->sched;
+	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
+	job->fence = dma_fence_get(*fence);
+	if (r)
+		return r;
+
+	amdgpu_job_free(job);
+	return 0;
+}
+
 static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 					       struct drm_sched_entity *s_entity)
 {
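One ownership detail worth noting in the helper above: amdgpu_job_free() runs only on the success path, so a failed direct submission leaves the job for the caller to clean up. A minimal illustrative caller skeleton (the err_free label and surrounding flow are hypothetical, not from this commit):

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;	/* the helper did not free the job */

	dma_fence_put(f);	/* drop our fence reference when done */
	return 0;

err_free:
	amdgpu_job_free(job);	/* caller still owns the job on error */
	return r;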
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h

@@ -33,6 +33,8 @@
 #define to_amdgpu_job(sched_job)	\
 		container_of((sched_job), struct amdgpu_job, base)
 
+struct amdgpu_fence;
+
 struct amdgpu_job {
 	struct drm_sched_job	base;
 	struct amdgpu_device	*adev;
@@ -68,4 +70,6 @@ void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 		      void *owner, struct dma_fence **f);
+int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+			     struct dma_fence **fence);
 #endif
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -2075,24 +2075,19 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 	WARN_ON(job->ibs[0].length_dw > num_dw);
-	if (direct_submit) {
-		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
-				       NULL, fence);
-		job->fence = dma_fence_get(*fence);
-		if (r)
-			DRM_ERROR("Error scheduling IBs (%d)\n", r);
-		amdgpu_job_free(job);
-	} else {
+	if (direct_submit)
+		r = amdgpu_job_submit_direct(job, ring, fence);
+	else
 		r = amdgpu_job_submit(job, &adev->mman.entity,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
-		if (r)
-			goto error_free;
-	}
+	if (r)
+		goto error_free;
 
 	return r;
 
 error_free:
 	amdgpu_job_free(job);
+	DRM_ERROR("Error scheduling IBs (%d)\n", r);
 	return r;
 }
 
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@@ -1062,12 +1062,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 		if (r < 0)
 			goto err_free;
 
-		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-		job->fence = dma_fence_get(f);
+		r = amdgpu_job_submit_direct(job, ring, &f);
 		if (r)
 			goto err_free;
-
-		amdgpu_job_free(job);
 	} else {
 		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
 				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c

@@ -469,12 +469,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
 
-	amdgpu_job_free(job);
 	if (fence)
 		*fence = dma_fence_get(f);
 	dma_fence_put(f);
@@ -531,19 +529,13 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (direct) {
-		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-		job->fence = dma_fence_get(f);
-		if (r)
-			goto err;
-
-		amdgpu_job_free(job);
-	} else {
+	if (direct)
+		r = amdgpu_job_submit_direct(job, ring, &f);
+	else
 		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-		if (r)
-			goto err;
-	}
+	if (r)
+		goto err;
 
 	if (fence)
 		*fence = dma_fence_get(f);
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c

@@ -308,13 +308,10 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 	}
 	ib->length_dw = 16;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err_free;
 
-	amdgpu_job_free(job);
-
 	amdgpu_bo_fence(bo, f, false);
 	amdgpu_bo_unreserve(bo);
 	amdgpu_bo_unref(&bo);
@@ -499,12 +496,10 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
 
-	amdgpu_job_free(job);
 	if (fence)
 		*fence = dma_fence_get(f);
 	dma_fence_put(f);
@@ -553,12 +548,10 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
 
-	amdgpu_job_free(job);
 	if (fence)
 		*fence = dma_fence_get(f);
 	dma_fence_put(f);
@@ -666,12 +659,10 @@ static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
 	}
 	ib->length_dw = 16;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
 
-	amdgpu_job_free(job);
 	if (fence)
 		*fence = dma_fence_get(f);
 	dma_fence_put(f);
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c

@@ -248,12 +248,10 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
 
-	amdgpu_job_free(job);
 	if (fence)
 		*fence = dma_fence_get(f);
 	dma_fence_put(f);
@@ -312,19 +310,13 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (direct) {
-		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-		job->fence = dma_fence_get(f);
-		if (r)
-			goto err;
-
-		amdgpu_job_free(job);
-	} else {
+	if (direct)
+		r = amdgpu_job_submit_direct(job, ring, &f);
+	else
 		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-		if (r)
-			goto err;
-	}
+	if (r)
+		goto err;
 
 	if (fence)
 		*fence = dma_fence_get(f);
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c

@@ -250,12 +250,10 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
 
-	amdgpu_job_free(job);
 	if (fence)
 		*fence = dma_fence_get(f);
 	dma_fence_put(f);
@@ -313,19 +311,13 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (direct) {
-		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-		job->fence = dma_fence_get(f);
-		if (r)
-			goto err;
-
-		amdgpu_job_free(job);
-	} else {
+	if (direct)
+		r = amdgpu_job_submit_direct(job, ring, &f);
+	else
 		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-		if (r)
-			goto err;
-	}
+	if (r)
+		goto err;
 
 	if (fence)
 		*fence = dma_fence_get(f);