drm/amdgpu: stop using BO status for user pages
Instead use a counter to figure out if we need to set new pages or not.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent b72cf4fca2
commit ca666a3c29

3 changed files with 22 additions and 4 deletions
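In miniature, the scheme works like this: the MMU notifier bumps an invalidation counter (mmu_invalidations), setting user pages snapshots that counter into last_set_pages, and pages must be re-acquired exactly when the two disagree. Below is a small standalone sketch of that idea using C11 atomics as a stand-in for the kernel's atomic_t; only the two field names come from the patch, everything else is a simplified illustration, not the driver code.

    /* Sketch of the invalidation-counter scheme; not the amdgpu code. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct userptr_sketch {
        atomic_uint mmu_invalidations; /* bumped on each MMU invalidation */
        uint32_t last_set_pages;       /* counter value when pages were set */
    };

    /* MMU notifier path: mark any previously acquired pages as stale. */
    static void sketch_invalidate(struct userptr_sketch *s)
    {
        atomic_fetch_add(&s->mmu_invalidations, 1);
    }

    /* Page-set path: remember which invalidation generation the pages match. */
    static void sketch_set_pages(struct userptr_sketch *s)
    {
        s->last_set_pages = atomic_load(&s->mmu_invalidations);
    }

    /* New pages are needed iff an invalidation happened since the last set. */
    static bool sketch_needs_pages(struct userptr_sketch *s)
    {
        return atomic_load(&s->mmu_invalidations) != s->last_set_pages;
    }

This is also why the patch can drop the tt_bound checks in the command-submission path: whether pages must be (re)set is now answered by the counter comparison rather than by TTM binding state.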
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1802,6 +1802,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 				  unsigned long end);
 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
 				       int *last_invalidated);
+bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm);
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 				 struct ttm_mem_reg *mem);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -473,7 +473,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 			return -EPERM;
 
 		/* Check if we have user pages and nobody bound the BO already */
-		if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
+		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
+		    lobj->user_pages) {
 			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
 						     lobj->user_pages);
 			binding_userptr = true;
@@ -534,23 +535,25 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		INIT_LIST_HEAD(&need_pages);
 		for (i = p->bo_list->first_userptr;
 		     i < p->bo_list->num_entries; ++i) {
+			struct amdgpu_bo *bo;
 
 			e = &p->bo_list->array[i];
+			bo = e->robj;
 
-			if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
+			if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
 				 &e->user_invalidated) && e->user_pages) {
 
 				/* We acquired a page array, but somebody
 				 * invalidated it. Free it and try again
 				 */
 				release_pages(e->user_pages,
-					      e->robj->tbo.ttm->num_pages,
+					      bo->tbo.ttm->num_pages,
 					      false);
 				kvfree(e->user_pages);
 				e->user_pages = NULL;
 			}
 
-			if (e->robj->tbo.ttm->state != tt_bound &&
+			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
 			    !e->user_pages) {
 				list_del(&e->tv.head);
 				list_add(&e->tv.head, &need_pages);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -609,6 +609,7 @@ struct amdgpu_ttm_tt {
 	spinlock_t		guptasklock;
 	struct list_head	guptasks;
 	atomic_t		mmu_invalidations;
+	uint32_t		last_set_pages;
 	struct list_head	list;
 };
 
@@ -672,8 +673,10 @@ release_pages:
 
 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	unsigned i;
 
+	gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
 	for (i = 0; i < ttm->num_pages; ++i) {
 		if (ttm->pages[i])
 			put_page(ttm->pages[i]);
@@ -1025,6 +1028,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
 	spin_lock_init(&gtt->guptasklock);
 	INIT_LIST_HEAD(&gtt->guptasks);
 	atomic_set(&gtt->mmu_invalidations, 0);
+	gtt->last_set_pages = 0;
 
 	return 0;
 }
@@ -1077,6 +1081,16 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
 	return prev_invalidated != *last_invalidated;
 }
 
+bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+	if (gtt == NULL || !gtt->userptr)
+		return false;
+
+	return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
+}
+
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;