drm/amdgpu: use embedded gem object
Drop drm_gem_object from amdgpu_bo, use the ttm_buffer_object.base instead.
Build tested only.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20190805140119.7337-6-kraxel@redhat.com
parent ce77038fda
commit c105de2828
6 changed files with 12 additions and 13 deletions
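For readers unfamiliar with the pattern: struct ttm_buffer_object already embeds a struct drm_gem_object as its base member, so amdgpu_bo no longer needs a gem_base field of its own. GEM pointers are handed out as &bo->tbo.base and mapped back to the amdgpu_bo with container_of(). Below is a minimal, self-contained userspace sketch of that embedded-object round trip; the trimmed-down struct definitions and the local container_of macro are illustrative stand-ins, not the kernel's, and the sketch is not part of the patch.

#include <stddef.h>
#include <stdio.h>

/* Local stand-in for the kernel's container_of(): recover the outer
 * structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Heavily trimmed stand-ins for the real structures. */
struct drm_gem_object { size_t size; };
struct ttm_buffer_object { struct drm_gem_object base; };  /* GEM object embedded here */
struct amdgpu_bo { struct ttm_buffer_object tbo; };        /* no separate gem_base field */

/* Mirrors the patched gem_to_amdgpu_bo() macro: note the nested member path. */
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base)

int main(void)
{
        struct amdgpu_bo bo = { .tbo.base.size = 4096 };
        struct drm_gem_object *gobj = &bo.tbo.base;  /* what the driver now hands out */

        /* Resolving the GEM pointer yields the original amdgpu_bo again. */
        printf("round trip ok: %d, size %zu\n",
               gem_to_amdgpu_bo(gobj) == &bo,
               gem_to_amdgpu_bo(gobj)->tbo.base.size);
        return 0;
}

This is why the hunks below can simply substitute tbo.base for gem_base wherever a drm_gem_object is created, released, or returned.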
@@ -393,7 +393,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
                 bo->prime_shared_count = 1;
 
         reservation_object_unlock(resv);
-        return &bo->gem_base;
+        return &bo->tbo.base;
 
 error:
         reservation_object_unlock(resv);

@@ -85,7 +85,7 @@ retry:
                 }
                 return r;
         }
-        *obj = &bo->gem_base;
+        *obj = &bo->tbo.base;
 
         return 0;
 }

@@ -689,7 +689,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                 struct drm_amdgpu_gem_create_in info;
                 void __user *out = u64_to_user_ptr(args->value);
 
-                info.bo_size = robj->gem_base.size;
+                info.bo_size = robj->tbo.base.size;
                 info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
                 info.domains = robj->preferred_domains;
                 info.domain_flags = robj->flags;

@@ -819,8 +819,8 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
         if (pin_count)
                 seq_printf(m, " pin count %d", pin_count);
 
-        dma_buf = READ_ONCE(bo->gem_base.dma_buf);
-        attachment = READ_ONCE(bo->gem_base.import_attach);
+        dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
+        attachment = READ_ONCE(bo->tbo.base.import_attach);
 
         if (attachment)
                 seq_printf(m, " imported from %p", dma_buf);

@@ -31,7 +31,7 @@
  */
 
 #define AMDGPU_GEM_DOMAIN_MAX 0x3
-#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
+#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base)
 
 void amdgpu_gem_object_free(struct drm_gem_object *obj);
 int amdgpu_gem_object_open(struct drm_gem_object *obj,

@@ -85,9 +85,9 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
 
         amdgpu_bo_kunmap(bo);
 
-        if (bo->gem_base.import_attach)
-                drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
-        drm_gem_object_release(&bo->gem_base);
+        if (bo->tbo.base.import_attach)
+                drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
+        drm_gem_object_release(&bo->tbo.base);
         /* in case amdgpu_device_recover_vram got NULL of bo->parent */
         if (!list_empty(&bo->shadow_list)) {
                 mutex_lock(&adev->shadow_list_lock);

@@ -454,7 +454,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
         bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
         if (bo == NULL)
                 return -ENOMEM;
-        drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
+        drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size);
         INIT_LIST_HEAD(&bo->shadow_list);
         bo->vm_bo = NULL;
         bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :

@@ -509,7 +509,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
         if (unlikely(r != 0))
                 return r;
 
-        bo->gem_base.resv = bo->tbo.resv;
+        bo->tbo.base.resv = bo->tbo.resv;
 
         if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
             bo->tbo.mem.mem_type == TTM_PL_VRAM &&

@@ -94,7 +94,6 @@ struct amdgpu_bo {
         /* per VM structure for page tables and with virtual addresses */
         struct amdgpu_vm_bo_base *vm_bo;
         /* Constant after initialization */
-        struct drm_gem_object gem_base;
         struct amdgpu_bo *parent;
         struct amdgpu_bo *shadow;
 

@@ -227,7 +227,7 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 
         if (amdgpu_ttm_tt_get_usermm(bo->ttm))
                 return -EPERM;
-        return drm_vma_node_verify_access(&abo->gem_base.vma_node,
+        return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
                                           filp->private_data);
 }
 