mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-19 21:21:09 +00:00
drm/amdgpu: distinct between allocated GART space and GMC addr
Most of the time we only need to know if the BO has a valid GMC addr.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
22d8bfafcc
commit
0e33495d49
2 changed files with 5 additions and 10 deletions
|
@@ -1362,8 +1362,6 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
|
||||||
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
|
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
|
||||||
{
|
{
|
||||||
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
|
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
|
||||||
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
|
|
||||||
!amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
|
|
||||||
WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
|
WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
|
||||||
!bo->pin_count);
|
!bo->pin_count);
|
||||||
WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
|
WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
|
||||||
|
|
|
@@ -345,7 +345,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
|
||||||
{
|
{
|
||||||
uint64_t addr = 0;
|
uint64_t addr = 0;
|
||||||
|
|
||||||
if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
|
if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
|
||||||
addr = mm_node->start << PAGE_SHIFT;
|
addr = mm_node->start << PAGE_SHIFT;
|
||||||
addr += bo->bdev->man[mem->mem_type].gpu_offset;
|
addr += bo->bdev->man[mem->mem_type].gpu_offset;
|
||||||
}
|
}
|
||||||
|
@@ -433,8 +433,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
|
||||||
/* Map only what needs to be accessed. Map src to window 0 and
|
/* Map only what needs to be accessed. Map src to window 0 and
|
||||||
* dst to window 1
|
* dst to window 1
|
||||||
*/
|
*/
|
||||||
if (src->mem->mem_type == TTM_PL_TT &&
|
if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) {
|
||||||
!amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
|
|
||||||
r = amdgpu_map_buffer(src->bo, src->mem,
|
r = amdgpu_map_buffer(src->bo, src->mem,
|
||||||
PFN_UP(cur_size + src_page_offset),
|
PFN_UP(cur_size + src_page_offset),
|
||||||
src_node_start, 0, ring,
|
src_node_start, 0, ring,
|
||||||
|
@@ -447,8 +446,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
|
||||||
from += src_page_offset;
|
from += src_page_offset;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (dst->mem->mem_type == TTM_PL_TT &&
|
if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) {
|
||||||
!amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
|
|
||||||
r = amdgpu_map_buffer(dst->bo, dst->mem,
|
r = amdgpu_map_buffer(dst->bo, dst->mem,
|
||||||
PFN_UP(cur_size + dst_page_offset),
|
PFN_UP(cur_size + dst_page_offset),
|
||||||
dst_node_start, 1, ring,
|
dst_node_start, 1, ring,
|
||||||
|
@@ -1086,11 +1084,10 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
|
||||||
uint64_t flags;
|
uint64_t flags;
|
||||||
int r;
|
int r;
|
||||||
|
|
||||||
if (bo->mem.mem_type != TTM_PL_TT ||
|
if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
|
||||||
amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
/* allocate GTT space */
|
/* allocate GART space */
|
||||||
tmp = bo->mem;
|
tmp = bo->mem;
|
||||||
tmp.mm_node = NULL;
|
tmp.mm_node = NULL;
|
||||||
placement.num_placement = 1;
|
placement.num_placement = 1;
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue