drm/amdgpu: bypass tmr when reserve c2p memory

Reserved C2P memory should not be within the TMR memory range.

Signed-off-by: Likun Gao <Likun.Gao@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
Likun Gao 2020-05-26 16:54:44 +08:00 committed by Alex Deucher
parent 72d208c23c
commit 2c6e83a134

View file

@ -1822,14 +1822,6 @@ static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
return 0; return 0;
} }
/*
 * Compute the VRAM offset used for the C2P memory-training data block.
 *
 * Returns vram_size rounded to a 1 MiB boundary with ALIGN() (round-up).
 * When the sub-MiB remainder of vram_size is at most 4 KiB, a full MiB is
 * subtracted first so the rounded result stays safely below the top of
 * VRAM instead of landing on (or past) the next MiB boundary.
 *
 * NOTE(review): this commit removes the function; the caller now places
 * the C2P block relative to the TMR region instead (see the change at
 * amdgpu_ttm_training_data_block_init in this diff).
 */
static u64 amdgpu_ttm_training_get_c2p_offset(u64 vram_size)
{
/* remainder below the next 1 MiB boundary is <= 4 KiB: step back 1 MiB */
if ((vram_size & (SZ_1M - 1)) < (SZ_4K + 1) )
vram_size -= SZ_1M;
return ALIGN(vram_size, SZ_1M);
}
static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev) static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
{ {
struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
@ -1837,7 +1829,7 @@ static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
memset(ctx, 0, sizeof(*ctx)); memset(ctx, 0, sizeof(*ctx));
ctx->c2p_train_data_offset = ctx->c2p_train_data_offset =
amdgpu_ttm_training_get_c2p_offset(adev->gmc.mc_vram_size); ALIGN((adev->gmc.mc_vram_size - adev->discovery_tmr_size - SZ_1M), SZ_1M);
ctx->p2c_train_data_offset = ctx->p2c_train_data_offset =
(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET); (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
ctx->train_data_size = ctx->train_data_size =
@ -1861,10 +1853,9 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) { if (!amdgpu_sriov_vf(adev)) {
ret = amdgpu_mem_train_support(adev); ret = amdgpu_mem_train_support(adev);
if (ret == 1) { if (ret == 1)
mem_train_support = true; mem_train_support = true;
amdgpu_ttm_training_data_block_init(adev); else if (ret == -1)
} else if (ret == -1)
return -EINVAL; return -EINVAL;
else else
DRM_DEBUG("memory training does not support!\n"); DRM_DEBUG("memory training does not support!\n");
@ -1879,10 +1870,12 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
*/ */
adev->discovery_tmr_size = adev->discovery_tmr_size =
amdgpu_atomfirmware_get_fw_reserved_fb_size(adev); amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
if (!adev->discovery_tmr_size) { if (!adev->discovery_tmr_size)
adev->discovery_tmr_size = DISCOVERY_TMR_SIZE; adev->discovery_tmr_size = DISCOVERY_TMR_SIZE;
if (mem_train_support) { if (mem_train_support) {
/* reserve vram for mem train indepently */ /* reserve vram for mem train according to TMR location */
amdgpu_ttm_training_data_block_init(adev);
ret = amdgpu_bo_create_kernel_at(adev, ret = amdgpu_bo_create_kernel_at(adev,
ctx->c2p_train_data_offset, ctx->c2p_train_data_offset,
ctx->train_data_size, ctx->train_data_size,
@ -1894,7 +1887,7 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
amdgpu_ttm_training_reserve_vram_fini(adev); amdgpu_ttm_training_reserve_vram_fini(adev);
return ret; return ret;
} }
} ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
} }
ret = amdgpu_bo_create_kernel_at(adev, ret = amdgpu_bo_create_kernel_at(adev,
@ -1909,9 +1902,6 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
return ret; return ret;
} }
if (mem_train_support)
ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
return 0; return 0;
} }