Merge tag 'amd-drm-fixes-5.6-2020-03-05' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

amd-drm-fixes-5.6-2020-03-05:

amdgpu:
- Gfx reset fix for gfx9, 10
- Fix for gfx10
- DP MST fix
- DCC fix
- Renoir power fixes
- Navi power fix

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200305185957.4268-1-alexander.deucher@amd.com

commit 2ac4853e29

8 changed files with 127 additions and 53 deletions
@@ -52,7 +52,7 @@
  * 1. Primary ring
  * 2. Async ring
  */
-#define GFX10_NUM_GFX_RINGS	2
+#define GFX10_NUM_GFX_RINGS_NV1X	1
 #define GFX10_MEC_HPD_SIZE	2048
 
 #define F32_CE_PROGRAM_RAM_SIZE	65536
@@ -1304,7 +1304,7 @@ static int gfx_v10_0_sw_init(void *handle)
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:
 		adev->gfx.me.num_me = 1;
-		adev->gfx.me.num_pipe_per_me = 2;
+		adev->gfx.me.num_pipe_per_me = 1;
 		adev->gfx.me.num_queue_per_pipe = 1;
 		adev->gfx.mec.num_mec = 2;
 		adev->gfx.mec.num_pipe_per_mec = 4;
@@ -2710,6 +2710,8 @@ static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev)
 	amdgpu_ring_commit(ring);
 
 	/* submit cs packet to copy state 0 to next available state */
+	if (adev->gfx.num_gfx_rings > 1) {
+		/* maximum supported gfx ring is 2 */
 	ring = &adev->gfx.gfx_ring[1];
 	r = amdgpu_ring_alloc(ring, 2);
 	if (r) {
@@ -2721,7 +2723,7 @@ static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev)
 	amdgpu_ring_write(ring, 0);
 
 	amdgpu_ring_commit(ring);
+	}
 	return 0;
 }
 
@@ -2818,8 +2820,10 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
 	mutex_unlock(&adev->srbm_mutex);
 
 	/* Init gfx ring 1 for pipe 1 */
+	if (adev->gfx.num_gfx_rings > 1) {
 	mutex_lock(&adev->srbm_mutex);
 	gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
+	/* maximum supported gfx ring is 2 */
 	ring = &adev->gfx.gfx_ring[1];
 	rb_bufsz = order_base_2(ring->ring_size / 8);
 	tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
@@ -2850,7 +2854,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
 
 	gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
 	mutex_unlock(&adev->srbm_mutex);
+	}
 	/* Switch to pipe 0 */
 	mutex_lock(&adev->srbm_mutex);
 	gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
@@ -3513,6 +3517,7 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
 
 		/* reset ring buffer */
 		ring->wptr = 0;
+		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
 		amdgpu_ring_clear_ring(ring);
 	} else {
 		amdgpu_ring_clear_ring(ring);
@@ -3966,7 +3971,8 @@ static int gfx_v10_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS;
+	adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;
 
 	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
 
 	gfx_v10_0_set_kiq_pm4_funcs(adev);
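The gfx v10 hunks above all serve one change: Navi1x now advertises a single gfx ring (GFX10_NUM_GFX_RINGS_NV1X), so every path that used to touch gfx_ring[1] is guarded by num_gfx_rings > 1. The standalone sketch below only illustrates that guard pattern; the struct and function names are invented, not the driver's own.

#include <stdio.h>

#define GFX10_NUM_GFX_RINGS_NV1X 1	/* value taken from the hunk above */

struct fake_ring { int id; };

struct fake_gfx {
	int num_gfx_rings;
	struct fake_ring gfx_ring[2];	/* storage for up to two rings */
};

/* Initialize only the rings the ASIC actually exposes; code that used to
 * touch gfx_ring[1] unconditionally is skipped when only one ring exists. */
static void init_gfx_rings(struct fake_gfx *gfx)
{
	gfx->gfx_ring[0].id = 0;		/* primary ring, always present */

	if (gfx->num_gfx_rings > 1)		/* async ring only if supported */
		gfx->gfx_ring[1].id = 1;
}

int main(void)
{
	struct fake_gfx gfx = { .num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X };

	init_gfx_rings(&gfx);
	printf("gfx rings initialized: %d\n", gfx.num_gfx_rings);
	return 0;
}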
@@ -3663,6 +3663,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
 
 		/* reset ring buffer */
 		ring->wptr = 0;
+		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
 		amdgpu_ring_clear_ring(ring);
 	} else {
 		amdgpu_ring_clear_ring(ring);
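The kcq_init_queue hunks for gfx v10 and v9 above both zero the ring's write-pointer shadow in the writeback buffer, not just ring->wptr itself. The toy model below sketches why the second copy matters during a reset; it uses invented names and plain assignments instead of the kernel's atomic64 helpers.

#include <stdint.h>
#include <stdio.h>

/* Toy model: the driver keeps wptr in the ring struct, and the firmware
 * reads a shadow copy of it from a writeback (wb) buffer in memory. */
struct toy_ring {
	uint64_t wptr;		/* CPU-side write pointer */
	unsigned wptr_offs;	/* slot of the shadow copy in the wb buffer */
};

static uint64_t wb[8];		/* stand-in for a writeback buffer */

static void toy_ring_reset(struct toy_ring *ring)
{
	ring->wptr = 0;
	/* Without this line the consumer would still see the stale
	 * pre-reset value and could keep fetching past the real end. */
	wb[ring->wptr_offs] = 0;
}

int main(void)
{
	struct toy_ring ring = { .wptr = 1234, .wptr_offs = 3 };

	wb[ring.wptr_offs] = ring.wptr;	/* stale value left over before a reset */
	toy_ring_reset(&ring);
	printf("wptr=%llu shadow=%llu\n",
	       (unsigned long long)ring.wptr,
	       (unsigned long long)wb[ring.wptr_offs]);
	return 0;
}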
@@ -1422,6 +1422,73 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
 		drm_kms_helper_hotplug_event(dev);
 }
 
+static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
+{
+	struct smu_context *smu = &adev->smu;
+	int ret = 0;
+
+	if (!is_support_sw_smu(adev))
+		return 0;
+
+	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
+	 * on window driver dc implementation.
+	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
+	 * should be passed to smu during boot up and resume from s3.
+	 * boot up: dc calculate dcn watermark clock settings within dc_create,
+	 * dcn20_resource_construct
+	 * then call pplib functions below to pass the settings to smu:
+	 * smu_set_watermarks_for_clock_ranges
+	 * smu_set_watermarks_table
+	 * navi10_set_watermarks_table
+	 * smu_write_watermarks_table
+	 *
+	 * For Renoir, clock settings of dcn watermark are also fixed values.
+	 * dc has implemented different flow for window driver:
+	 * dc_hardware_init / dc_set_power_state
+	 * dcn10_init_hw
+	 * notify_wm_ranges
+	 * set_wm_ranges
+	 * -- Linux
+	 * smu_set_watermarks_for_clock_ranges
+	 * renoir_set_watermarks_table
+	 * smu_write_watermarks_table
+	 *
+	 * For Linux,
+	 * dc_hardware_init -> amdgpu_dm_init
+	 * dc_set_power_state --> dm_resume
+	 *
+	 * therefore, this function apply to navi10/12/14 but not Renoir
+	 * *
+	 */
+	switch(adev->asic_type) {
+	case CHIP_NAVI10:
+	case CHIP_NAVI14:
+	case CHIP_NAVI12:
+		break;
+	default:
+		return 0;
+	}
+
+	mutex_lock(&smu->mutex);
+
+	/* pass data to smu controller */
+	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+		ret = smu_write_watermarks_table(smu);
+
+		if (ret) {
+			mutex_unlock(&smu->mutex);
+			DRM_ERROR("Failed to update WMTABLE!\n");
+			return ret;
+		}
+		smu->watermarks_bitmap |= WATERMARKS_LOADED;
+	}
+
+	mutex_unlock(&smu->mutex);
+
+	return 0;
+}
+
 /**
  * dm_hw_init() - Initialize DC device
  * @handle: The base driver device containing the amdgpu_dm device.
@@ -1700,6 +1767,8 @@ static int dm_resume(void *handle)
 
 	amdgpu_dm_irq_resume_late(adev);
 
+	amdgpu_dm_smu_write_watermarks_table(adev);
+
 	return 0;
 }
 
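The watermarks helper added above is gated by two bitmap flags: it only pushes the table when settings exist (WATERMARKS_EXIST) and have not already been written (WATERMARKS_LOADED), and dm_resume re-invokes it after S3. A reduced sketch of that gate follows, with invented flag values and no locking; it is only meant to show the write-at-most-once behaviour.

#include <stdio.h>

#define WM_EXIST	(1u << 0)	/* invented stand-ins for the driver's flags */
#define WM_LOADED	(1u << 1)

static unsigned watermarks_bitmap = WM_EXIST;

/* Push the table only if it exists and has not been loaded yet. */
static int write_watermarks_once(void)
{
	if ((watermarks_bitmap & WM_EXIST) && !(watermarks_bitmap & WM_LOADED)) {
		/* ... hand the table to the firmware here ... */
		watermarks_bitmap |= WM_LOADED;
		return 1;	/* wrote it */
	}
	return 0;		/* nothing to do */
}

int main(void)
{
	printf("first call wrote table: %d\n", write_watermarks_once());
	printf("second call wrote table: %d\n", write_watermarks_once());
	return 0;
}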
@@ -451,6 +451,7 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 			aconnector->dc_sink);
 		dc_sink_release(aconnector->dc_sink);
 		aconnector->dc_sink = NULL;
+		aconnector->dc_link->cur_link_settings.lane_count = 0;
 	}
 
 	drm_connector_unregister(connector);
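The MST hunk clears cur_link_settings.lane_count when the connector's sink is released. One plausible reading, sketched below with invented types, is that a non-zero lane count is elsewhere taken to mean the link is already trained, so a stale value could make a later hotplug look already configured; the real dc link-state handling is considerably more involved than this.

#include <stdbool.h>
#include <stdio.h>

struct toy_link_settings { unsigned lane_count; unsigned link_rate; };
struct toy_link { struct toy_link_settings cur_link_settings; };

/* Elsewhere, a non-zero lane count is read as "link already trained". */
static bool toy_link_is_trained(const struct toy_link *link)
{
	return link->cur_link_settings.lane_count != 0;
}

static void toy_destroy_connector(struct toy_link *link)
{
	/* Mirror of the fix: forget the old training result when the
	 * MST connector is torn down. */
	link->cur_link_settings.lane_count = 0;
}

int main(void)
{
	struct toy_link link = {
		.cur_link_settings = { .lane_count = 4, .link_rate = 10 },
	};

	toy_destroy_connector(&link);
	printf("trained after destroy: %d\n", toy_link_is_trained(&link));
	return 0;
}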
@@ -840,8 +840,8 @@ static void hubbub1_det_request_size(
 
 	hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);
 
-	swath_bytes_horz_wc = height * blk256_height * bpe;
-	swath_bytes_vert_wc = width * blk256_width * bpe;
+	swath_bytes_horz_wc = width * blk256_height * bpe;
+	swath_bytes_vert_wc = height * blk256_width * bpe;
 
 	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
 			false : /* full 256B request */
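The DCC hunk swaps width and height in the swath write-combine sizing: a horizontal swath spans the surface width at one 256B-block row of height, and a vertical swath spans the height at one block column of width. The standalone arithmetic below mirrors the decision the function feeds, with made-up example values for the surface, block size and detile buffer size.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned width = 1920, height = 1080, bpe = 4;	/* example surface */
	unsigned blk256_width = 16, blk256_height = 16;	/* example 256B block */
	unsigned detile_buf_size = 164 * 1024;		/* example DET size */

	/* Corrected orientation, as in the hunk above: a horizontal swath
	 * covers the full width at one block-row of height, and vice versa. */
	unsigned swath_bytes_horz_wc = width * blk256_height * bpe;
	unsigned swath_bytes_vert_wc = height * blk256_width * bpe;

	/* Full 256B requests are only possible if two swaths fit in the
	 * detile buffer; otherwise fall back to 128B requests. */
	bool req128_horz_wc = !(2 * swath_bytes_horz_wc <= detile_buf_size);
	bool req128_vert_wc = !(2 * swath_bytes_vert_wc <= detile_buf_size);

	printf("horz swath %u bytes -> req128=%d\n", swath_bytes_horz_wc, req128_horz_wc);
	printf("vert swath %u bytes -> req128=%d\n", swath_bytes_vert_wc, req128_vert_wc);
	return 0;
}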
@@ -222,7 +222,7 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 {
 	int ret = 0;
 
-	if (min <= 0 && max <= 0)
+	if (min < 0 && max < 0)
 		return -EINVAL;
 
 	if (!smu_clk_dpm_is_enabled(smu, clk_type))
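The amdgpu_smu hunk relaxes the pre-check from <= 0 to < 0, so a request whose bounds are both 0 is no longer rejected as invalid. Below is a minimal sketch of the relaxed check using plain ints and an invented wrapper name; the driver's own parameter types and callers may differ.

#include <stdio.h>

/* Sketch of the relaxed validation: only reject when both bounds are
 * genuinely negative, so 0 remains a permitted value for either end. */
static int check_soft_freq_range(int min, int max)
{
	if (min < 0 && max < 0)
		return -1;	/* stand-in for -EINVAL */
	return 0;
}

int main(void)
{
	printf("min=0   max=0   -> %d (accepted after the fix)\n", check_soft_freq_range(0, 0));
	printf("min=-1  max=-1  -> %d (still rejected)\n", check_soft_freq_range(-1, -1));
	printf("min=300 max=800 -> %d\n", check_soft_freq_range(300, 800));
	return 0;
}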
@@ -111,8 +111,8 @@ static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = {
 	CLK_MAP(GFXCLK, CLOCK_GFXCLK),
 	CLK_MAP(SCLK, CLOCK_GFXCLK),
 	CLK_MAP(SOCCLK, CLOCK_SOCCLK),
-	CLK_MAP(UCLK, CLOCK_UMCCLK),
-	CLK_MAP(MCLK, CLOCK_UMCCLK),
+	CLK_MAP(UCLK, CLOCK_FCLK),
+	CLK_MAP(MCLK, CLOCK_FCLK),
 };
 
 static struct smu_12_0_cmn2aisc_mapping renoir_table_map[SMU_TABLE_COUNT] = {
@@ -280,7 +280,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 		break;
 	case SMU_MCLK:
 		count = NUM_MEMCLK_DPM_LEVELS;
-		cur_value = metrics.ClockFrequency[CLOCK_UMCCLK];
+		cur_value = metrics.ClockFrequency[CLOCK_FCLK];
 		break;
 	case SMU_DCEFCLK:
 		count = NUM_DCFCLK_DPM_LEVELS;
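Both Renoir hunks retarget the generic UCLK/MCLK identifiers from the UMC clock to the fabric clock: once in the clk_map translation table and once where the current memory clock is read back from the metrics. The table itself is just an enum-to-enum lookup; the trimmed-down version below uses invented enums purely to show the mapping shape.

#include <stdio.h>

/* Generic clock ids used by common SMU code (invented subset). */
enum smu_clk { SMU_SCLK, SMU_MCLK, SMU_UCLK, SMU_CLK_COUNT };

/* ASIC-side clock ids reported by the firmware (again a subset). */
enum asic_clk { CLOCK_GFXCLK, CLOCK_FCLK, CLOCK_UMCCLK, CLOCK_COUNT };

/* The mapping table: after the fix, UCLK and MCLK both resolve to FCLK. */
static const enum asic_clk clk_map[SMU_CLK_COUNT] = {
	[SMU_SCLK] = CLOCK_GFXCLK,
	[SMU_MCLK] = CLOCK_FCLK,
	[SMU_UCLK] = CLOCK_FCLK,
};

int main(void)
{
	unsigned freq_mhz[CLOCK_COUNT] = {
		[CLOCK_GFXCLK] = 1400,
		[CLOCK_FCLK]   = 1200,
		[CLOCK_UMCCLK] = 0,	/* not the value user space should see */
	};

	/* Reading "the memory clock" now goes through the fabric clock entry. */
	printf("MCLK -> %u MHz\n", freq_mhz[clk_map[SMU_MCLK]]);
	return 0;
}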
@@ -458,9 +458,6 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
 {
 	int ret = 0;
 
-	if (max < min)
-		return -EINVAL;
-
 	switch (clk_type) {
 	case SMU_GFXCLK:
 	case SMU_SCLK: