Merge tag 'drm-fixes-for-v4.7-rc3' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "This week's instalment of fixes:

  amdgpu: lots of memory leak and firmware leak fixes
  nouveau: collection of display fixes, KASAN fixes
  vc4: vblank/pageflipping fixes
  fsl-dcu: regmap cache fix
  omap: unused variable warning fix

  Nothing too surprising so far"

* tag 'drm-fixes-for-v4.7-rc3' of git://people.freedesktop.org/~airlied/linux: (46 commits)
  drm/amdgpu: fix warning with powerplay disabled.
  drm/amd/powerplay: delete useless code as pptable changed in vbios.
  drm/amd/powerplay: fix bug visit array out of bounds
  drm/amdgpu: fix smu ucode memleak (v2)
  drm/amdgpu: add release firmware for cgs
  drm/amdgpu: fix tonga smu_fini mem leak
  drm/amdgpu: fix fiji smu fini mem leak
  drm/amdgpu: fix cik sdma ucode memleak
  drm/amdgpu: fix sdma24 ucode mem leak
  drm/amdgpu: fix sdma3 ucode mem leak
  drm/amdgpu: fix uvd fini mem leak
  drm/amdgpu: fix gfx 7 ucode mem leak
  drm/amdgpu: fix gfx8 ucode mem leak
  drm/amdgpu: fix missing free wb for cond_exec
  drm/amdgpu: fix memleak in pptable_init
  drm/amdgpu: fix mem leak in atombios
  drm/amdgpu: fix mem leak in pplib/hwmgr
  drm/amdgpu: fix mem leak in smumgr
  drm/amdgpu: add pipeline sync while vmid switch in same ctx
  drm/amdgpu: vBIOS post only call when mem_size zero
  ...
commit 00da90085e
62 changed files with 439 additions and 185 deletions
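Most of the amdgpu patches in the diff below share one teardown idiom: every firmware image requested at init time gets a matching release in the corresponding fini/free_microcode path, and the pointer is cleared so a repeated teardown cannot double-free. The following is a minimal userspace sketch of that idiom, with plain malloc/free standing in for request_firmware()/release_firmware(); all names here are illustrative, not from the kernel tree.

#include <stdlib.h>
#include <stdio.h>

/* Userspace stand-ins for request_firmware()/release_firmware(). */
struct fw_blob { size_t len; unsigned char *data; };

static struct fw_blob *fake_request_firmware(size_t len)
{
	struct fw_blob *fw = malloc(sizeof(*fw));
	if (!fw)
		return NULL;
	fw->len = len;
	fw->data = calloc(1, len);
	return fw;
}

static void fake_release_firmware(struct fw_blob *fw)
{
	if (!fw)	/* like release_firmware(), NULL is a no-op */
		return;
	free(fw->data);
	free(fw);
}

struct device_ctx { struct fw_blob *pfp_fw, *me_fw; };

/* The fix pattern: release each image and clear the pointer so a
 * later or repeated fini cannot double-free. */
static void device_free_microcode(struct device_ctx *dev)
{
	fake_release_firmware(dev->pfp_fw);
	dev->pfp_fw = NULL;
	fake_release_firmware(dev->me_fw);
	dev->me_fw = NULL;
}

int main(void)
{
	struct device_ctx dev = {
		.pfp_fw = fake_request_firmware(256),
		.me_fw  = fake_request_firmware(256),
	};
	device_free_microcode(&dev);
	device_free_microcode(&dev);	/* safe: pointers were cleared */
	puts("teardown ok");
	return 0;
}

The gfx, sdma and dpm hunks below are all instances of this pattern applied to the various firmware handles amdgpu keeps.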
@@ -799,6 +799,7 @@ struct amdgpu_ring {
 	unsigned		cond_exe_offs;
 	u64			cond_exe_gpu_addr;
 	volatile u32		*cond_exe_cpu_addr;
+	int			vmid;
 };

 /*
@@ -936,7 +937,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
 		    unsigned vm_id, uint64_t pd_addr,
 		    uint32_t gds_base, uint32_t gds_size,
 		    uint32_t gws_base, uint32_t gws_size,
-		    uint32_t oa_base, uint32_t oa_size);
+		    uint32_t oa_base, uint32_t oa_size,
+		    bool vmid_switch);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
@@ -696,6 +696,17 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
 	return result;
 }

+static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
+{
+	CGS_FUNC_ADEV;
+	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
+		release_firmware(adev->pm.fw);
+		return 0;
+	}
+	/* cannot release other firmware because they are not created by cgs */
+	return -EINVAL;
+}
+
 static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 					enum cgs_ucode_id type,
 					struct cgs_firmware_info *info)
@@ -1125,6 +1136,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
 	amdgpu_cgs_pm_query_clock_limits,
 	amdgpu_cgs_set_camera_voltages,
 	amdgpu_cgs_get_firmware_info,
+	amdgpu_cgs_rel_firmware,
 	amdgpu_cgs_set_powergating_state,
 	amdgpu_cgs_set_clockgating_state,
 	amdgpu_cgs_get_active_displays_info,
@@ -827,8 +827,10 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
  */
 static void amdgpu_atombios_fini(struct amdgpu_device *adev)
 {
-	if (adev->mode_info.atom_context)
+	if (adev->mode_info.atom_context) {
 		kfree(adev->mode_info.atom_context->scratch);
+		kfree(adev->mode_info.atom_context->iio);
+	}
 	kfree(adev->mode_info.atom_context);
 	adev->mode_info.atom_context = NULL;
 	kfree(adev->mode_info.atom_card_info);
@@ -1325,6 +1327,11 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 		adev->ip_block_status[i].valid = false;
 	}

+	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+		if (adev->ip_blocks[i].funcs->late_fini)
+			adev->ip_blocks[i].funcs->late_fini((void *)adev);
+	}
+
 	return 0;
 }

@@ -1513,8 +1520,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		amdgpu_atombios_has_gpu_virtualization_table(adev);

 	/* Post card if necessary */
-	if (!amdgpu_card_posted(adev) ||
-	    adev->virtualization.supports_sr_iov) {
+	if (!amdgpu_card_posted(adev)) {
 		if (!adev->bios) {
 			dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
 			return -EINVAL;
@@ -122,6 +122,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	bool skip_preamble, need_ctx_switch;
 	unsigned patch_offset = ~0;
 	struct amdgpu_vm *vm;
+	int vmid = 0, old_vmid = ring->vmid;
 	struct fence *hwf;
 	uint64_t ctx;

@@ -135,9 +136,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (job) {
 		vm = job->vm;
 		ctx = job->ctx;
+		vmid = job->vm_id;
 	} else {
 		vm = NULL;
 		ctx = 0;
+		vmid = 0;
 	}

 	if (!ring->ready) {
@@ -163,7 +166,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
 				    job->gds_base, job->gds_size,
 				    job->gws_base, job->gws_size,
-				    job->oa_base, job->oa_size);
+				    job->oa_base, job->oa_size,
+				    (ring->current_ctx == ctx) && (old_vmid != vmid));
 		if (r) {
 			amdgpu_ring_undo(ring);
 			return r;
@@ -180,7 +184,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	need_ctx_switch = ring->current_ctx != ctx;
 	for (i = 0; i < num_ibs; ++i) {
 		ib = &ibs[i];
-
 		/* drop preamble IBs if we don't have a context switch */
 		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
 			continue;
@@ -188,6 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
 				    need_ctx_switch);
 		need_ctx_switch = false;
+		ring->vmid = vmid;
 	}

 	if (ring->funcs->emit_hdp_invalidate)
@@ -198,6 +202,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
 		if (job && job->vm_id)
 			amdgpu_vm_reset_id(adev, job->vm_id);
+		ring->vmid = old_vmid;
 		amdgpu_ring_undo(ring);
 		return r;
 	}
@@ -183,13 +183,6 @@ static int amdgpu_pp_sw_fini(void *handle)
 	if (ret)
 		return ret;

-#ifdef CONFIG_DRM_AMD_POWERPLAY
-	if (adev->pp_enabled) {
-		amdgpu_pm_sysfs_fini(adev);
-		amd_powerplay_fini(adev->powerplay.pp_handle);
-	}
-#endif
-
 	return ret;
 }

@@ -223,6 +216,22 @@ static int amdgpu_pp_hw_fini(void *handle)
 	return ret;
 }

+static void amdgpu_pp_late_fini(void *handle)
+{
+#ifdef CONFIG_DRM_AMD_POWERPLAY
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->pp_enabled) {
+		amdgpu_pm_sysfs_fini(adev);
+		amd_powerplay_fini(adev->powerplay.pp_handle);
+	}
+
+	if (adev->powerplay.ip_funcs->late_fini)
+		adev->powerplay.ip_funcs->late_fini(
+			adev->powerplay.pp_handle);
+#endif
+}
+
 static int amdgpu_pp_suspend(void *handle)
 {
 	int ret = 0;
@@ -311,6 +320,7 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
 	.sw_fini = amdgpu_pp_sw_fini,
 	.hw_init = amdgpu_pp_hw_init,
 	.hw_fini = amdgpu_pp_hw_fini,
+	.late_fini = amdgpu_pp_late_fini,
 	.suspend = amdgpu_pp_suspend,
 	.resume = amdgpu_pp_resume,
 	.is_idle = amdgpu_pp_is_idle,
@@ -343,6 +343,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
 	ring->ring = NULL;
 	ring->ring_obj = NULL;

+	amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
 	amdgpu_wb_free(ring->adev, ring->fence_offs);
 	amdgpu_wb_free(ring->adev, ring->rptr_offs);
 	amdgpu_wb_free(ring->adev, ring->wptr_offs);
@@ -115,6 +115,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
 		return r;
 	}
 	r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+	memset(sa_manager->cpu_ptr, 0, sa_manager->size);
 	amdgpu_bo_unreserve(sa_manager->bo);
 	return r;
 }
@@ -253,19 +253,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
 	int r;

-	if (adev->uvd.vcpu_bo == NULL)
-		return 0;
+	kfree(adev->uvd.saved_bo);

 	amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);

-	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
-	if (!r) {
-		amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
-		amdgpu_bo_unpin(adev->uvd.vcpu_bo);
-		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
-	}
+	if (adev->uvd.vcpu_bo) {
+		r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
+		if (!r) {
+			amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
+			amdgpu_bo_unpin(adev->uvd.vcpu_bo);
+			amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
+		}

-	amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+	}

 	amdgpu_ring_fini(&adev->uvd.ring);

@@ -298,7 +298,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
 		    unsigned vm_id, uint64_t pd_addr,
 		    uint32_t gds_base, uint32_t gds_size,
 		    uint32_t gws_base, uint32_t gws_size,
-		    uint32_t oa_base, uint32_t oa_size)
+		    uint32_t oa_base, uint32_t oa_size,
+		    bool vmid_switch)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
@@ -312,8 +313,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
 	int r;

 	if (ring->funcs->emit_pipeline_sync && (
-	    pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
-	    ring->type == AMDGPU_RING_TYPE_COMPUTE))
+	    pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || vmid_switch))
 		amdgpu_ring_emit_pipeline_sync(ring);

 	if (ring->funcs->emit_vm_flush &&
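The two amdgpu_vm_flush() hunks above change when a pipeline sync is emitted: instead of unconditionally syncing on compute rings, the caller now passes a vmid_switch flag (true when the same context comes back on a different VMID, per the ib_schedule hunks earlier), and the sync happens only when a page-directory flush, a GDS switch, or such a VMID switch actually occurs. A hedged sketch of the resulting predicate, with invented names for compactness:

#include <stdbool.h>
#include <stdio.h>

#define VM_NO_FLUSH 0UL	/* illustrative stand-in for AMDGPU_VM_NO_FLUSH */

/* Mirrors the new condition: sync only when something observable
 * changes underneath in-flight work. */
static bool need_pipeline_sync(unsigned long pd_addr,
			       bool gds_switch_needed, bool vmid_switch)
{
	return pd_addr != VM_NO_FLUSH || gds_switch_needed || vmid_switch;
}

int main(void)
{
	/* same ctx, same vmid, nothing to flush -> no sync */
	printf("%d\n", need_pipeline_sync(VM_NO_FLUSH, false, false));
	/* same ctx but the vmid changed -> sync */
	printf("%d\n", need_pipeline_sync(VM_NO_FLUSH, false, true));
	return 0;
}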
@@ -6221,6 +6221,9 @@ static int ci_dpm_sw_fini(void *handle)
 	ci_dpm_fini(adev);
 	mutex_unlock(&adev->pm.mutex);

+	release_firmware(adev->pm.fw);
+	adev->pm.fw = NULL;
+
 	return 0;
 }

@@ -66,6 +66,16 @@ MODULE_FIRMWARE("radeon/mullins_sdma1.bin");

 u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);

+
+static void cik_sdma_free_microcode(struct amdgpu_device *adev)
+{
+	int i;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		release_firmware(adev->sdma.instance[i].fw);
+		adev->sdma.instance[i].fw = NULL;
+	}
+}
+
 /*
  * sDMA - System DMA
  * Starting with CIK, the GPU has new asynchronous
@@ -419,6 +429,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 		/* Initialize the ring buffer's read and write pointers */
 		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
 		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

 		/* set the wb address whether it's enabled or not */
 		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -446,7 +458,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

 		ring->ready = true;
+	}
+
+	cik_sdma_enable(adev, true);

+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
 		r = amdgpu_ring_test_ring(ring);
 		if (r) {
 			ring->ready = false;
@@ -529,8 +546,8 @@ static int cik_sdma_start(struct amdgpu_device *adev)
 	if (r)
 		return r;

-	/* unhalt the MEs */
-	cik_sdma_enable(adev, true);
+	/* halt the engine before programing */
+	cik_sdma_enable(adev, false);

 	/* start the gfx rings and rlc compute queues */
 	r = cik_sdma_gfx_resume(adev);
@@ -998,6 +1015,7 @@ static int cik_sdma_sw_fini(void *handle)
 	for (i = 0; i < adev->sdma.num_instances; i++)
 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

+	cik_sdma_free_microcode(adev);
 	return 0;
 }

@@ -72,6 +72,11 @@ static int fiji_dpm_sw_init(void *handle)

 static int fiji_dpm_sw_fini(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	release_firmware(adev->pm.fw);
+	adev->pm.fw = NULL;
+
 	return 0;
 }

@@ -991,6 +991,22 @@ out:
 	return err;
 }

+static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
+{
+	release_firmware(adev->gfx.pfp_fw);
+	adev->gfx.pfp_fw = NULL;
+	release_firmware(adev->gfx.me_fw);
+	adev->gfx.me_fw = NULL;
+	release_firmware(adev->gfx.ce_fw);
+	adev->gfx.ce_fw = NULL;
+	release_firmware(adev->gfx.mec_fw);
+	adev->gfx.mec_fw = NULL;
+	release_firmware(adev->gfx.mec2_fw);
+	adev->gfx.mec2_fw = NULL;
+	release_firmware(adev->gfx.rlc_fw);
+	adev->gfx.rlc_fw = NULL;
+}
+
 /**
  * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
  *
@@ -4489,6 +4505,7 @@ static int gfx_v7_0_sw_fini(void *handle)
 	gfx_v7_0_cp_compute_fini(adev);
 	gfx_v7_0_rlc_fini(adev);
 	gfx_v7_0_mec_fini(adev);
+	gfx_v7_0_free_microcode(adev);

 	return 0;
 }
@@ -836,6 +836,26 @@ err1:
 	return r;
 }

+
+static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) {
+	release_firmware(adev->gfx.pfp_fw);
+	adev->gfx.pfp_fw = NULL;
+	release_firmware(adev->gfx.me_fw);
+	adev->gfx.me_fw = NULL;
+	release_firmware(adev->gfx.ce_fw);
+	adev->gfx.ce_fw = NULL;
+	release_firmware(adev->gfx.rlc_fw);
+	adev->gfx.rlc_fw = NULL;
+	release_firmware(adev->gfx.mec_fw);
+	adev->gfx.mec_fw = NULL;
+	if ((adev->asic_type != CHIP_STONEY) &&
+	    (adev->asic_type != CHIP_TOPAZ))
+		release_firmware(adev->gfx.mec2_fw);
+	adev->gfx.mec2_fw = NULL;
+
+	kfree(adev->gfx.rlc.register_list_format);
+}
+
 static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 {
 	const char *chip_name;
@@ -1983,7 +2003,7 @@ static int gfx_v8_0_sw_fini(void *handle)

 	gfx_v8_0_rlc_fini(adev);

-	kfree(adev->gfx.rlc.register_list_format);
+	gfx_v8_0_free_microcode(adev);

 	return 0;
 }
@@ -3974,11 +3994,15 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
 		amdgpu_ring_write(ring, 0x3a00161a);
 		amdgpu_ring_write(ring, 0x0000002e);
 		break;
-	case CHIP_TOPAZ:
 	case CHIP_CARRIZO:
 		amdgpu_ring_write(ring, 0x00000002);
 		amdgpu_ring_write(ring, 0x00000000);
 		break;
+	case CHIP_TOPAZ:
+		amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ?
+				  0x00000000 : 0x00000002);
+		amdgpu_ring_write(ring, 0x00000000);
+		break;
 	case CHIP_STONEY:
 		amdgpu_ring_write(ring, 0x00000000);
 		amdgpu_ring_write(ring, 0x00000000);
@@ -72,6 +72,11 @@ static int iceland_dpm_sw_init(void *handle)

 static int iceland_dpm_sw_fini(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	release_firmware(adev->pm.fw);
+	adev->pm.fw = NULL;
+
 	return 0;
 }

@@ -105,6 +105,15 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
 	}
 }

+static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
+{
+	int i;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		release_firmware(adev->sdma.instance[i].fw);
+		adev->sdma.instance[i].fw = NULL;
+	}
+}
+
 /**
  * sdma_v2_4_init_microcode - load ucode images from disk
  *
@@ -461,6 +470,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
 		/* Initialize the ring buffer's read and write pointers */
 		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
 		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

 		/* set the wb address whether it's enabled or not */
 		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -489,7 +500,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

 		ring->ready = true;
+	}

+	sdma_v2_4_enable(adev, true);
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
 		r = amdgpu_ring_test_ring(ring);
 		if (r) {
 			ring->ready = false;
@@ -580,8 +595,8 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
 			return -EINVAL;
 	}

-	/* unhalt the MEs */
-	sdma_v2_4_enable(adev, true);
+	/* halt the engine before programing */
+	sdma_v2_4_enable(adev, false);

 	/* start the gfx rings and rlc compute queues */
 	r = sdma_v2_4_gfx_resume(adev);
@@ -1012,6 +1027,7 @@ static int sdma_v2_4_sw_fini(void *handle)
 	for (i = 0; i < adev->sdma.num_instances; i++)
 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

+	sdma_v2_4_free_microcode(adev);
 	return 0;
 }

@@ -236,6 +236,15 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
 	}
 }

+static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
+{
+	int i;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		release_firmware(adev->sdma.instance[i].fw);
+		adev->sdma.instance[i].fw = NULL;
+	}
+}
+
 /**
  * sdma_v3_0_init_microcode - load ucode images from disk
  *
@@ -672,6 +681,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 		/* Initialize the ring buffer's read and write pointers */
 		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
 		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

 		/* set the wb address whether it's enabled or not */
 		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -711,7 +722,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

 		ring->ready = true;
+	}

+	/* unhalt the MEs */
+	sdma_v3_0_enable(adev, true);
+	/* enable sdma ring preemption */
+	sdma_v3_0_ctx_switch_enable(adev, true);
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
 		r = amdgpu_ring_test_ring(ring);
 		if (r) {
 			ring->ready = false;
@@ -804,10 +823,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
 		}
 	}

-	/* unhalt the MEs */
-	sdma_v3_0_enable(adev, true);
-	/* enable sdma ring preemption */
-	sdma_v3_0_ctx_switch_enable(adev, true);
+	/* disble sdma engine before programing it */
+	sdma_v3_0_ctx_switch_enable(adev, false);
+	sdma_v3_0_enable(adev, false);

 	/* start the gfx rings and rlc compute queues */
 	r = sdma_v3_0_gfx_resume(adev);
@@ -1247,6 +1265,7 @@ static int sdma_v3_0_sw_fini(void *handle)
 	for (i = 0; i < adev->sdma.num_instances; i++)
 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

+	sdma_v3_0_free_microcode(adev);
 	return 0;
 }

@@ -71,6 +71,11 @@ static int tonga_dpm_sw_init(void *handle)

 static int tonga_dpm_sw_fini(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	release_firmware(adev->pm.fw);
+	adev->pm.fw = NULL;
+
 	return 0;
 }

@@ -157,6 +157,7 @@ struct amd_ip_funcs {
 	int (*hw_init)(void *handle);
 	/* tears down the hw state */
 	int (*hw_fini)(void *handle);
+	void (*late_fini)(void *handle);
 	/* handles IP specific hw/sw changes for suspend */
 	int (*suspend)(void *handle);
 	/* handles IP specific hw/sw changes for resume */
@@ -581,6 +581,9 @@ typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device,
 				     enum cgs_ucode_id type,
 				     struct cgs_firmware_info *info);

+typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device,
+				enum cgs_ucode_id type);
+
 typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device,
 				  enum amd_ip_block_type block_type,
 				  enum amd_powergating_state state);
@@ -645,6 +648,7 @@ struct cgs_ops {
 	cgs_set_camera_voltages_t set_camera_voltages;
 	/* Firmware Info */
 	cgs_get_firmware_info get_firmware_info;
+	cgs_rel_firmware rel_firmware;
 	/* cg pg interface*/
 	cgs_set_powergating_state set_powergating_state;
 	cgs_set_clockgating_state set_clockgating_state;
@@ -738,6 +742,8 @@ struct cgs_device
 	CGS_CALL(set_camera_voltages,dev,mask,voltages)
 #define cgs_get_firmware_info(dev, type, info)	\
 	CGS_CALL(get_firmware_info, dev, type, info)
+#define cgs_rel_firmware(dev, type)	\
+	CGS_CALL(rel_firmware, dev, type)
 #define cgs_set_powergating_state(dev, block_type, state)	\
 	CGS_CALL(set_powergating_state, dev, block_type, state)
 #define cgs_set_clockgating_state(dev, block_type, state)	\
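The three cgs_common.h hunks above wire the new rel_firmware entry point through the CGS indirection layer in three pieces: a typedef for the callback, a slot in struct cgs_ops, and a cgs_rel_firmware() convenience macro that dispatches through CGS_CALL. As a rough standalone illustration of that ops-table-plus-macro dispatch style (all names below are invented for the sketch, not the kernel's):

#include <stdio.h>

/* A miniature ops table in the style of struct cgs_ops. */
typedef int (*demo_rel_firmware_t)(void *dev, int type);

struct demo_ops {
	demo_rel_firmware_t rel_firmware;
};

struct demo_device {
	const struct demo_ops *ops;
};

/* Dispatch macro in the spirit of CGS_CALL(func, dev, ...). */
#define DEMO_CALL(func, dev, ...) \
	((dev)->ops->func ? (dev)->ops->func((dev), ##__VA_ARGS__) : -1)

static int demo_rel_firmware(void *dev, int type)
{
	(void)dev;
	printf("releasing firmware type %d\n", type);
	return 0;
}

static const struct demo_ops ops = { .rel_firmware = demo_rel_firmware };

int main(void)
{
	struct demo_device dev = { .ops = &ops };
	return DEMO_CALL(rel_firmware, &dev, 1);
}

The indirection lets the powerplay/smumgr code (later in this diff) release SMU firmware without depending on amdgpu internals directly.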
@@ -73,11 +73,14 @@ static int pp_sw_init(void *handle)

 	ret = hwmgr->hwmgr_func->backend_init(hwmgr);
 	if (ret)
-		goto err;
+		goto err1;

 	pr_info("amdgpu: powerplay initialized\n");

 	return 0;
+err1:
+	if (hwmgr->pptable_func->pptable_fini)
+		hwmgr->pptable_func->pptable_fini(hwmgr);
 err:
 	pr_err("amdgpu: powerplay initialization failed\n");
 	return ret;
@@ -100,6 +103,9 @@ static int pp_sw_fini(void *handle)
 	if (hwmgr->hwmgr_func->backend_fini != NULL)
 		ret = hwmgr->hwmgr_func->backend_fini(hwmgr);

+	if (hwmgr->pptable_func->pptable_fini)
+		hwmgr->pptable_func->pptable_fini(hwmgr);
+
 	return ret;
 }

@@ -58,9 +58,6 @@ static void pem_fini(struct pp_eventmgr *eventmgr)
 	pem_unregister_interrupts(eventmgr);

 	pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);
-
-	if (eventmgr != NULL)
-		kfree(eventmgr);
 }

 int eventmgr_init(struct pp_instance *handle)
@@ -1830,7 +1830,7 @@ static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci)

 	PP_ASSERT_WITH_CODE(false,
 			"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
-			return vddci_table->entries[i].value);
+			return vddci_table->entries[i-1].value);
 }

 static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
@@ -93,6 +93,13 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr)
 	if (hwmgr == NULL || hwmgr->ps == NULL)
 		return -EINVAL;

+	/* do hwmgr finish*/
+	kfree(hwmgr->backend);
+
+	kfree(hwmgr->start_thermal_controller.function_list);
+
+	kfree(hwmgr->set_temperature_range.function_list);
+
 	kfree(hwmgr->ps);
 	kfree(hwmgr);
 	return 0;
@@ -462,7 +469,7 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)

 	PP_ASSERT_WITH_CODE(false,
 			"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
-			return vddci_table->entries[i].value);
+			return vddci_table->entries[i-1].value);
 }

 int phm_find_boot_level(void *table,
@@ -286,7 +286,7 @@ int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)

 		if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
 				(uint8_t *)&data->power_tune_table,
-				sizeof(struct SMU74_Discrete_PmFuses), data->sram_end))
+				(sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end))
 			PP_ASSERT_WITH_CODE(false,
 					"Attempt to download PmFuseTable Failed!",
 					return -EINVAL);
@@ -2847,27 +2847,6 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 		}
 	}

-	/* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */
-	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
-		data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc;
-		/* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */
-		/* param1 is for corresponding std voltage */
-		data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
-	}
-	data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
-
-	if (NULL != allowed_vdd_mclk_table) {
-		/* Initialize Vddci DPM table based on allow Mclk values */
-		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
-			data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci;
-			data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1;
-			data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd;
-			data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
-		}
-		data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count;
-		data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
-	}
-
 	/* setup PCIE gen speed levels*/
 	tonga_setup_default_pcie_tables(hwmgr);

@@ -1040,48 +1040,44 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
 	struct phm_ppt_v1_information *pp_table_information =
 		(struct phm_ppt_v1_information *)(hwmgr->pptable);

-	if (NULL != hwmgr->soft_pp_table) {
-		kfree(hwmgr->soft_pp_table);
+	if (NULL != hwmgr->soft_pp_table)
 		hwmgr->soft_pp_table = NULL;
-	}

-	if (NULL != pp_table_information->vdd_dep_on_sclk)
-		pp_table_information->vdd_dep_on_sclk = NULL;
+	kfree(pp_table_information->vdd_dep_on_sclk);
+	pp_table_information->vdd_dep_on_sclk = NULL;

-	if (NULL != pp_table_information->vdd_dep_on_mclk)
-		pp_table_information->vdd_dep_on_mclk = NULL;
+	kfree(pp_table_information->vdd_dep_on_mclk);
+	pp_table_information->vdd_dep_on_mclk = NULL;

-	if (NULL != pp_table_information->valid_mclk_values)
-		pp_table_information->valid_mclk_values = NULL;
+	kfree(pp_table_information->valid_mclk_values);
+	pp_table_information->valid_mclk_values = NULL;

-	if (NULL != pp_table_information->valid_sclk_values)
-		pp_table_information->valid_sclk_values = NULL;
+	kfree(pp_table_information->valid_sclk_values);
+	pp_table_information->valid_sclk_values = NULL;

-	if (NULL != pp_table_information->vddc_lookup_table)
-		pp_table_information->vddc_lookup_table = NULL;
+	kfree(pp_table_information->vddc_lookup_table);
+	pp_table_information->vddc_lookup_table = NULL;

-	if (NULL != pp_table_information->vddgfx_lookup_table)
-		pp_table_information->vddgfx_lookup_table = NULL;
+	kfree(pp_table_information->vddgfx_lookup_table);
+	pp_table_information->vddgfx_lookup_table = NULL;

-	if (NULL != pp_table_information->mm_dep_table)
-		pp_table_information->mm_dep_table = NULL;
+	kfree(pp_table_information->mm_dep_table);
+	pp_table_information->mm_dep_table = NULL;

-	if (NULL != pp_table_information->cac_dtp_table)
-		pp_table_information->cac_dtp_table = NULL;
+	kfree(pp_table_information->cac_dtp_table);
+	pp_table_information->cac_dtp_table = NULL;

-	if (NULL != hwmgr->dyn_state.cac_dtp_table)
-		hwmgr->dyn_state.cac_dtp_table = NULL;
+	kfree(hwmgr->dyn_state.cac_dtp_table);
+	hwmgr->dyn_state.cac_dtp_table = NULL;

-	if (NULL != pp_table_information->ppm_parameter_table)
-		pp_table_information->ppm_parameter_table = NULL;
+	kfree(pp_table_information->ppm_parameter_table);
+	pp_table_information->ppm_parameter_table = NULL;

-	if (NULL != pp_table_information->pcie_table)
-		pp_table_information->pcie_table = NULL;
+	kfree(pp_table_information->pcie_table);
+	pp_table_information->pcie_table = NULL;

-	if (NULL != hwmgr->pptable) {
-		kfree(hwmgr->pptable);
-		hwmgr->pptable = NULL;
-	}
+	kfree(hwmgr->pptable);
+	hwmgr->pptable = NULL;

 	return result;
 }
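The rewrite above leans on the fact that kfree(NULL) is a defined no-op, so the old `if (NULL != p)` guards added nothing; more importantly, the old branches only cleared the pointers without freeing them, which is the leak being fixed. A minimal userspace before/after of the same idea, using free(), which is likewise NULL-safe:

#include <stdlib.h>

struct tables { int *vdd_dep_on_sclk; int *vdd_dep_on_mclk; };

/* Before (leaky): pointer cleared but the allocation never freed. */
static void uninit_leaky(struct tables *t)
{
	if (t->vdd_dep_on_sclk != NULL)
		t->vdd_dep_on_sclk = NULL;
	if (t->vdd_dep_on_mclk != NULL)
		t->vdd_dep_on_mclk = NULL;
}

/* After: free unconditionally (free(NULL) is a no-op), then clear. */
static void uninit_fixed(struct tables *t)
{
	free(t->vdd_dep_on_sclk);
	t->vdd_dep_on_sclk = NULL;
	free(t->vdd_dep_on_mclk);
	t->vdd_dep_on_mclk = NULL;
}

int main(void)
{
	struct tables t = { malloc(16), malloc(16) };
	uninit_fixed(&t);	/* uninit_leaky(&t) would leak both blocks */
	(void)uninit_leaky;	/* referenced only for illustration */
	return 0;
}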
@@ -1006,10 +1006,16 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)

 static int fiji_smu_fini(struct pp_smumgr *smumgr)
 {
+	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+
+	smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+
 	if (smumgr->backend) {
 		kfree(smumgr->backend);
 		smumgr->backend = NULL;
 	}
+
+	cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
 	return 0;
 }

@@ -469,6 +469,7 @@ int polaris10_smu_fini(struct pp_smumgr *smumgr)
 		kfree(smumgr->backend);
 		smumgr->backend = NULL;
 	}
+	cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
 	return 0;
 }

@@ -81,6 +81,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)

 int smum_fini(struct pp_smumgr *smumgr)
 {
+	kfree(smumgr->device);
 	kfree(smumgr);
 	return 0;
 }
@@ -328,10 +328,17 @@ int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr,

 static int tonga_smu_fini(struct pp_smumgr *smumgr)
 {
+	struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend);
+
+	smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle);
+	smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+
 	if (smumgr->backend != NULL) {
 		kfree(smumgr->backend);
 		smumgr->backend = NULL;
 	}
+
+	cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
 	return 0;
 }

@@ -42,9 +42,10 @@ static const struct regmap_config fsl_dcu_regmap_config = {
 	.reg_bits = 32,
 	.reg_stride = 4,
 	.val_bits = 32,
-	.cache_type = REGCACHE_RBTREE,
+	.cache_type = REGCACHE_FLAT,

 	.volatile_reg = fsl_dcu_drm_is_volatile_reg,
+	.max_register = 0x11fc,
 };

 static int fsl_dcu_drm_irq_init(struct drm_device *dev)
@@ -408,7 +408,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	}

 	adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
-	if (!adreno_gpu->memptrs) {
+	if (IS_ERR(adreno_gpu->memptrs)) {
 		dev_err(drm->dev, "could not vmap memptrs\n");
 		return -ENOMEM;
 	}
@@ -159,6 +159,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	dev->mode_config.fb_base = paddr;

 	fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+	if (IS_ERR(fbi->screen_base)) {
+		ret = PTR_ERR(fbi->screen_base);
+		goto fail_unlock;
+	}
 	fbi->screen_size = fbdev->bo->size;
 	fbi->fix.smem_start = paddr;
 	fbi->fix.smem_len = fbdev->bo->size;
@@ -398,6 +398,8 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
 			return ERR_CAST(pages);
 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+		if (msm_obj->vaddr == NULL)
+			return ERR_PTR(-ENOMEM);
 	}
 	return msm_obj->vaddr;
 }
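The msm changes in this group all follow from msm_gem_vaddr_locked() now returning ERR_PTR(-ENOMEM) when vmap() fails, so callers must test with IS_ERR()/PTR_ERR() rather than for NULL. That convention encodes a negative errno in the top of the pointer range; below is a self-contained sketch of how it works, simplified from the kernel's <linux/err.h> (the ENOMEM_DEMO constant is invented for the example):

#include <stdio.h>

/* Simplified from <linux/err.h>: errno values live in the last page of
 * the address space, so they cannot collide with real pointers. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

#define ENOMEM_DEMO	12	/* illustrative errno value */

static void *vaddr_or_err(int fail)
{
	static int buffer;
	if (fail)
		return ERR_PTR(-ENOMEM_DEMO);
	return &buffer;
}

int main(void)
{
	void *p = vaddr_or_err(1);
	if (IS_ERR(p))	/* the fixed callers check this, not p == NULL */
		printf("error: %ld\n", PTR_ERR(p));
	p = vaddr_or_err(0);
	if (!IS_ERR(p))
		printf("got a real pointer\n");
	return 0;
}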
@@ -40,12 +40,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,

 	submit->dev = dev;
 	submit->gpu = gpu;
+	submit->fence = NULL;
 	submit->pid = get_pid(task_pid(current));

 	/* initially, until copy_from_user() and bo lookup succeeds: */
 	submit->nr_bos = 0;
 	submit->nr_cmds = 0;

+	INIT_LIST_HEAD(&submit->node);
 	INIT_LIST_HEAD(&submit->bo_list);
 	ww_acquire_init(&submit->ticket, &reservation_ww_class);

@@ -75,6 +77,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 		void __user *userptr =
 			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

+		/* make sure we don't have garbage flags, in case we hit
+		 * error path before flags is initialized:
+		 */
+		submit->bos[i].flags = 0;
+
 		ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
 		if (ret) {
 			ret = -EFAULT;
@@ -312,6 +312,9 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
 		struct msm_gem_object *obj = submit->bos[idx].obj;
 		const char *buf = msm_gem_vaddr_locked(&obj->base);

+		if (IS_ERR(buf))
+			continue;
+
 		buf += iova - submit->bos[idx].iova;

 		rd_write_section(rd, RD_GPUADDR,
@@ -40,6 +40,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
 	}

 	ring->start = msm_gem_vaddr_locked(ring->bo);
+	if (IS_ERR(ring->start)) {
+		ret = PTR_ERR(ring->start);
+		goto fail;
+	}
 	ring->end = ring->start + (size / 4);
 	ring->cur = ring->start;

@@ -16,9 +16,9 @@ enum nvkm_devidx {
 	NVKM_SUBDEV_MC,
 	NVKM_SUBDEV_BUS,
 	NVKM_SUBDEV_TIMER,
+	NVKM_SUBDEV_INSTMEM,
 	NVKM_SUBDEV_FB,
 	NVKM_SUBDEV_LTC,
-	NVKM_SUBDEV_INSTMEM,
 	NVKM_SUBDEV_MMU,
 	NVKM_SUBDEV_BAR,
 	NVKM_SUBDEV_PMU,
@@ -25,7 +25,8 @@ u16 nvbios_outp_match(struct nvkm_bios *, u16 type, u16 mask,
 		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *);

 struct nvbios_ocfg {
-	u16 match;
+	u8  proto;
+	u8  flags;
 	u16 clkcmp[2];
 };

@@ -33,7 +34,7 @@ u16 nvbios_ocfg_entry(struct nvkm_bios *, u16 outp, u8 idx,
 		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
 u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx,
 		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
-u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type,
+u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags,
 		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
 u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz);
 #endif
@@ -552,6 +552,7 @@ nouveau_fbcon_init(struct drm_device *dev)
 	if (ret)
 		goto fini;

+	fbcon->helper.fbdev->pixmap.buf_align = 4;
 	return 0;

 fini:
@@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	uint32_t fg;
 	uint32_t bg;
 	uint32_t dsize;
-	uint32_t width;
 	uint32_t *data = (uint32_t *)image->data;
 	int ret;

@@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	if (ret)
 		return ret;

-	width = ALIGN(image->width, 8);
-	dsize = ALIGN(width * image->height, 32) >> 5;
-
 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
 		fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
@@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 		  ((image->dx + image->width) & 0xffff));
 	OUT_RING(chan, bg);
 	OUT_RING(chan, fg);
-	OUT_RING(chan, (image->height << 16) | width);
+	OUT_RING(chan, (image->height << 16) | image->width);
 	OUT_RING(chan, (image->height << 16) | image->width);
 	OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));

+	dsize = ALIGN(image->width * image->height, 32) >> 5;
 	while (dsize) {
 		int iter_len = dsize > 128 ? 128 : dsize;

@@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
 	struct nouveau_channel *chan = drm->channel;
-	uint32_t width, dwords, *data = (uint32_t *)image->data;
+	uint32_t dwords, *data = (uint32_t *)image->data;
 	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
 	uint32_t *palette = info->pseudo_palette;
 	int ret;
@@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	if (ret)
 		return ret;

-	width = ALIGN(image->width, 32);
-	dwords = (width * image->height) >> 5;
-
 	BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	OUT_RING(chan, 0);
 	OUT_RING(chan, image->dy);

+	dwords = ALIGN(image->width * image->height, 32) >> 5;
 	while (dwords) {
 		int push = dwords > 2047 ? 2047 : dwords;

@@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
 	struct nouveau_channel *chan = drm->channel;
-	uint32_t width, dwords, *data = (uint32_t *)image->data;
+	uint32_t dwords, *data = (uint32_t *)image->data;
 	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
 	uint32_t *palette = info->pseudo_palette;
 	int ret;
@@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	if (ret)
 		return ret;

-	width = ALIGN(image->width, 32);
-	dwords = (width * image->height) >> 5;
-
 	BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	OUT_RING  (chan, 0);
 	OUT_RING  (chan, image->dy);

+	dwords = ALIGN(image->width * image->height, 32) >> 5;
 	while (dwords) {
 		int push = dwords > 2047 ? 2047 : dwords;

@@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/disp/piornv50.o
 nvkm-y += nvkm/engine/disp/sornv50.o
 nvkm-y += nvkm/engine/disp/sorg94.o
 nvkm-y += nvkm/engine/disp/sorgf119.o
+nvkm-y += nvkm/engine/disp/sorgm107.o
 nvkm-y += nvkm/engine/disp/sorgm200.o
 nvkm-y += nvkm/engine/disp/dport.o

@@ -76,6 +76,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
 	mask |= 0x0001 << or;
 	mask |= 0x0100 << head;

+
 	list_for_each_entry(outp, &disp->base.outp, head) {
 		if ((outp->info.hasht & 0xff) == type &&
 		    (outp->info.hashm & mask) == mask) {
@@ -155,25 +156,21 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
 	if (!outp)
 		return NULL;

+	*conf = (ctrl & 0x00000f00) >> 8;
 	switch (outp->info.type) {
 	case DCB_OUTPUT_TMDS:
-		*conf = (ctrl & 0x00000f00) >> 8;
 		if (*conf == 5)
 			*conf |= 0x0100;
 		break;
 	case DCB_OUTPUT_LVDS:
-		*conf = disp->sor.lvdsconf;
+		*conf |= disp->sor.lvdsconf;
 		break;
-	case DCB_OUTPUT_DP:
-		*conf = (ctrl & 0x00000f00) >> 8;
-		break;
-	case DCB_OUTPUT_ANALOG:
 	default:
-		*conf = 0x00ff;
 		break;
 	}

-	data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+	data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
+				 &ver, &hdr, &cnt, &len, &info2);
 	if (data && id < 0xff) {
 		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
 		if (data) {
@@ -36,7 +36,7 @@ gm107_disp = {
 	.outp.internal.crt = nv50_dac_output_new,
 	.outp.internal.tmds = nv50_sor_output_new,
 	.outp.internal.lvds = nv50_sor_output_new,
-	.outp.internal.dp = gf119_sor_dp_new,
+	.outp.internal.dp = gm107_sor_dp_new,
 	.dac.nr = 3,
 	.dac.power = nv50_dac_power,
 	.dac.sense = nv50_dac_sense,
@@ -387,22 +387,17 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
 	if (!outp)
 		return NULL;
 
+	*conf = (ctrl & 0x00000f00) >> 8;
 	if (outp->info.location == 0) {
 		switch (outp->info.type) {
 		case DCB_OUTPUT_TMDS:
-			*conf = (ctrl & 0x00000f00) >> 8;
 			if (*conf == 5)
 				*conf |= 0x0100;
 			break;
 		case DCB_OUTPUT_LVDS:
-			*conf = disp->sor.lvdsconf;
+			*conf |= disp->sor.lvdsconf;
 			break;
-		case DCB_OUTPUT_DP:
-			*conf = (ctrl & 0x00000f00) >> 8;
-			break;
-		case DCB_OUTPUT_ANALOG:
 		default:
-			*conf = 0x00ff;
 			break;
 		}
 	} else {
@@ -410,7 +405,8 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
 		pclk = pclk / 2;
 	}
 
-	data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+	data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
+				 &ver, &hdr, &cnt, &len, &info2);
 	if (data && id < 0xff) {
 		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
 		if (data) {
@@ -62,7 +62,12 @@ int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
 int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
 		     struct nvkm_output **);
 int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
+int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int);
 
-int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
 		     struct nvkm_output **);
+int gm107_sor_dp_pattern(struct nvkm_output_dp *, int);
+
+int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+		     struct nvkm_output **);
 #endif
@@ -40,8 +40,7 @@ static int
 gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
 	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
-	const u32 loff = gf119_sor_loff(outp);
-	nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
+	nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern);
 	return 0;
 }
 
@@ -64,7 +63,7 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
 	return 0;
 }
 
-static int
+int
 gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
 		     int ln, int vs, int pe, int pc)
 {
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c (new file, 53 lines)

@@ -0,0 +1,53 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+#include "outpdp.h"
+
+int
+gm107_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+{
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+	const u32 soff = outp->base.or * 0x800;
+	const u32 data = 0x01010101 * pattern;
+	if (outp->base.info.sorconf.link & 1)
+		nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
+	else
+		nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
+	return 0;
+}
+
+static const struct nvkm_output_dp_func
+gm107_sor_dp_func = {
+	.pattern = gm107_sor_dp_pattern,
+	.lnk_pwr = g94_sor_dp_lnk_pwr,
+	.lnk_ctl = gf119_sor_dp_lnk_ctl,
+	.drv_ctl = gf119_sor_dp_drv_ctl,
+};
+
+int
+gm107_sor_dp_new(struct nvkm_disp *disp, int index,
+		 struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+	return nvkm_output_dp_new_(&gm107_sor_dp_func, disp, index, dcbE, poutp);
+}
@@ -56,19 +56,6 @@ gm200_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
 	return lane * 0x08;
 }
 
-static int
-gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
-{
-	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
-	const u32 soff = gm200_sor_soff(outp);
-	const u32 data = 0x01010101 * pattern;
-	if (outp->base.info.sorconf.link & 1)
-		nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
-	else
-		nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
-	return 0;
-}
-
 static int
 gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
 {
@@ -129,7 +116,7 @@ gm200_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
 
 static const struct nvkm_output_dp_func
 gm200_sor_dp_func = {
-	.pattern = gm200_sor_dp_pattern,
+	.pattern = gm107_sor_dp_pattern,
 	.lnk_pwr = gm200_sor_dp_lnk_pwr,
 	.lnk_ctl = gf119_sor_dp_lnk_ctl,
 	.drv_ctl = gm200_sor_dp_drv_ctl,
@@ -949,22 +949,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
 }
 
 static const struct nvkm_enum gf100_mp_warp_error[] = {
-	{ 0x00, "NO_ERROR" },
-	{ 0x01, "STACK_MISMATCH" },
+	{ 0x01, "STACK_ERROR" },
+	{ 0x02, "API_STACK_ERROR" },
+	{ 0x03, "RET_EMPTY_STACK_ERROR" },
+	{ 0x04, "PC_WRAP" },
 	{ 0x05, "MISALIGNED_PC" },
-	{ 0x08, "MISALIGNED_GPR" },
-	{ 0x09, "INVALID_OPCODE" },
-	{ 0x0d, "GPR_OUT_OF_BOUNDS" },
-	{ 0x0e, "MEM_OUT_OF_BOUNDS" },
-	{ 0x0f, "UNALIGNED_MEM_ACCESS" },
+	{ 0x06, "PC_OVERFLOW" },
+	{ 0x07, "MISALIGNED_IMMC_ADDR" },
+	{ 0x08, "MISALIGNED_REG" },
+	{ 0x09, "ILLEGAL_INSTR_ENCODING" },
+	{ 0x0a, "ILLEGAL_SPH_INSTR_COMBO" },
+	{ 0x0b, "ILLEGAL_INSTR_PARAM" },
+	{ 0x0c, "INVALID_CONST_ADDR" },
+	{ 0x0d, "OOR_REG" },
+	{ 0x0e, "OOR_ADDR" },
+	{ 0x0f, "MISALIGNED_ADDR" },
 	{ 0x10, "INVALID_ADDR_SPACE" },
-	{ 0x11, "INVALID_PARAM" },
+	{ 0x11, "ILLEGAL_INSTR_PARAM2" },
+	{ 0x12, "INVALID_CONST_ADDR_LDC" },
+	{ 0x13, "GEOMETRY_SM_ERROR" },
+	{ 0x14, "DIVERGENT" },
+	{ 0x15, "WARP_EXIT" },
 	{}
 };
 
 static const struct nvkm_bitfield gf100_mp_global_error[] = {
+	{ 0x00000001, "SM_TO_SM_FAULT" },
+	{ 0x00000002, "L1_ERROR" },
 	{ 0x00000004, "MULTIPLE_WARP_ERRORS" },
-	{ 0x00000008, "OUT_OF_STACK_SPACE" },
+	{ 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
+	{ 0x00000010, "BPT_INT" },
+	{ 0x00000020, "BPT_PAUSE" },
+	{ 0x00000040, "SINGLE_STEP_COMPLETE" },
+	{ 0x20000000, "ECC_SEC_ERROR" },
+	{ 0x40000000, "ECC_DED_ERROR" },
+	{ 0x80000000, "TIMEOUT" },
 	{}
 };
 
@@ -141,7 +141,8 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
 {
 	u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
 	if (data) {
-		info->match     = nvbios_rd16(bios, data + 0x00);
+		info->proto     = nvbios_rd08(bios, data + 0x00);
+		info->flags     = nvbios_rd16(bios, data + 0x01);
 		info->clkcmp[0] = nvbios_rd16(bios, data + 0x02);
 		info->clkcmp[1] = nvbios_rd16(bios, data + 0x04);
 	}
@@ -149,12 +150,13 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
 }
 
 u16
-nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type,
+nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags,
 		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info)
 {
 	u16 data, idx = 0;
 	while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
-		if (info->match == type)
+		if ((info->proto == proto || info->proto == 0xff) &&
+		    (info->flags == flags))
 			break;
 	}
 	return data;
@@ -69,11 +69,11 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
 }
 
 static void
-gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s)
+gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s)
 {
 	struct nvkm_subdev *subdev = &ltc->subdev;
 	struct nvkm_device *device = subdev->device;
-	u32 base = 0x140000 + (c * 0x2000) + (s * 0x200);
+	u32 base = 0x140400 + (c * 0x2000) + (s * 0x200);
 	u32 stat = nvkm_rd32(device, base + 0x00c);
 
 	if (stat) {
@@ -92,7 +92,7 @@ gm107_ltc_intr(struct nvkm_ltc *ltc)
 	while (mask) {
 		u32 s, c = __ffs(mask);
 		for (s = 0; s < ltc->lts_nr; s++)
-			gm107_ltc_lts_isr(ltc, c, s);
+			gm107_ltc_intr_lts(ltc, c, s);
 		mask &= ~(1 << c);
 	}
 }
@@ -46,7 +46,7 @@ static const struct nvkm_ltc_func
 gm200_ltc = {
 	.oneinit = gm200_ltc_oneinit,
 	.init = gm200_ltc_init,
-	.intr = gm107_ltc_intr, /*XXX: not validated */
+	.intr = gm107_ltc_intr,
 	.cbc_clear = gm107_ltc_cbc_clear,
 	.cbc_wait = gm107_ltc_cbc_wait,
 	.zbc = 16,
@@ -1167,7 +1167,6 @@ static int dsi_regulator_init(struct platform_device *dsidev)
 {
 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 	struct regulator *vdds_dsi;
-	int r;
 
 	if (dsi->vdds_dsi_reg != NULL)
 		return 0;
@@ -120,7 +120,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
 
 static int hdmi_init_regulator(void)
 {
-	int r;
 	struct regulator *reg;
 
 	if (hdmi.vdda_reg != NULL)
@@ -456,14 +456,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 
 	WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
 
-	HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-		  vc4_state->mm.start);
-
-	if (debug_dump_regs) {
-		DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
-		vc4_hvs_dump_state(dev);
-	}
-
 	if (crtc->state->event) {
 		unsigned long flags;
 
@@ -473,8 +465,20 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 
 		spin_lock_irqsave(&dev->event_lock, flags);
 		vc4_crtc->event = crtc->state->event;
-		spin_unlock_irqrestore(&dev->event_lock, flags);
 		crtc->state->event = NULL;
+
+		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+			  vc4_state->mm.start);
+
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	} else {
+		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+			  vc4_state->mm.start);
+	}
+
+	if (debug_dump_regs) {
+		DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
+		vc4_hvs_dump_state(dev);
 	}
 }
 
@@ -500,12 +504,17 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
 {
 	struct drm_crtc *crtc = &vc4_crtc->base;
 	struct drm_device *dev = crtc->dev;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+	u32 chan = vc4_crtc->channel;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->event_lock, flags);
-	if (vc4_crtc->event) {
+	if (vc4_crtc->event &&
+	    (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) {
 		drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
 		vc4_crtc->event = NULL;
+		drm_crtc_vblank_put(crtc);
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
|
||||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
drm_crtc_vblank_put(crtc);
|
||||||
drm_framebuffer_unreference(flip_state->fb);
|
drm_framebuffer_unreference(flip_state->fb);
|
||||||
kfree(flip_state);
|
kfree(flip_state);
|
||||||
|
|
||||||
|
@@ -598,6 +608,8 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
 		return ret;
 	}
 
+	WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
 	/* Immediately update the plane's legacy fb pointer, so that later
 	 * modeset prep sees the state that will be present when the semaphore
 	 * is released.
@@ -66,12 +66,12 @@ static const struct file_operations vc4_drm_fops = {
 };
 
 static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
-	DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0),
-	DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0),
-	DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0),
-	DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0),
-	DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0),
-	DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl,
 			  DRM_ROOT_ONLY),
 };
@@ -91,7 +91,7 @@ static struct drm_driver vc4_drm_driver = {
 
 	.enable_vblank = vc4_enable_vblank,
 	.disable_vblank = vc4_disable_vblank,
-	.get_vblank_counter = drm_vblank_count,
+	.get_vblank_counter = drm_vblank_no_hw_counter,
 
 #if defined(CONFIG_DEBUG_FS)
 	.debugfs_init = vc4_debugfs_init,
@@ -117,10 +117,18 @@ static int vc4_atomic_commit(struct drm_device *dev,
 		return -ENOMEM;
 
 	/* Make sure that any outstanding modesets have finished. */
-	ret = down_interruptible(&vc4->async_modeset);
-	if (ret) {
-		kfree(c);
-		return ret;
+	if (nonblock) {
+		ret = down_trylock(&vc4->async_modeset);
+		if (ret) {
+			kfree(c);
+			return -EBUSY;
+		}
+	} else {
+		ret = down_interruptible(&vc4->async_modeset);
+		if (ret) {
+			kfree(c);
+			return ret;
+		}
 	}
 
 	ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -341,6 +341,10 @@
 #define SCALER_DISPLACT0                        0x00000030
 #define SCALER_DISPLACT1                        0x00000034
 #define SCALER_DISPLACT2                        0x00000038
+#define SCALER_DISPLACTX(x)			(SCALER_DISPLACT0 +	\
+						 (x) * (SCALER_DISPLACT1 - \
+							SCALER_DISPLACT0))
+
 #define SCALER_DISPCTRL0                        0x00000040
 # define SCALER_DISPCTRLX_ENABLE		BIT(31)
 # define SCALER_DISPCTRLX_RESET			BIT(30)