Merge tag 'drm-intel-next-2017-07-17' of git://anongit.freedesktop.org/git/drm-intel into drm-next

2nd round of 4.14 features:

- prep for deferred fbdev setup
- refactor fixed 16.16 computations and skl+ wm code (Mahesh Kumar)
- more cnl patches (Rodrigo, Imre et al)
- tighten context cleanup and handling (Chris Wilson)
- fix interlaced handling on skl+ (Mahesh Kumar)
- small bits as usual

* tag 'drm-intel-next-2017-07-17' of git://anongit.freedesktop.org/git/drm-intel: (84 commits)
  drm/i915: Update DRIVER_DATE to 20170717
  drm/i915: Protect against deferred fbdev setup
  drm/i915/fbdev: Always forward hotplug events
  drm/i915/skl+: unify cpp value in WM calculation
  drm/i915/skl+: WM calculation don't require height
  drm/i915: Addition wrapper for fixed16.16 operation
  drm/i915: cleanup fixed-point wrappers naming
  drm/i915: Always perform internal fixed16 division in 64 bits
  drm/i915: take-out common clamping code of fixed16 wrappers
  drm/i915/cnl: Add missing type case.
  drm/i915/cnl: Add max allowed Cannonlake DC.
  drm/i915: Make DP-MST connector info work
  drm/i915/cnl: Get DDI clock based on PLLs.
  drm/i915/cnl: Inherit RPS stuff from previous platforms.
  drm/i915/cnl: Gen10 render context size.
  drm/i915/cnl: Don't trust VBT's alternate pin for port D for now.
  drm/i915: Fix the kernel panic when using aliasing ppgtt
  drm/i915/cnl: Cannonlake color init.
  drm/i915/cnl: Add force wake for gen10+.
  x86/gpu: CNL uses the same GMS values as SKL
  ...
Committed by Dave Airlie on 2017-07-20 11:31:43 +10:00, commit 2d62c799f8.
49 changed files with 1521 additions and 638 deletions.

View file

@@ -527,6 +527,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
     INTEL_BXT_IDS(&gen9_early_ops),
     INTEL_KBL_IDS(&gen9_early_ops),
     INTEL_GLK_IDS(&gen9_early_ops),
+    INTEL_CNL_IDS(&gen9_early_ops),
 };
 static void __init

View file

@@ -21,6 +21,7 @@ config DRM_I915
     select ACPI_BUTTON if ACPI
     select SYNC_FILE
     select IOSF_MBI
+    select CRC32
     help
       Choose this option if you have a system that has "Intel Graphics
       Media Accelerator" or "HD Graphics" integrated graphics,

View file

@@ -616,7 +616,7 @@ err:
 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
 {
-    i915_gem_context_put_unlocked(vgpu->shadow_ctx);
+    i915_gem_context_put(vgpu->shadow_ctx);
 }
 int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)

View file

@@ -1159,7 +1159,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
         reqf = I915_READ(GEN6_RPNSWREQ);
-        if (IS_GEN9(dev_priv))
+        if (INTEL_GEN(dev_priv) >= 9)
             reqf >>= 23;
         else {
             reqf &= ~GEN6_TURBO_DISABLE;
@@ -1181,7 +1181,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
         rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
         rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
         rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
-        if (IS_GEN9(dev_priv))
+        if (INTEL_GEN(dev_priv) >= 9)
             cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
         else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
             cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
@@ -1210,7 +1210,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                dev_priv->rps.pm_intrmsk_mbz);
         seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
         seq_printf(m, "Render p-state ratio: %d\n",
-               (gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8);
+               (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
         seq_printf(m, "Render p-state VID: %d\n",
                gt_perf_status & 0xff);
         seq_printf(m, "Render p-state limit: %d\n",
@@ -1241,18 +1241,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
         max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
                 rp_state_cap >> 16) & 0xff;
-        max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
+        max_freq *= (IS_GEN9_BC(dev_priv) ||
+                 IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
         seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                intel_gpu_freq(dev_priv, max_freq));
         max_freq = (rp_state_cap & 0xff00) >> 8;
-        max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
+        max_freq *= (IS_GEN9_BC(dev_priv) ||
+                 IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
         seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                intel_gpu_freq(dev_priv, max_freq));
         max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
                 rp_state_cap >> 0) & 0xff;
-        max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
+        max_freq *= (IS_GEN9_BC(dev_priv) ||
+                 IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
         seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                intel_gpu_freq(dev_priv, max_freq));
         seq_printf(m, "Max overclocked frequency: %dMHz\n",
@@ -1407,6 +1410,23 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
     return 0;
 }
+static int i915_reset_info(struct seq_file *m, void *unused)
+{
+    struct drm_i915_private *dev_priv = node_to_i915(m->private);
+    struct i915_gpu_error *error = &dev_priv->gpu_error;
+    struct intel_engine_cs *engine;
+    enum intel_engine_id id;
+    seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
+    for_each_engine(engine, dev_priv, id) {
+        seq_printf(m, "%s = %u\n", engine->name,
+               i915_reset_engine_count(error, engine));
+    }
+    return 0;
+}
 static int ironlake_drpc_info(struct seq_file *m)
 {
     struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1838,7 +1858,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
     if (ret)
         goto out;
-    if (IS_GEN9_BC(dev_priv)) {
+    if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
         /* Convert GT frequency to 50 HZ units */
         min_gpu_freq =
             dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
@@ -1858,7 +1878,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
                        &ia_freq);
         seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
                intel_gpu_freq(dev_priv, (gpu_freq *
-                             (IS_GEN9_BC(dev_priv) ?
+                             (IS_GEN9_BC(dev_priv) ||
+                              IS_CANNONLAKE(dev_priv) ?
                               GEN9_FREQ_SCALER : 1))),
                ((ia_freq >> 0) & 0xff) * 100,
                ((ia_freq >> 8) & 0xff) * 100);
@@ -1914,7 +1935,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
         return ret;
 #ifdef CONFIG_DRM_FBDEV_EMULATION
-    if (dev_priv->fbdev) {
+    if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
         fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
         seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
@@ -1970,7 +1991,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
     if (ret)
         return ret;
-    list_for_each_entry(ctx, &dev_priv->context_list, link) {
+    list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
         seq_printf(m, "HW context %u ", ctx->hw_id);
         if (ctx->pid) {
             struct task_struct *task;
@@ -2076,7 +2097,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
     if (ret)
         return ret;
-    list_for_each_entry(ctx, &dev_priv->context_list, link)
+    list_for_each_entry(ctx, &dev_priv->contexts.list, link)
         for_each_engine(engine, dev_priv, id)
             i915_dump_lrc_obj(m, ctx, engine);
@@ -2310,6 +2331,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
     seq_printf(m, "GPU busy? %s [%d requests]\n",
            yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
     seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
+    seq_printf(m, "Boosts outstanding? %d\n",
+           atomic_read(&dev_priv->rps.num_waiters));
     seq_printf(m, "Frequency requested %d\n",
            intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
     seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
@@ -2323,22 +2346,20 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
            intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
     mutex_lock(&dev->filelist_mutex);
-    spin_lock(&dev_priv->rps.client_lock);
     list_for_each_entry_reverse(file, &dev->filelist, lhead) {
         struct drm_i915_file_private *file_priv = file->driver_priv;
         struct task_struct *task;
         rcu_read_lock();
         task = pid_task(file->pid, PIDTYPE_PID);
-        seq_printf(m, "%s [%d]: %d boosts%s\n",
+        seq_printf(m, "%s [%d]: %d boosts\n",
                task ? task->comm : "<unknown>",
                task ? task->pid : -1,
-               file_priv->rps.boosts,
-               list_empty(&file_priv->rps.link) ? "" : ", active");
+               atomic_read(&file_priv->rps.boosts));
         rcu_read_unlock();
     }
-    seq_printf(m, "Kernel (anonymous) boosts: %d\n", dev_priv->rps.boosts);
-    spin_unlock(&dev_priv->rps.client_lock);
+    seq_printf(m, "Kernel (anonymous) boosts: %d\n",
+           atomic_read(&dev_priv->rps.boosts));
     mutex_unlock(&dev->filelist_mutex);
     if (INTEL_GEN(dev_priv) >= 6 &&
@@ -3289,6 +3310,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
 static int i915_engine_info(struct seq_file *m, void *unused)
 {
     struct drm_i915_private *dev_priv = node_to_i915(m->private);
+    struct i915_gpu_error *error = &dev_priv->gpu_error;
     struct intel_engine_cs *engine;
     enum intel_engine_id id;
@@ -3312,6 +3334,8 @@ static int i915_engine_info(struct seq_file *m, void *unused)
            engine->hangcheck.seqno,
            jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
            engine->timeline->inflight_seqnos);
+        seq_printf(m, "\tReset count: %d\n",
+               i915_reset_engine_count(error, engine));
         rcu_read_lock();
@@ -3758,13 +3782,18 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
     drm_connector_list_iter_begin(dev, &conn_iter);
     drm_for_each_connector_iter(connector, &conn_iter) {
+        struct intel_encoder *encoder;
         if (connector->connector_type !=
             DRM_MODE_CONNECTOR_DisplayPort)
             continue;
-        if (connector->status == connector_status_connected &&
-            connector->encoder != NULL) {
-            intel_dp = enc_to_intel_dp(connector->encoder);
+        encoder = to_intel_encoder(connector->encoder);
+        if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+            continue;
+        if (encoder && connector->status == connector_status_connected) {
+            intel_dp = enc_to_intel_dp(&encoder->base);
             status = kstrtoint(input_buffer, 10, &val);
             if (status < 0)
                 break;
@@ -3796,13 +3825,18 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
     drm_connector_list_iter_begin(dev, &conn_iter);
     drm_for_each_connector_iter(connector, &conn_iter) {
+        struct intel_encoder *encoder;
         if (connector->connector_type !=
             DRM_MODE_CONNECTOR_DisplayPort)
             continue;
-        if (connector->status == connector_status_connected &&
-            connector->encoder != NULL) {
-            intel_dp = enc_to_intel_dp(connector->encoder);
+        encoder = to_intel_encoder(connector->encoder);
+        if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+            continue;
+        if (encoder && connector->status == connector_status_connected) {
+            intel_dp = enc_to_intel_dp(&encoder->base);
             if (intel_dp->compliance.test_active)
                 seq_puts(m, "1");
             else
@@ -3842,13 +3876,18 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
     drm_connector_list_iter_begin(dev, &conn_iter);
     drm_for_each_connector_iter(connector, &conn_iter) {
+        struct intel_encoder *encoder;
         if (connector->connector_type !=
             DRM_MODE_CONNECTOR_DisplayPort)
             continue;
-        if (connector->status == connector_status_connected &&
-            connector->encoder != NULL) {
-            intel_dp = enc_to_intel_dp(connector->encoder);
+        encoder = to_intel_encoder(connector->encoder);
+        if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+            continue;
+        if (encoder && connector->status == connector_status_connected) {
+            intel_dp = enc_to_intel_dp(&encoder->base);
             if (intel_dp->compliance.test_type ==
                 DP_TEST_LINK_EDID_READ)
                 seq_printf(m, "%lx",
@@ -3895,13 +3934,18 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
     drm_connector_list_iter_begin(dev, &conn_iter);
     drm_for_each_connector_iter(connector, &conn_iter) {
+        struct intel_encoder *encoder;
         if (connector->connector_type !=
             DRM_MODE_CONNECTOR_DisplayPort)
             continue;
-        if (connector->status == connector_status_connected &&
-            connector->encoder != NULL) {
-            intel_dp = enc_to_intel_dp(connector->encoder);
+        encoder = to_intel_encoder(connector->encoder);
+        if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+            continue;
+        if (encoder && connector->status == connector_status_connected) {
+            intel_dp = enc_to_intel_dp(&encoder->base);
             seq_printf(m, "%02lx", intel_dp->compliance.test_type);
         } else
             seq_puts(m, "0");
@@ -4824,6 +4868,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
     {"i915_huc_load_status", i915_huc_load_status_info, 0},
     {"i915_frequency_info", i915_frequency_info, 0},
     {"i915_hangcheck_info", i915_hangcheck_info, 0},
+    {"i915_reset_info", i915_reset_info, 0},
     {"i915_drpc_info", i915_drpc_info, 0},
     {"i915_emon_status", i915_emon_status, 0},
     {"i915_ring_freq_table", i915_ring_freq_table, 0},

View file

@@ -132,9 +132,13 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
         DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
     } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
         ret = PCH_CPT;
-        DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
+        DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
     } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
         ret = PCH_LPT;
+        if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+            dev_priv->pch_id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+        else
+            dev_priv->pch_id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
         DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
     } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
         ret = PCH_SPT;
@@ -173,29 +177,25 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
     while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
         if (pch->vendor == PCI_VENDOR_ID_INTEL) {
             unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
-            unsigned short id_ext = pch->device &
-                INTEL_PCH_DEVICE_ID_MASK_EXT;
+            dev_priv->pch_id = id;
             if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
-                dev_priv->pch_id = id;
                 dev_priv->pch_type = PCH_IBX;
                 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
                 WARN_ON(!IS_GEN5(dev_priv));
             } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
-                dev_priv->pch_id = id;
                 dev_priv->pch_type = PCH_CPT;
                 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
-                WARN_ON(!(IS_GEN6(dev_priv) ||
-                    IS_IVYBRIDGE(dev_priv)));
+                WARN_ON(!IS_GEN6(dev_priv) &&
+                    !IS_IVYBRIDGE(dev_priv));
             } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
                 /* PantherPoint is CPT compatible */
-                dev_priv->pch_id = id;
                 dev_priv->pch_type = PCH_CPT;
                 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
-                WARN_ON(!(IS_GEN6(dev_priv) ||
-                    IS_IVYBRIDGE(dev_priv)));
+                WARN_ON(!IS_GEN6(dev_priv) &&
+                    !IS_IVYBRIDGE(dev_priv));
             } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
-                dev_priv->pch_id = id;
                 dev_priv->pch_type = PCH_LPT;
                 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
                 WARN_ON(!IS_HASWELL(dev_priv) &&
@@ -203,51 +203,60 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
                 WARN_ON(IS_HSW_ULT(dev_priv) ||
                     IS_BDW_ULT(dev_priv));
             } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
-                dev_priv->pch_id = id;
                 dev_priv->pch_type = PCH_LPT;
                 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
                 WARN_ON(!IS_HASWELL(dev_priv) &&
                     !IS_BROADWELL(dev_priv));
                 WARN_ON(!IS_HSW_ULT(dev_priv) &&
                     !IS_BDW_ULT(dev_priv));
+            } else if (id == INTEL_PCH_WPT_DEVICE_ID_TYPE) {
+                /* WildcatPoint is LPT compatible */
+                dev_priv->pch_type = PCH_LPT;
+                DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
+                WARN_ON(!IS_HASWELL(dev_priv) &&
+                    !IS_BROADWELL(dev_priv));
+                WARN_ON(IS_HSW_ULT(dev_priv) ||
+                    IS_BDW_ULT(dev_priv));
+            } else if (id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) {
+                /* WildcatPoint is LPT compatible */
+                dev_priv->pch_type = PCH_LPT;
+                DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
+                WARN_ON(!IS_HASWELL(dev_priv) &&
+                    !IS_BROADWELL(dev_priv));
+                WARN_ON(!IS_HSW_ULT(dev_priv) &&
+                    !IS_BDW_ULT(dev_priv));
             } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
-                dev_priv->pch_id = id;
                 dev_priv->pch_type = PCH_SPT;
                 DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
                 WARN_ON(!IS_SKYLAKE(dev_priv) &&
                     !IS_KABYLAKE(dev_priv));
-            } else if (id_ext == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
-                dev_priv->pch_id = id_ext;
+            } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
                 dev_priv->pch_type = PCH_SPT;
                 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
                 WARN_ON(!IS_SKYLAKE(dev_priv) &&
                     !IS_KABYLAKE(dev_priv));
             } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
-                dev_priv->pch_id = id;
                 dev_priv->pch_type = PCH_KBP;
                 DRM_DEBUG_KMS("Found KabyPoint PCH\n");
                 WARN_ON(!IS_SKYLAKE(dev_priv) &&
                     !IS_KABYLAKE(dev_priv));
             } else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
-                dev_priv->pch_id = id;
                 dev_priv->pch_type = PCH_CNP;
                 DRM_DEBUG_KMS("Found CannonPoint PCH\n");
                 WARN_ON(!IS_CANNONLAKE(dev_priv) &&
                     !IS_COFFEELAKE(dev_priv));
-            } else if (id_ext == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
-                dev_priv->pch_id = id_ext;
+            } else if (id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
                 dev_priv->pch_type = PCH_CNP;
                 DRM_DEBUG_KMS("Found CannonPoint LP PCH\n");
                 WARN_ON(!IS_CANNONLAKE(dev_priv) &&
                     !IS_COFFEELAKE(dev_priv));
-            } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
-                   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
-                   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
+            } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
+                   id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
+                   (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
                     pch->subsystem_vendor ==
                         PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
                     pch->subsystem_device ==
                         PCI_SUBDEVICE_ID_QEMU)) {
-                dev_priv->pch_id = id;
                 dev_priv->pch_type =
                     intel_virt_detect_pch(dev_priv);
             } else
@@ -331,6 +340,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
         break;
     case I915_PARAM_HAS_GPU_RESET:
         value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
+        if (value && intel_has_reset_engine(dev_priv))
+            value = 2;
         break;
     case I915_PARAM_HAS_RESOURCE_STREAMER:
         value = HAS_RESOURCE_STREAMER(dev_priv);
@@ -585,16 +596,18 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
 static void i915_gem_fini(struct drm_i915_private *dev_priv)
 {
+    flush_workqueue(dev_priv->wq);
     mutex_lock(&dev_priv->drm.struct_mutex);
     intel_uc_fini_hw(dev_priv);
     i915_gem_cleanup_engines(dev_priv);
-    i915_gem_context_fini(dev_priv);
+    i915_gem_contexts_fini(dev_priv);
     i915_gem_cleanup_userptr(dev_priv);
     mutex_unlock(&dev_priv->drm.struct_mutex);
     i915_gem_drain_freed_objects(dev_priv);
-    WARN_ON(!list_empty(&dev_priv->context_list));
+    WARN_ON(!list_empty(&dev_priv->contexts.list));
 }
 static int i915_load_modeset_init(struct drm_device *dev)
@@ -1427,9 +1440,10 @@ static void i915_driver_release(struct drm_device *dev)
 static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
 {
+    struct drm_i915_private *i915 = to_i915(dev);
     int ret;
-    ret = i915_gem_open(dev, file);
+    ret = i915_gem_open(i915, file);
     if (ret)
         return ret;
@@ -1459,7 +1473,7 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
     struct drm_i915_file_private *file_priv = file->driver_priv;
     mutex_lock(&dev->struct_mutex);
-    i915_gem_context_close(dev, file);
+    i915_gem_context_close(file);
     i915_gem_release(dev, file);
     mutex_unlock(&dev->struct_mutex);
@@ -1911,9 +1925,72 @@ wakeup:
 error:
     i915_gem_set_wedged(dev_priv);
+    i915_gem_retire_requests(dev_priv);
     goto finish;
 }
+/**
+ * i915_reset_engine - reset GPU engine to recover from a hang
+ * @engine: engine to reset
+ *
+ * Reset a specific GPU engine. Useful if a hang is detected.
+ * Returns zero on successful reset or otherwise an error code.
+ *
+ * Procedure is:
+ *  - identifies the request that caused the hang and it is dropped
+ *  - reset engine (which will force the engine to idle)
+ *  - re-init/configure engine
+ */
+int i915_reset_engine(struct intel_engine_cs *engine)
+{
+    struct i915_gpu_error *error = &engine->i915->gpu_error;
+    struct drm_i915_gem_request *active_request;
+    int ret;
+    GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
+    DRM_DEBUG_DRIVER("resetting %s\n", engine->name);
+    active_request = i915_gem_reset_prepare_engine(engine);
+    if (IS_ERR(active_request)) {
+        DRM_DEBUG_DRIVER("Previous reset failed, promote to full reset\n");
+        ret = PTR_ERR(active_request);
+        goto out;
+    }
+    /*
+     * The request that caused the hang is stuck on elsp, we know the
+     * active request and can drop it, adjust head to skip the offending
+     * request to resume executing remaining requests in the queue.
+     */
+    i915_gem_reset_engine(engine, active_request);
+    /* Finally, reset just this engine. */
+    ret = intel_gpu_reset(engine->i915, intel_engine_flag(engine));
+    i915_gem_reset_finish_engine(engine);
+    if (ret) {
+        /* If we fail here, we expect to fallback to a global reset */
+        DRM_DEBUG_DRIVER("Failed to reset %s, ret=%d\n",
+                 engine->name, ret);
+        goto out;
+    }
+    /*
+     * The engine and its registers (and workarounds in case of render)
+     * have been reset to their default values. Follow the init_ring
+     * process to program RING_MODE, HWSP and re-enable submission.
+     */
+    ret = engine->init_hw(engine);
+    if (ret)
+        goto out;
+    error->reset_engine_count[engine->id]++;
+out:
+    return ret;
+}
 static int i915_pm_suspend(struct device *kdev)
 {
     struct pci_dev *pdev = to_pci_dev(kdev);
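Note on the new per-engine reset above: i915_reset_engine() expects its caller to have already claimed the per-engine I915_RESET_ENGINE flag (see the GEM_BUG_ON), and on failure the driver is expected to escalate to a full GPU reset. A rough, illustrative sketch only — not part of this patch; the wrapper name is made up and the real hangcheck/error-handling path does additional flag and wakeup management:

/* Illustrative sketch, not from this patch: prefer the per-engine reset
 * when available and fall back to a full device reset otherwise. */
static void example_recover_engine(struct drm_i915_private *dev_priv,
                                   struct intel_engine_cs *engine)
{
    struct i915_gpu_error *error = &dev_priv->gpu_error;

    if (intel_has_reset_engine(dev_priv) &&
        !test_and_set_bit(I915_RESET_ENGINE + engine->id, &error->flags)) {
        int ret = i915_reset_engine(engine);

        clear_bit(I915_RESET_ENGINE + engine->id, &error->flags);
        if (ret == 0)
            return; /* engine recovered, no global reset needed */
    }

    /* Per-engine reset unavailable or failed: escalate to a full reset. */
    i915_reset(dev_priv);
}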

View file

@@ -80,8 +80,8 @@
 #define DRIVER_NAME "i915"
 #define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20170619"
-#define DRIVER_TIMESTAMP 1497857498
+#define DRIVER_DATE "20170717"
+#define DRIVER_TIMESTAMP 1500275179
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -122,7 +122,7 @@ static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
     return false;
 }
-static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val)
+static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
 {
     uint_fixed_16_16_t fp;
@@ -132,17 +132,17 @@ static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val)
     return fp;
 }
-static inline uint32_t fixed_16_16_to_u32_round_up(uint_fixed_16_16_t fp)
+static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
 {
     return DIV_ROUND_UP(fp.val, 1 << 16);
 }
-static inline uint32_t fixed_16_16_to_u32(uint_fixed_16_16_t fp)
+static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
 {
     return fp.val >> 16;
 }
-static inline uint_fixed_16_16_t min_fixed_16_16(uint_fixed_16_16_t min1,
+static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
                          uint_fixed_16_16_t min2)
 {
     uint_fixed_16_16_t min;
@@ -151,7 +151,7 @@ static inline uint_fixed_16_16_t min_fixed_16_16(uint_fixed_16_16_t min1,
     return min;
 }
-static inline uint_fixed_16_16_t max_fixed_16_16(uint_fixed_16_16_t max1,
+static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
                          uint_fixed_16_16_t max2)
 {
     uint_fixed_16_16_t max;
@@ -160,6 +160,14 @@ static inline uint_fixed_16_16_t max_fixed_16_16(uint_fixed_16_16_t max1,
     return max;
 }
+static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
+{
+    uint_fixed_16_16_t fp;
+    WARN_ON(val >> 32);
+    fp.val = clamp_t(uint32_t, val, 0, ~0);
+    return fp;
+}
 static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
                         uint_fixed_16_16_t d)
 {
@@ -170,48 +178,30 @@ static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
                         uint_fixed_16_16_t mul)
 {
     uint64_t intermediate_val;
-    uint32_t result;
     intermediate_val = (uint64_t) val * mul.val;
     intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
     WARN_ON(intermediate_val >> 32);
-    result = clamp_t(uint32_t, intermediate_val, 0, ~0);
-    return result;
+    return clamp_t(uint32_t, intermediate_val, 0, ~0);
 }
 static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
                          uint_fixed_16_16_t mul)
 {
     uint64_t intermediate_val;
-    uint_fixed_16_16_t fp;
     intermediate_val = (uint64_t) val.val * mul.val;
     intermediate_val = intermediate_val >> 16;
-    WARN_ON(intermediate_val >> 32);
-    fp.val = clamp_t(uint32_t, intermediate_val, 0, ~0);
-    return fp;
+    return clamp_u64_to_fixed16(intermediate_val);
 }
-static inline uint_fixed_16_16_t fixed_16_16_div(uint32_t val, uint32_t d)
+static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
 {
-    uint_fixed_16_16_t fp, res;
-    fp = u32_to_fixed_16_16(val);
-    res.val = DIV_ROUND_UP(fp.val, d);
-    return res;
-}
-static inline uint_fixed_16_16_t fixed_16_16_div_u64(uint32_t val, uint32_t d)
-{
-    uint_fixed_16_16_t res;
     uint64_t interm_val;
     interm_val = (uint64_t)val << 16;
     interm_val = DIV_ROUND_UP_ULL(interm_val, d);
-    WARN_ON(interm_val >> 32);
-    res.val = (uint32_t) interm_val;
-    return res;
+    return clamp_u64_to_fixed16(interm_val);
 }
 static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
@@ -225,16 +215,32 @@ static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
     return clamp_t(uint32_t, interm_val, 0, ~0);
 }
-static inline uint_fixed_16_16_t mul_u32_fixed_16_16(uint32_t val,
+static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
                          uint_fixed_16_16_t mul)
 {
     uint64_t intermediate_val;
-    uint_fixed_16_16_t fp;
     intermediate_val = (uint64_t) val * mul.val;
-    WARN_ON(intermediate_val >> 32);
-    fp.val = (uint32_t) intermediate_val;
-    return fp;
+    return clamp_u64_to_fixed16(intermediate_val);
+}
+static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
+                         uint_fixed_16_16_t add2)
+{
+    uint64_t interm_sum;
+    interm_sum = (uint64_t) add1.val + add2.val;
+    return clamp_u64_to_fixed16(interm_sum);
+}
+static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
+                         uint32_t add2)
+{
+    uint64_t interm_sum;
+    uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);
+    interm_sum = (uint64_t) add1.val + interm_add2.val;
+    return clamp_u64_to_fixed16(interm_sum);
 }
 static inline const char *yesno(bool v)
@@ -584,8 +590,7 @@ struct drm_i915_file_private {
     struct idr context_idr;
     struct intel_rps_client {
-        struct list_head link;
-        unsigned boosts;
+        atomic_t boosts;
     } rps;
     unsigned int bsd_engine;
@@ -753,6 +758,7 @@ struct intel_csr {
     func(has_csr); \
     func(has_ddi); \
     func(has_dp_mst); \
+    func(has_reset_engine); \
     func(has_fbc); \
     func(has_fpga_dbg); \
     func(has_full_ppgtt); \
@@ -917,6 +923,7 @@ struct i915_gpu_state {
     enum intel_engine_hangcheck_action hangcheck_action;
     struct i915_address_space *vm;
     int num_requests;
+    u32 reset_count;
     /* position of active request inside the ring */
     u32 rq_head, rq_post, rq_tail;
@@ -1149,8 +1156,8 @@ struct i915_psr {
 enum intel_pch {
     PCH_NONE = 0, /* No PCH present */
     PCH_IBX,  /* Ibexpeak PCH */
-    PCH_CPT,  /* Cougarpoint PCH */
-    PCH_LPT,  /* Lynxpoint PCH */
+    PCH_CPT,  /* Cougarpoint/Pantherpoint PCH */
+    PCH_LPT,  /* Lynxpoint/Wildcatpoint PCH */
     PCH_SPT,  /* Sunrisepoint PCH */
     PCH_KBP,  /* Kabypoint PCH */
     PCH_CNP,  /* Cannonpoint PCH */
@@ -1166,6 +1173,7 @@ enum intel_sbi_destination {
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
 #define QUIRK_BACKLIGHT_PRESENT (1<<3)
 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
+#define QUIRK_INCREASE_T12_DELAY (1<<6)
 struct intel_fbdev;
 struct intel_fbc_work;
@@ -1301,13 +1309,10 @@ struct intel_gen6_power_mgmt {
     int last_adj;
     enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
-    spinlock_t client_lock;
-    struct list_head clients;
-    bool client_boost;
     bool enabled;
     struct delayed_work autoenable_work;
-    unsigned boosts;
+    atomic_t num_waiters;
+    atomic_t boosts;
     /* manual wa residency calculations */
     struct intel_rps_ei ei;
@@ -1550,6 +1555,12 @@ struct i915_gpu_error {
      * inspect the bit and do the reset directly, otherwise the worker
      * waits for the struct_mutex.
      *
+     * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
+     * acquire the struct_mutex to reset an engine, we need an explicit
+     * flag to prevent two concurrent reset attempts in the same engine.
+     * As the number of engines continues to grow, allocate the flags from
+     * the most significant bits.
+     *
      * #I915_WEDGED - If reset fails and we can no longer use the GPU,
      * we set the #I915_WEDGED bit. Prior to command submission, e.g.
      * i915_gem_request_alloc(), this bit is checked and the sequence
@@ -1559,6 +1570,10 @@ struct i915_gpu_error {
 #define I915_RESET_BACKOFF 0
 #define I915_RESET_HANDOFF 1
 #define I915_WEDGED (BITS_PER_LONG - 1)
+#define I915_RESET_ENGINE (I915_WEDGED - I915_NUM_ENGINES)
+    /** Number of times an engine has been reset */
+    u32 reset_engine_count[I915_NUM_ENGINES];
     /**
      * Waitqueue to signal when a hang is detected. Used to for waiters
@@ -2236,13 +2251,6 @@ struct drm_i915_private {
     DECLARE_HASHTABLE(mm_structs, 7);
     struct mutex mm_lock;
-    /* The hw wants to have a stable context identifier for the lifetime
-     * of the context (for OA, PASID, faults, etc). This is limited
-     * in execlists to 21 bits.
-     */
-    struct ida context_hw_ida;
-#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
     /* Kernel Modesetting */
     struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
@@ -2321,7 +2329,18 @@ struct drm_i915_private {
      */
     struct mutex av_mutex;
-    struct list_head context_list;
+    struct {
+        struct list_head list;
+        struct llist_head free_list;
+        struct work_struct free_work;
+        /* The hw wants to have a stable context identifier for the
+         * lifetime of the context (for OA, PASID, faults, etc).
+         * This is limited in execlists to 21 bits.
+         */
+        struct ida hw_ida;
+#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+    } contexts;
     u32 fdi_rx_config;
@@ -2996,16 +3015,17 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)
-#define INTEL_PCH_DEVICE_ID_MASK        0xff00
-#define INTEL_PCH_DEVICE_ID_MASK_EXT    0xff80
+#define INTEL_PCH_DEVICE_ID_MASK        0xff80
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE    0x3b00
 #define INTEL_PCH_CPT_DEVICE_ID_TYPE    0x1c00
 #define INTEL_PCH_PPT_DEVICE_ID_TYPE    0x1e00
 #define INTEL_PCH_LPT_DEVICE_ID_TYPE    0x8c00
 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+#define INTEL_PCH_WPT_DEVICE_ID_TYPE    0x8c80
+#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
 #define INTEL_PCH_SPT_DEVICE_ID_TYPE    0xA100
 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
-#define INTEL_PCH_KBP_DEVICE_ID_TYPE    0xA200
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE    0xA280
 #define INTEL_PCH_CNP_DEVICE_ID_TYPE    0xA300
 #define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
 #define INTEL_PCH_P2X_DEVICE_ID_TYPE    0x7100
@@ -3020,9 +3040,11 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
 #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
 #define HAS_PCH_LPT_LP(dev_priv) \
-    ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+    ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
+     (dev_priv)->pch_id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
 #define HAS_PCH_LPT_H(dev_priv) \
-    ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
+    ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
+     (dev_priv)->pch_id == INTEL_PCH_WPT_DEVICE_ID_TYPE)
 #define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
 #define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
@@ -3089,6 +3111,8 @@ extern void i915_driver_unload(struct drm_device *dev);
 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
 extern void i915_reset(struct drm_i915_private *dev_priv);
+extern int i915_reset_engine(struct intel_engine_cs *engine);
+extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
 extern int intel_guc_reset(struct drm_i915_private *dev_priv);
 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
 extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
@@ -3461,11 +3485,22 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
     return READ_ONCE(error->reset_count);
 }
+static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
+                      struct intel_engine_cs *engine)
+{
+    return READ_ONCE(error->reset_engine_count[engine->id]);
+}
+struct drm_i915_gem_request *
+i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
 void i915_gem_reset(struct drm_i915_private *dev_priv);
+void i915_gem_reset_finish_engine(struct intel_engine_cs *engine);
 void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
 bool i915_gem_unset_wedged(struct drm_i915_private *i915);
+void i915_gem_reset_engine(struct intel_engine_cs *engine,
+               struct drm_i915_gem_request *request);
 void i915_gem_init_mmio(struct drm_i915_private *i915);
 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
@@ -3499,7 +3534,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
                 int align);
-int i915_gem_open(struct drm_device *dev, struct drm_file *file);
+int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
@@ -3530,41 +3565,26 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
                      struct sg_table *pages);
+static inline struct i915_gem_context *
+__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
+{
+    return idr_find(&file_priv->context_idr, id);
+}
 static inline struct i915_gem_context *
 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
 {
     struct i915_gem_context *ctx;
-    lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
-    ctx = idr_find(&file_priv->context_idr, id);
-    if (!ctx)
-        return ERR_PTR(-ENOENT);
+    rcu_read_lock();
+    ctx = __i915_gem_context_lookup_rcu(file_priv, id);
+    if (ctx && !kref_get_unless_zero(&ctx->ref))
+        ctx = NULL;
+    rcu_read_unlock();
     return ctx;
 }
-static inline struct i915_gem_context *
-i915_gem_context_get(struct i915_gem_context *ctx)
-{
-    kref_get(&ctx->ref);
-    return ctx;
-}
-static inline void i915_gem_context_put(struct i915_gem_context *ctx)
-{
-    lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-    kref_put(&ctx->ref, i915_gem_context_free);
-}
-static inline void i915_gem_context_put_unlocked(struct i915_gem_context *ctx)
-{
-    struct mutex *lock = &ctx->i915->drm.struct_mutex;
-    if (kref_put_mutex(&ctx->ref, i915_gem_context_free, lock))
-        mutex_unlock(lock);
-}
 static inline struct intel_timeline *
 i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
                  struct intel_engine_cs *engine)
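The renamed fixed16 wrappers above are what the skl+ watermark code builds on. A rough, illustrative sketch of how they compose — not taken from this patch; the function and variable names below are made up for the example:

/* Illustrative sketch, not from this patch: derive a 16.16 downscale
 * factor from plane source/destination sizes and apply it to a rate,
 * rounding up on the way back to an integer. */
static inline uint32_t example_downscaled_rate(uint32_t src_w, uint32_t src_h,
                                               uint32_t dst_w, uint32_t dst_h,
                                               uint32_t rate)
{
    uint_fixed_16_16_t downscale_w = div_fixed16(src_w, dst_w);
    uint_fixed_16_16_t downscale_h = div_fixed16(src_h, dst_h);
    uint_fixed_16_16_t downscale = mul_fixed16(downscale_w, downscale_h);

    return mul_round_up_u32_fixed16(rate, downscale);
}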

View file

@@ -388,7 +388,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
      */
     if (rps) {
         if (INTEL_GEN(rq->i915) >= 6)
-            gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
+            gen6_rps_boost(rq, rps);
         else
             rps = NULL;
     }
@@ -399,22 +399,6 @@ out:
     if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
         i915_gem_request_retire_upto(rq);
-    if (rps && i915_gem_request_global_seqno(rq) == intel_engine_last_submit(rq->engine)) {
-        /* The GPU is now idle and this client has stalled.
-         * Since no other client has submitted a request in the
-         * meantime, assume that this client is the only one
-         * supplying work to the GPU but is unable to keep that
-         * work supplied because it is waiting. Since the GPU is
-         * then never kept fully busy, RPS autoclocking will
-         * keep the clocks relatively low, causing further delays.
-         * Compensate by giving the synchronous client credit for
-         * a waitboost next time.
-         */
-        spin_lock(&rq->i915->rps.client_lock);
-        list_del_init(&rps->link);
-        spin_unlock(&rq->i915->rps.client_lock);
-    }
     return timeout;
 }
@@ -2832,15 +2816,14 @@ static bool engine_stalled(struct intel_engine_cs *engine)
     return true;
 }
-int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
+/*
+ * Ensure irq handler finishes, and not run again.
+ * Also return the active request so that we only search for it once.
+ */
+struct drm_i915_gem_request *
+i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
 {
-    struct intel_engine_cs *engine;
-    enum intel_engine_id id;
-    int err = 0;
-    /* Ensure irq handler finishes, and not run again. */
-    for_each_engine(engine, dev_priv, id) {
-        struct drm_i915_gem_request *request;
+    struct drm_i915_gem_request *request = NULL;
     /* Prevent the signaler thread from updating the request
      * state (by calling dma_fence_signal) as we are processing
@@ -2870,8 +2853,27 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
     if (engine_stalled(engine)) {
         request = i915_gem_find_active_request(engine);
         if (request && request->fence.error == -EIO)
-            err = -EIO; /* Previous reset failed! */
+            request = ERR_PTR(-EIO); /* Previous reset failed! */
     }
+    return request;
+}
+int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
+{
+    struct intel_engine_cs *engine;
+    struct drm_i915_gem_request *request;
+    enum intel_engine_id id;
+    int err = 0;
+    for_each_engine(engine, dev_priv, id) {
+        request = i915_gem_reset_prepare_engine(engine);
+        if (IS_ERR(request)) {
+            err = PTR_ERR(request);
+            continue;
+        }
+        engine->hangcheck.active_request = request;
     }
     i915_gem_revoke_fences(dev_priv);
@@ -2925,7 +2927,7 @@ static void engine_skip_context(struct drm_i915_gem_request *request)
 static bool i915_gem_reset_request(struct drm_i915_gem_request *request)
 {
     /* Read once and return the resolution */
-    const bool guilty = engine_stalled(request->engine);
+    const bool guilty = !i915_gem_request_completed(request);
     /* The guilty request will get skipped on a hung engine.
      *
@@ -2959,11 +2961,9 @@ static bool i915_gem_reset_request(struct drm_i915_gem_request *request)
     return guilty;
 }
-static void i915_gem_reset_engine(struct intel_engine_cs *engine)
+void i915_gem_reset_engine(struct intel_engine_cs *engine,
+               struct drm_i915_gem_request *request)
 {
-    struct drm_i915_gem_request *request;
-    request = i915_gem_find_active_request(engine);
     if (request && i915_gem_reset_request(request)) {
         DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
                  engine->name, request->global_seqno);
@@ -2989,7 +2989,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
     for_each_engine(engine, dev_priv, id) {
         struct i915_gem_context *ctx;
-        i915_gem_reset_engine(engine);
+        i915_gem_reset_engine(engine, engine->hangcheck.active_request);
         ctx = fetch_and_zero(&engine->last_retired_context);
         if (ctx)
             engine->context_unpin(engine, ctx);
@@ -3005,6 +3005,12 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
     }
 }
+void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
+{
+    tasklet_enable(&engine->irq_tasklet);
+    kthread_unpark(engine->breadcrumbs.signaler);
+}
 void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
 {
     struct intel_engine_cs *engine;
@@ -3013,8 +3019,8 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
     lockdep_assert_held(&dev_priv->drm.struct_mutex);
     for_each_engine(engine, dev_priv, id) {
-        tasklet_enable(&engine->irq_tasklet);
-        kthread_unpark(engine->breadcrumbs.signaler);
+        engine->hangcheck.active_request = NULL;
+        i915_gem_reset_finish_engine(engine);
     }
 }
@@ -3041,6 +3047,7 @@ static void engine_set_wedged(struct intel_engine_cs *engine)
     /* Mark all executing requests as skipped */
     spin_lock_irqsave(&engine->timeline->lock, flags);
     list_for_each_entry(request, &engine->timeline->requests, link)
+        if (!i915_gem_request_completed(request))
             dma_fence_set_error(&request->fence, -EIO);
     spin_unlock_irqrestore(&engine->timeline->lock, flags);
@@ -3071,6 +3078,13 @@ static void engine_set_wedged(struct intel_engine_cs *engine)
         engine->execlist_first = NULL;
         spin_unlock_irqrestore(&engine->timeline->lock, flags);
+        /* The port is checked prior to scheduling a tasklet, but
+         * just in case we have suspended the tasklet to do the
+         * wedging make sure that when it wakes, it decides there
+         * is no work to do by clearing the irq_posted bit.
+         */
+        clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
     }
 }
@@ -3080,6 +3094,7 @@ static int __i915_gem_set_wedged_BKL(void *data)
     struct intel_engine_cs *engine;
     enum intel_engine_id id;
+    set_bit(I915_WEDGED, &i915->gpu_error.flags);
     for_each_engine(engine, i915, id)
         engine_set_wedged(engine);
@@ -3088,20 +3103,7 @@ static int __i915_gem_set_wedged_BKL(void *data)
 void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
 {
-    lockdep_assert_held(&dev_priv->drm.struct_mutex);
-    set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
-    /* Retire completed requests first so the list of inflight/incomplete
-     * requests is accurate and we don't try and mark successful requests
-     * as in error during __i915_gem_set_wedged_BKL().
-     */
-    i915_gem_retire_requests(dev_priv);
     stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
-    i915_gem_context_lost(dev_priv);
-    mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
 }
 bool i915_gem_unset_wedged(struct drm_i915_private *i915)
@@ -3156,6 +3158,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
      * context and do not require stop_machine().
      */
     intel_engines_reset_default_submission(i915);
+    i915_gem_contexts_lost(i915);
     smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
     clear_bit(I915_WEDGED, &i915->gpu_error.flags);
@@ -4565,7 +4568,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
         goto err_unlock;
     assert_kernel_context_is_current(dev_priv);
-    i915_gem_context_lost(dev_priv);
+    i915_gem_contexts_lost(dev_priv);
     mutex_unlock(&dev->struct_mutex);
     intel_guc_suspend(dev_priv);
@@ -4579,8 +4582,6 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
     while (flush_delayed_work(&dev_priv->gt.idle_work))
         ;
-    i915_gem_drain_freed_objects(dev_priv);
     /* Assert that we sucessfully flushed all the work and
      * reset the GPU back to its idle, low power state.
      */
@@ -4812,7 +4813,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
     if (ret)
         goto out_unlock;
-    ret = i915_gem_context_init(dev_priv);
+    ret = i915_gem_contexts_init(dev_priv);
     if (ret)
         goto out_unlock;
@@ -4922,7 +4923,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
     if (err)
         goto err_priorities;
-    INIT_LIST_HEAD(&dev_priv->context_list);
     INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
     init_llist_head(&dev_priv->mm.free_list);
     INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
@@ -5038,15 +5038,9 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
     list_for_each_entry(request, &file_priv->mm.request_list, client_link)
         request->file_priv = NULL;
     spin_unlock(&file_priv->mm.lock);
-    if (!list_empty(&file_priv->rps.link)) {
-        spin_lock(&to_i915(dev)->rps.client_lock);
-        list_del(&file_priv->rps.link);
-        spin_unlock(&to_i915(dev)->rps.client_lock);
-    }
 }
-int i915_gem_open(struct drm_device *dev, struct drm_file *file)
+int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
 {
     struct drm_i915_file_private *file_priv;
     int ret;
@@ -5058,16 +5052,15 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
         return -ENOMEM;
     file->driver_priv = file_priv;
-    file_priv->dev_priv = to_i915(dev);
+    file_priv->dev_priv = i915;
     file_priv->file = file;
-    INIT_LIST_HEAD(&file_priv->rps.link);
     spin_lock_init(&file_priv->mm.lock);
     INIT_LIST_HEAD(&file_priv->mm.request_list);
     file_priv->bsd_engine = -1;
-    ret = i915_gem_context_open(dev, file);
+    ret = i915_gem_context_open(i915, file);
     if (ret)
         kfree(file_priv);


@ -158,13 +158,11 @@ static void vma_lut_free(struct i915_gem_context *ctx)
kvfree(lut->ht); kvfree(lut->ht);
} }
void i915_gem_context_free(struct kref *ctx_ref) static void i915_gem_context_free(struct i915_gem_context *ctx)
{ {
struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
int i; int i;
lockdep_assert_held(&ctx->i915->drm.struct_mutex); lockdep_assert_held(&ctx->i915->drm.struct_mutex);
trace_i915_context_free(ctx);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
vma_lut_free(ctx); vma_lut_free(ctx);
@ -188,8 +186,54 @@ void i915_gem_context_free(struct kref *ctx_ref)
list_del(&ctx->link); list_del(&ctx->link);
ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id); ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id);
kfree(ctx); kfree_rcu(ctx, rcu);
}
static void contexts_free(struct drm_i915_private *i915)
{
struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
struct i915_gem_context *ctx, *cn;
lockdep_assert_held(&i915->drm.struct_mutex);
llist_for_each_entry_safe(ctx, cn, freed, free_link)
i915_gem_context_free(ctx);
}
static void contexts_free_first(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx;
struct llist_node *freed;
lockdep_assert_held(&i915->drm.struct_mutex);
freed = llist_del_first(&i915->contexts.free_list);
if (!freed)
return;
ctx = container_of(freed, typeof(*ctx), free_link);
i915_gem_context_free(ctx);
}
static void contexts_free_worker(struct work_struct *work)
{
struct drm_i915_private *i915 =
container_of(work, typeof(*i915), contexts.free_work);
mutex_lock(&i915->drm.struct_mutex);
contexts_free(i915);
mutex_unlock(&i915->drm.struct_mutex);
}
void i915_gem_context_release(struct kref *ref)
{
struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
struct drm_i915_private *i915 = ctx->i915;
trace_i915_context_free(ctx);
if (llist_add(&ctx->free_link, &i915->contexts.free_list))
queue_work(i915->wq, &i915->contexts.free_work);
}
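A note on the pattern introduced above: the final context reference no longer frees anything directly. i915_gem_context_release() pushes the context onto a lockless free_list and queues a worker, so kref_put() callers never need struct_mutex, and context creation opportunistically reaps the most stale entry first. A rough userspace analog of that deferred-free scheme, with invented names and C11 atomics standing in for llist_add()/llist_del_all() (an illustrative sketch, not the driver code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	struct obj *free_next;
	/* payload ... */
};

static _Atomic(struct obj *) free_list;		/* analog of contexts.free_list */
static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Final-reference path: lock-free push, defer the real free to a worker. */
static void obj_release(struct obj *o)
{
	struct obj *head = atomic_load(&free_list);

	do {
		o->free_next = head;
	} while (!atomic_compare_exchange_weak(&free_list, &head, o));
	/* a real implementation would also kick the worker here */
}

/* Worker: take the whole list in one exchange, free it under the lock. */
static void reap_free_list(void)
{
	struct obj *o = atomic_exchange(&free_list, NULL);

	pthread_mutex_lock(&big_lock);
	while (o) {
		struct obj *next = o->free_next;

		free(o);
		o = next;
	}
	pthread_mutex_unlock(&big_lock);
}

The kernel version additionally defers the final kfree via kfree_rcu() so that lockless RCU lookups never see freed memory.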
static void context_close(struct i915_gem_context *ctx) static void context_close(struct i915_gem_context *ctx)
@ -205,7 +249,7 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{ {
int ret; int ret;
ret = ida_simple_get(&dev_priv->context_hw_ida, ret = ida_simple_get(&dev_priv->contexts.hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL); 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0) { if (ret < 0) {
/* Contexts are only released when no longer active. /* Contexts are only released when no longer active.
@ -213,7 +257,7 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
* stale contexts and try again. * stale contexts and try again.
*/ */
i915_gem_retire_requests(dev_priv); i915_gem_retire_requests(dev_priv);
ret = ida_simple_get(&dev_priv->context_hw_ida, ret = ida_simple_get(&dev_priv->contexts.hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL); 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0) if (ret < 0)
return ret; return ret;
@ -265,7 +309,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
} }
kref_init(&ctx->ref); kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->context_list); list_add_tail(&ctx->link, &dev_priv->contexts.list);
ctx->i915 = dev_priv; ctx->i915 = dev_priv;
ctx->priority = I915_PRIORITY_NORMAL; ctx->priority = I915_PRIORITY_NORMAL;
@ -354,6 +398,9 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->drm.struct_mutex); lockdep_assert_held(&dev_priv->drm.struct_mutex);
/* Reap the most stale context */
contexts_free_first(dev_priv);
ctx = __create_hw_context(dev_priv, file_priv); ctx = __create_hw_context(dev_priv, file_priv);
if (IS_ERR(ctx)) if (IS_ERR(ctx))
return ctx; return ctx;
@ -418,7 +465,7 @@ out:
return ctx; return ctx;
} }
int i915_gem_context_init(struct drm_i915_private *dev_priv) int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{ {
struct i915_gem_context *ctx; struct i915_gem_context *ctx;
@ -427,6 +474,10 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
if (WARN_ON(dev_priv->kernel_context)) if (WARN_ON(dev_priv->kernel_context))
return 0; return 0;
INIT_LIST_HEAD(&dev_priv->contexts.list);
INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
init_llist_head(&dev_priv->contexts.free_list);
if (intel_vgpu_active(dev_priv) && if (intel_vgpu_active(dev_priv) &&
HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
if (!i915.enable_execlists) { if (!i915.enable_execlists) {
@ -437,7 +488,7 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
/* Using the simple ida interface, the max is limited by sizeof(int) */ /* Using the simple ida interface, the max is limited by sizeof(int) */
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX); BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
ida_init(&dev_priv->context_hw_ida); ida_init(&dev_priv->contexts.hw_ida);
ctx = i915_gem_create_context(dev_priv, NULL); ctx = i915_gem_create_context(dev_priv, NULL);
if (IS_ERR(ctx)) { if (IS_ERR(ctx)) {
@ -463,7 +514,7 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
return 0; return 0;
} }
void i915_gem_context_lost(struct drm_i915_private *dev_priv) void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
{ {
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
enum intel_engine_id id; enum intel_engine_id id;
@ -484,7 +535,7 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
if (!i915.enable_execlists) { if (!i915.enable_execlists) {
struct i915_gem_context *ctx; struct i915_gem_context *ctx;
list_for_each_entry(ctx, &dev_priv->context_list, link) { list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
if (!i915_gem_context_is_default(ctx)) if (!i915_gem_context_is_default(ctx))
continue; continue;
@ -503,18 +554,20 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
} }
} }
void i915_gem_context_fini(struct drm_i915_private *dev_priv) void i915_gem_contexts_fini(struct drm_i915_private *i915)
{ {
struct i915_gem_context *dctx = dev_priv->kernel_context; struct i915_gem_context *ctx;
lockdep_assert_held(&dev_priv->drm.struct_mutex); lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!i915_gem_context_is_kernel(dctx)); /* Keep the context so that we can free it immediately ourselves */
ctx = i915_gem_context_get(fetch_and_zero(&i915->kernel_context));
GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
context_close(ctx);
i915_gem_context_free(ctx);
context_close(dctx); /* Must free all deferred contexts (via flush_workqueue) first */
dev_priv->kernel_context = NULL; ida_destroy(&i915->contexts.hw_ida);
ida_destroy(&dev_priv->context_hw_ida);
} }
static int context_idr_cleanup(int id, void *p, void *data) static int context_idr_cleanup(int id, void *p, void *data)
@ -525,32 +578,32 @@ static int context_idr_cleanup(int id, void *p, void *data)
return 0; return 0;
} }
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file) int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file)
{ {
struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_gem_context *ctx; struct i915_gem_context *ctx;
idr_init(&file_priv->context_idr); idr_init(&file_priv->context_idr);
mutex_lock(&dev->struct_mutex); mutex_lock(&i915->drm.struct_mutex);
ctx = i915_gem_create_context(to_i915(dev), file_priv); ctx = i915_gem_create_context(i915, file_priv);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&i915->drm.struct_mutex);
GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
if (IS_ERR(ctx)) { if (IS_ERR(ctx)) {
idr_destroy(&file_priv->context_idr); idr_destroy(&file_priv->context_idr);
return PTR_ERR(ctx); return PTR_ERR(ctx);
} }
GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
return 0; return 0;
} }
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) void i915_gem_context_close(struct drm_file *file)
{ {
struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_file_private *file_priv = file->driver_priv;
lockdep_assert_held(&dev->struct_mutex); lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr); idr_destroy(&file_priv->context_idr);
@ -981,20 +1034,19 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
if (args->ctx_id == DEFAULT_CONTEXT_HANDLE) if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
return -ENOENT; return -ENOENT;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
ctx = i915_gem_context_lookup(file_priv, args->ctx_id); ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
if (IS_ERR(ctx)) { if (!ctx)
mutex_unlock(&dev->struct_mutex); return -ENOENT;
return PTR_ERR(ctx);
} ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
goto out;
__destroy_hw_context(ctx, file_priv); __destroy_hw_context(ctx, file_priv);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("HW context %d destroyed\n", args->ctx_id); out:
i915_gem_context_put(ctx);
return 0; return 0;
} }
@@ -1004,17 +1056,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct drm_i915_gem_context_param *args = data;
 	struct i915_gem_context *ctx;
-	int ret;
-
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
+	int ret = 0;

 	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
-	if (IS_ERR(ctx)) {
-		mutex_unlock(&dev->struct_mutex);
-		return PTR_ERR(ctx);
-	}
+	if (!ctx)
+		return -ENOENT;

 	args->size = 0;
 	switch (args->param) {

@@ -1042,8 +1088,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 		ret = -EINVAL;
 		break;
 	}
-	mutex_unlock(&dev->struct_mutex);
+	i915_gem_context_put(ctx);

 	return ret;
 }
@ -1055,15 +1101,13 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
struct i915_gem_context *ctx; struct i915_gem_context *ctx;
int ret; int ret;
ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
if (!ctx)
return -ENOENT;
ret = i915_mutex_lock_interruptible(dev); ret = i915_mutex_lock_interruptible(dev);
if (ret) if (ret)
return ret; goto out;
ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
}
switch (args->param) { switch (args->param) {
case I915_CONTEXT_PARAM_BAN_PERIOD: case I915_CONTEXT_PARAM_BAN_PERIOD:
@ -1101,6 +1145,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
} }
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
out:
i915_gem_context_put(ctx);
return ret; return ret;
} }
@@ -1115,27 +1161,31 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
 	if (args->flags || args->pad)
 		return -EINVAL;

-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
+	ret = -ENOENT;
+	rcu_read_lock();
+	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
+	if (!ctx)
+		goto out;

-	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
-	if (IS_ERR(ctx)) {
-		mutex_unlock(&dev->struct_mutex);
-		return PTR_ERR(ctx);
-	}
+	/*
+	 * We opt for unserialised reads here. This may result in tearing
+	 * in the extremely unlikely event of a GPU hang on this context
+	 * as we are querying them. If we need that extra layer of protection,
+	 * we should wrap the hangstats with a seqlock.
+	 */

 	if (capable(CAP_SYS_ADMIN))
 		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
 	else
 		args->reset_count = 0;

-	args->batch_active = ctx->guilty_count;
-	args->batch_pending = ctx->active_count;
+	args->batch_active = READ_ONCE(ctx->guilty_count);
+	args->batch_pending = READ_ONCE(ctx->active_count);

-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
+	ret = 0;
+out:
+	rcu_read_unlock();
+	return ret;
 }
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
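The reset-stats ioctl rework above drops the struct_mutex round-trip entirely: the context is looked up under rcu_read_lock() and the hangstats are read unserialised with READ_ONCE(), accepting a torn or stale value in the unlikely case of a concurrent hang. A minimal userspace sketch of the same idea, using relaxed C11 atomic loads in place of READ_ONCE() and a stubbed-out lookup (all names invented for illustration, not the driver API):

#include <stdatomic.h>
#include <stdio.h>

/* Per-context counters updated elsewhere by a hang handler. */
struct ctx_stats {
	atomic_uint guilty_count;
	atomic_uint active_count;
};

static struct ctx_stats example_ctx;	/* stands in for the RCU-protected lookup */

static struct ctx_stats *ctx_lookup(unsigned int id)
{
	return id == 0 ? &example_ctx : NULL;
}

static int query_reset_stats(unsigned int id,
			     unsigned int *batch_active,
			     unsigned int *batch_pending)
{
	struct ctx_stats *ctx = ctx_lookup(id);

	if (!ctx)
		return -1;	/* -ENOENT in the ioctl */

	/* Relaxed loads play the role of READ_ONCE(): no lock is taken and
	 * a slightly stale value is acceptable.
	 */
	*batch_active  = atomic_load_explicit(&ctx->guilty_count,
					      memory_order_relaxed);
	*batch_pending = atomic_load_explicit(&ctx->active_count,
					      memory_order_relaxed);
	return 0;
}

int main(void)
{
	unsigned int active, pending;

	if (query_reset_stats(0, &active, &pending) == 0)
		printf("batch_active=%u batch_pending=%u\n", active, pending);
	return 0;
}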


@ -86,6 +86,7 @@ struct i915_gem_context {
/** link: place with &drm_i915_private.context_list */ /** link: place with &drm_i915_private.context_list */
struct list_head link; struct list_head link;
struct llist_node free_link;
/** /**
* @ref: reference count * @ref: reference count
@ -98,6 +99,11 @@ struct i915_gem_context {
*/ */
struct kref ref; struct kref ref;
/**
* @rcu: rcu_head for deferred freeing.
*/
struct rcu_head rcu;
/** /**
* @flags: small set of booleans * @flags: small set of booleans
*/ */
@ -273,14 +279,18 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
} }
/* i915_gem_context.c */ /* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_i915_private *dev_priv); int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
void i915_gem_context_lost(struct drm_i915_private *dev_priv); void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
void i915_gem_context_fini(struct drm_i915_private *dev_priv); void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);
int i915_switch_context(struct drm_i915_gem_request *req); int i915_switch_context(struct drm_i915_gem_request *req);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv); int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
void i915_gem_context_free(struct kref *ctx_ref);
void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context * struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev); i915_gem_context_create_gvt(struct drm_device *dev);
@ -295,4 +305,16 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file); struct drm_file *file);
static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
kref_get(&ctx->ref);
return ctx;
}
static inline void i915_gem_context_put(struct i915_gem_context *ctx)
{
kref_put(&ctx->ref, i915_gem_context_release);
}
#endif /* !__I915_GEM_CONTEXT_H__ */ #endif /* !__I915_GEM_CONTEXT_H__ */


@ -675,16 +675,17 @@ static int eb_select_context(struct i915_execbuffer *eb)
struct i915_gem_context *ctx; struct i915_gem_context *ctx;
ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1); ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
if (unlikely(IS_ERR(ctx))) if (unlikely(!ctx))
return PTR_ERR(ctx); return -ENOENT;
if (unlikely(i915_gem_context_is_banned(ctx))) { if (unlikely(i915_gem_context_is_banned(ctx))) {
DRM_DEBUG("Context %u tried to submit while banned\n", DRM_DEBUG("Context %u tried to submit while banned\n",
ctx->user_handle); ctx->user_handle);
i915_gem_context_put(ctx);
return -EIO; return -EIO;
} }
eb->ctx = i915_gem_context_get(ctx); eb->ctx = ctx;
eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base; eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;
eb->context_flags = 0; eb->context_flags = 0;
@ -2134,7 +2135,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC)) if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
args->flags |= __EXEC_HAS_RELOC; args->flags |= __EXEC_HAS_RELOC;
eb.exec = exec; eb.exec = exec;
eb.ctx = NULL;
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS; eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
if (USES_FULL_PPGTT(eb.i915)) if (USES_FULL_PPGTT(eb.i915))
eb.invalid_flags |= EXEC_OBJECT_NEEDS_GTT; eb.invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
@ -2192,6 +2192,10 @@ i915_gem_do_execbuffer(struct drm_device *dev,
GEM_BUG_ON(!eb.lut_size); GEM_BUG_ON(!eb.lut_size);
err = eb_select_context(&eb);
if (unlikely(err))
goto err_destroy;
/* /*
* Take a local wakeref for preparing to dispatch the execbuf as * Take a local wakeref for preparing to dispatch the execbuf as
* we expect to access the hardware fairly frequently in the * we expect to access the hardware fairly frequently in the
@ -2200,14 +2204,11 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* 100ms. * 100ms.
*/ */
intel_runtime_pm_get(eb.i915); intel_runtime_pm_get(eb.i915);
err = i915_mutex_lock_interruptible(dev); err = i915_mutex_lock_interruptible(dev);
if (err) if (err)
goto err_rpm; goto err_rpm;
err = eb_select_context(&eb);
if (unlikely(err))
goto err_unlock;
err = eb_relocate(&eb); err = eb_relocate(&eb);
if (err) if (err)
/* /*
@ -2343,11 +2344,11 @@ err_batch_unpin:
err_vma: err_vma:
if (eb.exec) if (eb.exec)
eb_release_vmas(&eb); eb_release_vmas(&eb);
i915_gem_context_put(eb.ctx);
err_unlock:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
err_rpm: err_rpm:
intel_runtime_pm_put(eb.i915); intel_runtime_pm_put(eb.i915);
i915_gem_context_put(eb.ctx);
err_destroy:
eb_destroy(&eb); eb_destroy(&eb);
err_out_fence: err_out_fence:
if (out_fence_fd != -1) if (out_fence_fd != -1)


@ -207,8 +207,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
if (vma->obj->gt_ro) if (vma->obj->gt_ro)
pte_flags |= PTE_READ_ONLY; pte_flags |= PTE_READ_ONLY;
vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start, vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
cache_level, pte_flags);
return 0; return 0;
} }
@ -907,37 +906,35 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
} }
static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm, static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
struct sg_table *pages, struct i915_vma *vma,
u64 start,
enum i915_cache_level cache_level, enum i915_cache_level cache_level,
u32 unused) u32 unused)
{ {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = { struct sgt_dma iter = {
.sg = pages->sgl, .sg = vma->pages->sgl,
.dma = sg_dma_address(iter.sg), .dma = sg_dma_address(iter.sg),
.max = iter.dma + iter.sg->length, .max = iter.dma + iter.sg->length,
}; };
struct gen8_insert_pte idx = gen8_insert_pte(start); struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx, gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
cache_level); cache_level);
} }
static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm, static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
struct sg_table *pages, struct i915_vma *vma,
u64 start,
enum i915_cache_level cache_level, enum i915_cache_level cache_level,
u32 unused) u32 unused)
{ {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = { struct sgt_dma iter = {
.sg = pages->sgl, .sg = vma->pages->sgl,
.dma = sg_dma_address(iter.sg), .dma = sg_dma_address(iter.sg),
.max = iter.dma + iter.sg->length, .max = iter.dma + iter.sg->length,
}; };
struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps; struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
struct gen8_insert_pte idx = gen8_insert_pte(start); struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter, while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
&idx, cache_level)) &idx, cache_level))
@ -1621,13 +1618,12 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
} }
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages, struct i915_vma *vma,
u64 start,
enum i915_cache_level cache_level, enum i915_cache_level cache_level,
u32 flags) u32 flags)
{ {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
unsigned first_entry = start >> PAGE_SHIFT; unsigned first_entry = vma->node.start >> PAGE_SHIFT;
unsigned act_pt = first_entry / GEN6_PTES; unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES; unsigned act_pte = first_entry % GEN6_PTES;
const u32 pte_encode = vm->pte_encode(0, cache_level, flags); const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
@ -1635,7 +1631,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
gen6_pte_t *vaddr; gen6_pte_t *vaddr;
vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]); vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
iter.sg = pages->sgl; iter.sg = vma->pages->sgl;
iter.dma = sg_dma_address(iter.sg); iter.dma = sg_dma_address(iter.sg);
iter.max = iter.dma + iter.sg->length; iter.max = iter.dma + iter.sg->length;
do { do {
@ -2090,8 +2086,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
} }
static void gen8_ggtt_insert_entries(struct i915_address_space *vm, static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st, struct i915_vma *vma,
u64 start,
enum i915_cache_level level, enum i915_cache_level level,
u32 unused) u32 unused)
{ {
@ -2102,8 +2097,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
dma_addr_t addr; dma_addr_t addr;
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm; gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
gtt_entries += start >> PAGE_SHIFT; gtt_entries += vma->node.start >> PAGE_SHIFT;
for_each_sgt_dma(addr, sgt_iter, st) for_each_sgt_dma(addr, sgt_iter, vma->pages)
gen8_set_pte(gtt_entries++, pte_encode | addr); gen8_set_pte(gtt_entries++, pte_encode | addr);
wmb(); wmb();
@ -2137,17 +2132,16 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
* mapped BAR (dev_priv->mm.gtt->gtt). * mapped BAR (dev_priv->mm.gtt->gtt).
*/ */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm, static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st, struct i915_vma *vma,
u64 start,
enum i915_cache_level level, enum i915_cache_level level,
u32 flags) u32 flags)
{ {
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm; gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
unsigned int i = start >> PAGE_SHIFT; unsigned int i = vma->node.start >> PAGE_SHIFT;
struct sgt_iter iter; struct sgt_iter iter;
dma_addr_t addr; dma_addr_t addr;
for_each_sgt_dma(addr, iter, st) for_each_sgt_dma(addr, iter, vma->pages)
iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]); iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
wmb(); wmb();
@ -2229,8 +2223,7 @@ static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
struct insert_entries { struct insert_entries {
struct i915_address_space *vm; struct i915_address_space *vm;
struct sg_table *st; struct i915_vma *vma;
u64 start;
enum i915_cache_level level; enum i915_cache_level level;
}; };
@ -2238,19 +2231,18 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{ {
struct insert_entries *arg = _arg; struct insert_entries *arg = _arg;
gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0); gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
bxt_vtd_ggtt_wa(arg->vm); bxt_vtd_ggtt_wa(arg->vm);
return 0; return 0;
} }
static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
struct sg_table *st, struct i915_vma *vma,
u64 start,
enum i915_cache_level level, enum i915_cache_level level,
u32 unused) u32 unused)
{ {
struct insert_entries arg = { vm, st, start, level }; struct insert_entries arg = { vm, vma, level };
stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
} }
@ -2316,15 +2308,15 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm,
} }
static void i915_ggtt_insert_entries(struct i915_address_space *vm, static void i915_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages, struct i915_vma *vma,
u64 start,
enum i915_cache_level cache_level, enum i915_cache_level cache_level,
u32 unused) u32 unused)
{ {
unsigned int flags = (cache_level == I915_CACHE_NONE) ? unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags); intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
flags);
} }
static void i915_ggtt_clear_range(struct i915_address_space *vm, static void i915_ggtt_clear_range(struct i915_address_space *vm,
@ -2353,8 +2345,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
pte_flags |= PTE_READ_ONLY; pte_flags |= PTE_READ_ONLY;
intel_runtime_pm_get(i915); intel_runtime_pm_get(i915);
vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start, vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
cache_level, pte_flags);
intel_runtime_pm_put(i915); intel_runtime_pm_put(i915);
/* /*
@ -2407,16 +2398,13 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
goto err_pages; goto err_pages;
} }
appgtt->base.insert_entries(&appgtt->base, appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
vma->pages, vma->node.start, pte_flags);
cache_level, pte_flags);
} }
if (flags & I915_VMA_GLOBAL_BIND) { if (flags & I915_VMA_GLOBAL_BIND) {
intel_runtime_pm_get(i915); intel_runtime_pm_get(i915);
vma->vm->insert_entries(vma->vm, vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
vma->pages, vma->node.start,
cache_level, pte_flags);
intel_runtime_pm_put(i915); intel_runtime_pm_put(i915);
} }


@ -313,8 +313,7 @@ struct i915_address_space {
enum i915_cache_level cache_level, enum i915_cache_level cache_level,
u32 flags); u32 flags);
void (*insert_entries)(struct i915_address_space *vm, void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st, struct i915_vma *vma,
u64 start,
enum i915_cache_level cache_level, enum i915_cache_level cache_level,
u32 flags); u32 flags);
void (*cleanup)(struct i915_address_space *vm); void (*cleanup)(struct i915_address_space *vm);


@ -384,7 +384,11 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
engine->context_unpin(engine, engine->last_retired_context); engine->context_unpin(engine, engine->last_retired_context);
engine->last_retired_context = request->ctx; engine->last_retired_context = request->ctx;
dma_fence_signal(&request->fence); spin_lock_irq(&request->lock);
if (request->waitboost)
atomic_dec(&request->i915->rps.num_waiters);
dma_fence_signal_locked(&request->fence);
spin_unlock_irq(&request->lock);
i915_priotree_fini(request->i915, &request->priotree); i915_priotree_fini(request->i915, &request->priotree);
i915_gem_request_put(request); i915_gem_request_put(request);
@ -639,6 +643,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
req->file_priv = NULL; req->file_priv = NULL;
req->batch = NULL; req->batch = NULL;
req->capture_list = NULL; req->capture_list = NULL;
req->waitboost = false;
/* /*
* Reserve space in the ring buffer for all the commands required to * Reserve space in the ring buffer for all the commands required to


@ -184,6 +184,8 @@ struct drm_i915_gem_request {
/** Time at which this request was emitted, in jiffies. */ /** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies; unsigned long emitted_jiffies;
bool waitboost;
/** engine->request_list entry for this request */ /** engine->request_list entry for this request */
struct list_head link; struct list_head link;


@ -463,6 +463,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " hangcheck action timestamp: %lu, %u ms ago\n", err_printf(m, " hangcheck action timestamp: %lu, %u ms ago\n",
ee->hangcheck_timestamp, ee->hangcheck_timestamp,
jiffies_to_msecs(jiffies - ee->hangcheck_timestamp)); jiffies_to_msecs(jiffies - ee->hangcheck_timestamp));
err_printf(m, " engine reset count: %u\n", ee->reset_count);
error_print_request(m, " ELSP[0]: ", &ee->execlist[0]); error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
error_print_request(m, " ELSP[1]: ", &ee->execlist[1]); error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);
@ -1236,6 +1237,8 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->hangcheck_timestamp = engine->hangcheck.action_timestamp; ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
ee->hangcheck_action = engine->hangcheck.action; ee->hangcheck_action = engine->hangcheck.action;
ee->hangcheck_stalled = engine->hangcheck.stalled; ee->hangcheck_stalled = engine->hangcheck.stalled;
ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
engine);
if (USES_PPGTT(dev_priv)) { if (USES_PPGTT(dev_priv)) {
int i; int i;


@ -1091,18 +1091,6 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
return events; return events;
} }
static bool any_waiters(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id)
if (intel_engine_has_waiter(engine))
return true;
return false;
}
static void gen6_pm_rps_work(struct work_struct *work) static void gen6_pm_rps_work(struct work_struct *work)
{ {
struct drm_i915_private *dev_priv = struct drm_i915_private *dev_priv =
@ -1114,7 +1102,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
spin_lock_irq(&dev_priv->irq_lock); spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->rps.interrupts_enabled) { if (dev_priv->rps.interrupts_enabled) {
pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir); pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
client_boost = fetch_and_zero(&dev_priv->rps.client_boost); client_boost = atomic_read(&dev_priv->rps.num_waiters);
} }
spin_unlock_irq(&dev_priv->irq_lock); spin_unlock_irq(&dev_priv->irq_lock);
@ -1131,7 +1119,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
new_delay = dev_priv->rps.cur_freq; new_delay = dev_priv->rps.cur_freq;
min = dev_priv->rps.min_freq_softlimit; min = dev_priv->rps.min_freq_softlimit;
max = dev_priv->rps.max_freq_softlimit; max = dev_priv->rps.max_freq_softlimit;
if (client_boost || any_waiters(dev_priv)) if (client_boost)
max = dev_priv->rps.max_freq; max = dev_priv->rps.max_freq;
if (client_boost && new_delay < dev_priv->rps.boost_freq) { if (client_boost && new_delay < dev_priv->rps.boost_freq) {
new_delay = dev_priv->rps.boost_freq; new_delay = dev_priv->rps.boost_freq;
@ -1144,7 +1132,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
if (new_delay >= dev_priv->rps.max_freq_softlimit) if (new_delay >= dev_priv->rps.max_freq_softlimit)
adj = 0; adj = 0;
} else if (client_boost || any_waiters(dev_priv)) { } else if (client_boost) {
adj = 0; adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq) if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
@ -2599,60 +2587,93 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
return ret; return ret;
} }
struct wedge_me {
struct delayed_work work;
struct drm_i915_private *i915;
const char *name;
};
static void wedge_me(struct work_struct *work)
{
struct wedge_me *w = container_of(work, typeof(*w), work.work);
dev_err(w->i915->drm.dev,
"%s timed out, cancelling all in-flight rendering.\n",
w->name);
i915_gem_set_wedged(w->i915);
}
static void __init_wedge(struct wedge_me *w,
struct drm_i915_private *i915,
long timeout,
const char *name)
{
w->i915 = i915;
w->name = name;
INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
schedule_delayed_work(&w->work, timeout);
}
static void __fini_wedge(struct wedge_me *w)
{
cancel_delayed_work_sync(&w->work);
destroy_delayed_work_on_stack(&w->work);
w->i915 = NULL;
}
#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \
for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \
(W)->i915; \
__fini_wedge((W)))
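The i915_wedge_on_timeout() helper above arms an on-stack delayed work before the reset sequence; if the body does not finish within the timeout, the work fires and declares the GPU wedged rather than letting the reset hang forever. As a loose userspace analog (pthread-based, invented names, not kernel code), the same arm/do-work/disarm shape could be sketched as:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct wedge_guard {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	pthread_t thread;
	bool done;
	unsigned int timeout_sec;
};

static void *wedge_guard_thread(void *arg)
{
	struct wedge_guard *w = arg;
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += w->timeout_sec;

	pthread_mutex_lock(&w->lock);
	while (!w->done) {
		/* a nonzero return means the deadline passed: give up */
		if (pthread_cond_timedwait(&w->cond, &w->lock, &deadline)) {
			fprintf(stderr, "operation timed out, wedging\n");
			break;
		}
	}
	pthread_mutex_unlock(&w->lock);
	return NULL;
}

static void wedge_guard_arm(struct wedge_guard *w, unsigned int timeout_sec)
{
	pthread_mutex_init(&w->lock, NULL);
	pthread_cond_init(&w->cond, NULL);
	w->done = false;
	w->timeout_sec = timeout_sec;
	pthread_create(&w->thread, NULL, wedge_guard_thread, w);
}

static void wedge_guard_disarm(struct wedge_guard *w)
{
	pthread_mutex_lock(&w->lock);
	w->done = true;		/* the guarded work completed in time */
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
	pthread_join(w->thread, NULL);
}

int main(void)
{
	struct wedge_guard w;

	wedge_guard_arm(&w, 5);
	/* ... the potentially-stuck reset work would run here ... */
	wedge_guard_disarm(&w);
	return 0;
}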
/** /**
* i915_reset_and_wakeup - do process context error handling work * i915_reset_device - do process context error handling work
* @dev_priv: i915 device private * @dev_priv: i915 device private
* *
* Fire an error uevent so userspace can see that a hang or error * Fire an error uevent so userspace can see that a hang or error
* was detected. * was detected.
*/ */
static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) static void i915_reset_device(struct drm_i915_private *dev_priv)
{ {
struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
struct wedge_me w;
kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
DRM_DEBUG_DRIVER("resetting chip\n"); DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
/* Use a watchdog to ensure that our reset completes */
i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
intel_prepare_reset(dev_priv); intel_prepare_reset(dev_priv);
/* Signal that locked waiters should reset the GPU */
set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags); set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
wake_up_all(&dev_priv->gpu_error.wait_queue); wake_up_all(&dev_priv->gpu_error.wait_queue);
do { /* Wait for anyone holding the lock to wakeup, without
/* * blocking indefinitely on struct_mutex.
* All state reset _must_ be completed before we update the
* reset counter, for otherwise waiters might miss the reset
* pending state and not properly drop locks, resulting in
* deadlocks with the reset work.
*/ */
do {
if (mutex_trylock(&dev_priv->drm.struct_mutex)) { if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
i915_reset(dev_priv); i915_reset(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex); mutex_unlock(&dev_priv->drm.struct_mutex);
} }
/* We need to wait for anyone holding the lock to wakeup */
} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags, } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
I915_RESET_HANDOFF, I915_RESET_HANDOFF,
TASK_UNINTERRUPTIBLE, TASK_UNINTERRUPTIBLE,
HZ)); 1));
intel_finish_reset(dev_priv); intel_finish_reset(dev_priv);
}
if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
kobject_uevent_env(kobj, kobject_uevent_env(kobj,
KOBJ_CHANGE, reset_done_event); KOBJ_CHANGE, reset_done_event);
/*
* Note: The wake_up also serves as a memory barrier so that
* waiters see the updated value of the dev_priv->gpu_error.
*/
clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
wake_up_all(&dev_priv->gpu_error.reset_queue);
} }
static inline void static inline void
@ -2722,6 +2743,8 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
u32 engine_mask, u32 engine_mask,
const char *fmt, ...) const char *fmt, ...)
{ {
struct intel_engine_cs *engine;
unsigned int tmp;
va_list args; va_list args;
char error_msg[80]; char error_msg[80];
@ -2741,14 +2764,56 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
i915_capture_error_state(dev_priv, engine_mask, error_msg); i915_capture_error_state(dev_priv, engine_mask, error_msg);
i915_clear_error_registers(dev_priv); i915_clear_error_registers(dev_priv);
/*
* Try engine reset when available. We fall back to full reset if
* single reset fails.
*/
if (intel_has_reset_engine(dev_priv)) {
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
BUILD_BUG_ON(I915_RESET_HANDOFF >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&dev_priv->gpu_error.flags))
continue;
if (i915_reset_engine(engine) == 0)
engine_mask &= ~intel_engine_flag(engine);
clear_bit(I915_RESET_ENGINE + engine->id,
&dev_priv->gpu_error.flags);
wake_up_bit(&dev_priv->gpu_error.flags,
I915_RESET_ENGINE + engine->id);
}
}
if (!engine_mask) if (!engine_mask)
goto out; goto out;
if (test_and_set_bit(I915_RESET_BACKOFF, /* Full reset needs the mutex, stop any other user trying to do so. */
&dev_priv->gpu_error.flags)) if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
wait_event(dev_priv->gpu_error.reset_queue,
!test_bit(I915_RESET_BACKOFF,
&dev_priv->gpu_error.flags));
goto out; goto out;
}
i915_reset_and_wakeup(dev_priv); /* Prevent any other reset-engine attempt. */
for_each_engine(engine, dev_priv, tmp) {
while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&dev_priv->gpu_error.flags))
wait_on_bit(&dev_priv->gpu_error.flags,
I915_RESET_ENGINE + engine->id,
TASK_UNINTERRUPTIBLE);
}
i915_reset_device(dev_priv);
for_each_engine(engine, dev_priv, tmp) {
clear_bit(I915_RESET_ENGINE + engine->id,
&dev_priv->gpu_error.flags);
}
clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
wake_up_all(&dev_priv->gpu_error.reset_queue);
out:
	intel_runtime_pm_put(dev_priv);
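The rework above first attempts per-engine resets, using one I915_RESET_ENGINE bit per engine plus an I915_RESET_BACKOFF bit as lightweight locks in gpu_error.flags, and only falls back to a full device reset once every engine bit has been claimed. A compressed userspace sketch of that bit-based serialisation, with made-up bit numbers and busy-waiting where the kernel would sleep in wait_on_bit()/wait_event() (illustrative only):

#include <stdatomic.h>
#include <stdbool.h>

#define RESET_BACKOFF	0
#define RESET_ENGINE(e)	(1 + (e))	/* one flag bit per engine */
#define NUM_ENGINES	5

static atomic_ulong error_flags;

static bool test_and_set(int bit)
{
	return atomic_fetch_or(&error_flags, 1UL << bit) & (1UL << bit);
}

static void clear_flag(int bit)
{
	atomic_fetch_and(&error_flags, ~(1UL << bit));
}

/* Light-weight path: reset a single engine if nobody else already is. */
static bool try_engine_reset(int engine)
{
	if (test_and_set(RESET_ENGINE(engine)))
		return false;

	/* ... engine-only reset would happen here ... */

	clear_flag(RESET_ENGINE(engine));
	return true;
}

/* Fallback: claim every engine bit, then perform the full device reset. */
static void full_device_reset(void)
{
	if (test_and_set(RESET_BACKOFF))
		return;		/* a full reset is already in flight */

	for (int e = 0; e < NUM_ENGINES; e++)
		while (test_and_set(RESET_ENGINE(e)))
			;	/* the kernel sleeps in wait_on_bit() instead */

	/* ... full GPU reset would happen here ... */

	for (int e = 0; e < NUM_ENGINES; e++)
		clear_flag(RESET_ENGINE(e));
	clear_flag(RESET_BACKOFF);
}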


@ -46,7 +46,7 @@ struct i915_params i915 __read_mostly = {
.prefault_disable = 0, .prefault_disable = 0,
.load_detect_test = 0, .load_detect_test = 0,
.force_reset_modeset_test = 0, .force_reset_modeset_test = 0,
.reset = true, .reset = 2,
.error_capture = true, .error_capture = true,
.invert_brightness = 0, .invert_brightness = 0,
.disable_display = 0, .disable_display = 0,
@ -63,8 +63,9 @@ struct i915_params i915 __read_mostly = {
.huc_firmware_path = NULL, .huc_firmware_path = NULL,
.enable_dp_mst = true, .enable_dp_mst = true,
.inject_load_failure = 0, .inject_load_failure = 0,
.enable_dpcd_backlight = false, .enable_dpcd_backlight = -1,
.enable_gvt = false, .enable_gvt = false,
.enable_dbc = true,
}; };
module_param_named(modeset, i915.modeset, int, 0400); module_param_named(modeset, i915.modeset, int, 0400);
@ -115,8 +116,8 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type,
"Override/Ignore selection of SDVO panel mode in the VBT " "Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)"); "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
module_param_named_unsafe(reset, i915.reset, bool, 0600); module_param_named_unsafe(reset, i915.reset, int, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)"); MODULE_PARM_DESC(reset, "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
module_param_named(error_capture, i915.error_capture, bool, 0600); module_param_named(error_capture, i915.error_capture, bool, 0600);
@ -246,10 +247,15 @@ MODULE_PARM_DESC(enable_dp_mst,
module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400); module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
MODULE_PARM_DESC(inject_load_failure, MODULE_PARM_DESC(inject_load_failure,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600); module_param_named_unsafe(enable_dpcd_backlight, i915.enable_dpcd_backlight, int, 0600);
MODULE_PARM_DESC(enable_dpcd_backlight, MODULE_PARM_DESC(enable_dpcd_backlight,
"Enable support for DPCD backlight control (default:false)"); "Enable support for DPCD backlight control "
"(-1:auto (default), 0:force disable, 1:force enabled if supported");
module_param_named(enable_gvt, i915.enable_gvt, bool, 0400); module_param_named(enable_gvt, i915.enable_gvt, bool, 0400);
MODULE_PARM_DESC(enable_gvt, MODULE_PARM_DESC(enable_gvt,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)"); "Enable support for Intel GVT-g graphics virtualization host support(default:false)");
module_param_named_unsafe(enable_dbc, i915.enable_dbc, bool, 0600);
MODULE_PARM_DESC(enable_dbc,
"Enable support for dynamic backlight control (default:true)");


@ -51,7 +51,9 @@
func(int, use_mmio_flip); \ func(int, use_mmio_flip); \
func(int, mmio_debug); \ func(int, mmio_debug); \
func(int, edp_vswing); \ func(int, edp_vswing); \
func(int, reset); \
func(unsigned int, inject_load_failure); \ func(unsigned int, inject_load_failure); \
func(int, enable_dpcd_backlight); \
/* leave bools at the end to not create holes */ \ /* leave bools at the end to not create holes */ \
func(bool, alpha_support); \ func(bool, alpha_support); \
func(bool, enable_cmd_parser); \ func(bool, enable_cmd_parser); \
@ -60,14 +62,13 @@
func(bool, prefault_disable); \ func(bool, prefault_disable); \
func(bool, load_detect_test); \ func(bool, load_detect_test); \
func(bool, force_reset_modeset_test); \ func(bool, force_reset_modeset_test); \
func(bool, reset); \
func(bool, error_capture); \ func(bool, error_capture); \
func(bool, disable_display); \ func(bool, disable_display); \
func(bool, verbose_state_checks); \ func(bool, verbose_state_checks); \
func(bool, nuclear_pageflip); \ func(bool, nuclear_pageflip); \
func(bool, enable_dp_mst); \ func(bool, enable_dp_mst); \
func(bool, enable_dpcd_backlight); \ func(bool, enable_gvt); \
func(bool, enable_gvt) func(bool, enable_dbc)
#define MEMBER(T, member) T member #define MEMBER(T, member) T member
struct i915_params { struct i915_params {


@ -310,7 +310,8 @@ static const struct intel_device_info intel_haswell_info = {
BDW_COLORS, \ BDW_COLORS, \
.has_logical_ring_contexts = 1, \ .has_logical_ring_contexts = 1, \
.has_full_48bit_ppgtt = 1, \ .has_full_48bit_ppgtt = 1, \
.has_64bit_reloc = 1 .has_64bit_reloc = 1, \
.has_reset_engine = 1
#define BDW_PLATFORM \ #define BDW_PLATFORM \
BDW_FEATURES, \ BDW_FEATURES, \
@ -342,6 +343,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_gmch_display = 1, .has_gmch_display = 1,
.has_aliasing_ppgtt = 1, .has_aliasing_ppgtt = 1,
.has_full_ppgtt = 1, .has_full_ppgtt = 1,
.has_reset_engine = 1,
.display_mmio_offset = VLV_DISPLAY_BASE, .display_mmio_offset = VLV_DISPLAY_BASE,
GEN_CHV_PIPEOFFSETS, GEN_CHV_PIPEOFFSETS,
CURSOR_OFFSETS, CURSOR_OFFSETS,
@ -387,6 +389,7 @@ static const struct intel_device_info intel_skylake_gt3_info = {
.has_aliasing_ppgtt = 1, \ .has_aliasing_ppgtt = 1, \
.has_full_ppgtt = 1, \ .has_full_ppgtt = 1, \
.has_full_48bit_ppgtt = 1, \ .has_full_48bit_ppgtt = 1, \
.has_reset_engine = 1, \
GEN_DEFAULT_PIPEOFFSETS, \ GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS, \ IVB_CURSOR_OFFSETS, \
BDW_COLORS BDW_COLORS
@ -446,6 +449,7 @@ static const struct intel_device_info intel_cannonlake_info = {
.gen = 10, .gen = 10,
.ddb_size = 1024, .ddb_size = 1024,
.has_csr = 1, .has_csr = 1,
.color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
}; };
/* /*


@ -1746,7 +1746,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
goto out; goto out;
/* Update all contexts now that we've stalled the submission. */ /* Update all contexts now that we've stalled the submission. */
list_for_each_entry(ctx, &dev_priv->context_list, link) { list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
struct intel_context *ce = &ctx->engine[RCS]; struct intel_context *ce = &ctx->engine[RCS];
u32 *regs; u32 *regs;
@ -2444,7 +2444,7 @@ static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
list_del(&stream->link); list_del(&stream->link);
if (stream->ctx) if (stream->ctx)
i915_gem_context_put_unlocked(stream->ctx); i915_gem_context_put(stream->ctx);
kfree(stream); kfree(stream);
} }
@ -2633,7 +2633,7 @@ err_alloc:
kfree(stream); kfree(stream);
err_ctx: err_ctx:
if (specific_ctx) if (specific_ctx)
i915_gem_context_put_unlocked(specific_ctx); i915_gem_context_put(specific_ctx);
err: err:
return ret; return ret;
} }


@ -3522,7 +3522,7 @@ enum skl_disp_power_wells {
#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25) #define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25)
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2) #define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
#define INTERVAL_0_833_US(us) (((us) * 6) / 5) #define INTERVAL_0_833_US(us) (((us) * 6) / 5)
#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \ #define GT_INTERVAL_FROM_US(dev_priv, us) (INTEL_GEN(dev_priv) >= 9 ? \
(IS_GEN9_LP(dev_priv) ? \ (IS_GEN9_LP(dev_priv) ? \
INTERVAL_0_833_US(us) : \ INTERVAL_0_833_US(us) : \
INTERVAL_1_33_US(us)) : \ INTERVAL_1_33_US(us)) : \
@ -3531,7 +3531,7 @@ enum skl_disp_power_wells {
#define INTERVAL_1_28_TO_US(interval) (((interval) << 7) / 100) #define INTERVAL_1_28_TO_US(interval) (((interval) << 7) / 100)
#define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3) #define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3)
#define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6) #define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6)
#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (IS_GEN9(dev_priv) ? \ #define GT_PM_INTERVAL_TO_US(dev_priv, interval) (INTEL_GEN(dev_priv) >= 9 ? \
(IS_GEN9_LP(dev_priv) ? \ (IS_GEN9_LP(dev_priv) ? \
INTERVAL_0_833_TO_US(interval) : \ INTERVAL_0_833_TO_US(interval) : \
INTERVAL_1_33_TO_US(interval)) : \ INTERVAL_1_33_TO_US(interval)) : \
@ -8343,6 +8343,7 @@ enum {
#define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25) #define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25)
#define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25) #define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25)
#define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10) #define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10)
#define DPLL_CFGCR0_DCO_FRAC_SHIFT (10)
#define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10) #define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10)
#define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff) #define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff)
#define CNL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR0, _CNL_DPLL1_CFGCR0) #define CNL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR0, _CNL_DPLL1_CFGCR0)
@ -8350,6 +8351,7 @@ enum {
#define _CNL_DPLL0_CFGCR1 0x6C004 #define _CNL_DPLL0_CFGCR1 0x6C004
#define _CNL_DPLL1_CFGCR1 0x6C084 #define _CNL_DPLL1_CFGCR1 0x6C084
#define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10) #define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10)
#define DPLL_CFGCR1_QDIV_RATIO_SHIFT (10)
#define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10) #define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10)
#define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9) #define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9)
#define DPLL_CFGCR1_KDIV_MASK (7 << 6) #define DPLL_CFGCR1_KDIV_MASK (7 << 6)


@ -96,7 +96,7 @@ static struct attribute *rc6_attrs[] = {
NULL NULL
}; };
static struct attribute_group rc6_attr_group = { static const struct attribute_group rc6_attr_group = {
.name = power_group_name, .name = power_group_name,
.attrs = rc6_attrs .attrs = rc6_attrs
}; };
@ -107,7 +107,7 @@ static struct attribute *rc6p_attrs[] = {
NULL NULL
}; };
static struct attribute_group rc6p_attr_group = { static const struct attribute_group rc6p_attr_group = {
.name = power_group_name, .name = power_group_name,
.attrs = rc6p_attrs .attrs = rc6p_attrs
}; };
@ -117,7 +117,7 @@ static struct attribute *media_rc6_attrs[] = {
NULL NULL
}; };
static struct attribute_group media_rc6_attr_group = { static const struct attribute_group media_rc6_attr_group = {
.name = power_group_name, .name = power_group_name,
.attrs = media_rc6_attrs .attrs = media_rc6_attrs
}; };
@ -209,7 +209,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
memcpy(*remap_info + (offset/4), buf, count); memcpy(*remap_info + (offset/4), buf, count);
/* NB: We defer the remapping until we switch to the context */ /* NB: We defer the remapping until we switch to the context */
list_for_each_entry(ctx, &dev_priv->context_list, link) list_for_each_entry(ctx, &dev_priv->contexts.list, link)
ctx->remap_slice |= (1<<slice); ctx->remap_slice |= (1<<slice);
ret = count; ret = count;
@ -253,7 +253,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff); ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else { } else {
u32 rpstat = I915_READ(GEN6_RPSTAT1); u32 rpstat = I915_READ(GEN6_RPSTAT1);
if (IS_GEN9(dev_priv)) if (INTEL_GEN(dev_priv) >= 9)
ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;


@ -579,11 +579,17 @@ err_unpin:
static void i915_vma_destroy(struct i915_vma *vma) static void i915_vma_destroy(struct i915_vma *vma)
{ {
int i;
GEM_BUG_ON(vma->node.allocated); GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(i915_vma_is_active(vma)); GEM_BUG_ON(i915_vma_is_active(vma));
GEM_BUG_ON(!i915_vma_is_closed(vma)); GEM_BUG_ON(!i915_vma_is_closed(vma));
GEM_BUG_ON(vma->fence); GEM_BUG_ON(vma->fence);
for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
list_del(&vma->vm_link); list_del(&vma->vm_link);
if (!i915_vma_is_ggtt(vma)) if (!i915_vma_is_ggtt(vma))
i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
@ -680,9 +686,8 @@ int i915_vma_unbind(struct i915_vma *vma)
__i915_vma_unpin(vma); __i915_vma_unpin(vma);
if (ret) if (ret)
return ret; return ret;
GEM_BUG_ON(i915_vma_is_active(vma));
} }
GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_pinned(vma)) if (i915_vma_is_pinned(vma))
return -EBUSY; return -EBUSY;


@ -114,6 +114,8 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(plane->dev); struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_plane_state *state = &intel_state->base; struct drm_plane_state *state = &intel_state->base;
struct intel_plane *intel_plane = to_intel_plane(plane); struct intel_plane *intel_plane = to_intel_plane(plane);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
int ret; int ret;
/* /*
@ -173,6 +175,19 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
if (ret) if (ret)
return ret; return ret;
/*
* Y-tiling is not supported in IF-ID Interlace mode in
* GEN9 and above.
*/
if (state->fb && INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (state->fb->modifier == I915_FORMAT_MOD_Y_TILED ||
state->fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
return -EINVAL;
}
}
/* FIXME pre-g4x don't work like this */ /* FIXME pre-g4x don't work like this */
if (intel_state->base.visible) if (intel_state->base.visible)
crtc_state->active_planes |= BIT(intel_plane->id); crtc_state->active_planes |= BIT(intel_plane->id);


@ -1187,6 +1187,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (is_dvi) { if (is_dvi) {
info->alternate_ddc_pin = ddc_pin; info->alternate_ddc_pin = ddc_pin;
/*
* All VBTs that we got so far for B Stepping has this
* information wrong for Port D. So, let's just ignore for now.
*/
if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0) &&
port == PORT_D) {
info->alternate_ddc_pin = 0;
}
sanitize_ddc_pin(dev_priv, port); sanitize_ddc_pin(dev_priv, port);
} }


@ -615,7 +615,7 @@ void intel_color_init(struct drm_crtc *crtc)
IS_BROXTON(dev_priv)) { IS_BROXTON(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix; dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = broadwell_load_luts; dev_priv->display.load_luts = broadwell_load_luts;
} else if (IS_GEMINILAKE(dev_priv)) { } else if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix; dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = glk_load_luts; dev_priv->display.load_luts = glk_load_luts;
} else { } else {


@ -1103,6 +1103,62 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
return dco_freq / (p0 * p1 * p2 * 5); return dco_freq / (p0 * p1 * p2 * 5);
} }
static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
uint32_t pll_id)
{
uint32_t cfgcr0, cfgcr1;
uint32_t p0, p1, p2, dco_freq, ref_clock;
cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
if (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
p1 = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
DPLL_CFGCR1_QDIV_RATIO_SHIFT;
else
p1 = 1;
switch (p0) {
case DPLL_CFGCR1_PDIV_2:
p0 = 2;
break;
case DPLL_CFGCR1_PDIV_3:
p0 = 3;
break;
case DPLL_CFGCR1_PDIV_5:
p0 = 5;
break;
case DPLL_CFGCR1_PDIV_7:
p0 = 7;
break;
}
switch (p2) {
case DPLL_CFGCR1_KDIV_1:
p2 = 1;
break;
case DPLL_CFGCR1_KDIV_2:
p2 = 2;
break;
case DPLL_CFGCR1_KDIV_4:
p2 = 4;
break;
}
ref_clock = dev_priv->cdclk.hw.ref;
dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock;
dco_freq += (((cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
DPLL_CFGCR0_DCO_FRAC_SHIFT) * ref_clock) / 0x8000;
return dco_freq / (p0 * p1 * p2 * 5);
}
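For reference, the DCO arithmetic in cnl_calc_wrpll_link() above is: dco = dco_integer * ref + (dco_fraction * ref) / 2^15, and the resulting link clock is dco / (p0 * p1 * p2 * 5). A small standalone example with invented register values (not taken from real hardware) that exercises the same math:

#include <stdio.h>

int main(void)
{
	unsigned int ref_khz = 24000;			/* reference clock */
	unsigned int dco_int = 337, dco_frac = 0x4000;	/* 15-bit fraction, 0.5 */
	unsigned int p0 = 2, p1 = 1, p2 = 2;		/* pdiv, qdiv, kdiv */

	unsigned int dco_khz = dco_int * ref_khz +
			       (dco_frac * ref_khz) / 0x8000;
	unsigned int link_khz = dco_khz / (p0 * p1 * p2 * 5);

	/* dco = 8,100,000 kHz; link clock = 405,000 kHz for these inputs */
	printf("dco=%u kHz link=%u kHz\n", dco_khz, link_khz);
	return 0;
}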
static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
{ {
int dotclock; int dotclock;
@ -1124,6 +1180,59 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
pipe_config->base.adjusted_mode.crtc_clock = dotclock; pipe_config->base.adjusted_mode.crtc_clock = dotclock;
} }
static void cnl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int link_clock = 0;
uint32_t cfgcr0, pll_id;
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
if (cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
} else {
link_clock = cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
switch (link_clock) {
case DPLL_CFGCR0_LINK_RATE_810:
link_clock = 81000;
break;
case DPLL_CFGCR0_LINK_RATE_1080:
link_clock = 108000;
break;
case DPLL_CFGCR0_LINK_RATE_1350:
link_clock = 135000;
break;
case DPLL_CFGCR0_LINK_RATE_1620:
link_clock = 162000;
break;
case DPLL_CFGCR0_LINK_RATE_2160:
link_clock = 216000;
break;
case DPLL_CFGCR0_LINK_RATE_2700:
link_clock = 270000;
break;
case DPLL_CFGCR0_LINK_RATE_3240:
link_clock = 324000;
break;
case DPLL_CFGCR0_LINK_RATE_4050:
link_clock = 405000;
break;
default:
WARN(1, "Unsupported link rate\n");
break;
}
link_clock *= 2;
}
pipe_config->port_clock = link_clock;
ddi_dotclock_get(pipe_config);
}
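For example, a CFGCR0 link rate field of DPLL_CFGCR0_LINK_RATE_2700 decodes to 270000 above and, after the doubling, yields a port_clock of 540000 kHz, i.e. the 5.4 Gbps HBR2 DP link rate.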
static void skl_ddi_clock_get(struct intel_encoder *encoder, static void skl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config) struct intel_crtc_state *pipe_config)
{ {
@ -1267,6 +1376,8 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
skl_ddi_clock_get(encoder, pipe_config); skl_ddi_clock_get(encoder, pipe_config);
else if (IS_GEN9_LP(dev_priv)) else if (IS_GEN9_LP(dev_priv))
bxt_ddi_clock_get(encoder, pipe_config); bxt_ddi_clock_get(encoder, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_clock_get(encoder, pipe_config);
} }
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
@ -1868,9 +1979,12 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
if ((intel_dp) && (type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP)) { if ((intel_dp) && (type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP)) {
width = intel_dp->lane_count; width = intel_dp->lane_count;
rate = intel_dp->link_rate; rate = intel_dp->link_rate;
} else { } else if (type == INTEL_OUTPUT_HDMI) {
width = 4; width = 4;
/* Rate is always < than 6GHz for HDMI */ /* Rate is always < than 6GHz for HDMI */
} else {
MISSING_CASE(type);
return;
} }
/* /*


@ -363,7 +363,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
*/ */
if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE || if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED || sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
(dev_priv->pch_type == PCH_CPT && (HAS_PCH_CPT(dev_priv) &&
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
DRM_INFO("Display fused off, disabling\n"); DRM_INFO("Display fused off, disabling\n");
info->num_pipes = 0; info->num_pipes = 0;


@ -3311,7 +3311,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl = PLANE_CTL_ENABLE; plane_ctl = PLANE_CTL_ENABLE;
if (!IS_GEMINILAKE(dev_priv)) { if (!IS_GEMINILAKE(dev_priv) && !IS_CANNONLAKE(dev_priv)) {
plane_ctl |= plane_ctl |=
PLANE_CTL_PIPE_GAMMA_ENABLE | PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE | PLANE_CTL_PIPE_CSC_ENABLE |
@ -3367,7 +3367,7 @@ static void skylake_update_primary_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (IS_GEMINILAKE(dev_priv)) { if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
PLANE_COLOR_PIPE_GAMMA_ENABLE | PLANE_COLOR_PIPE_GAMMA_ENABLE |
PLANE_COLOR_PIPE_CSC_ENABLE | PLANE_COLOR_PIPE_CSC_ENABLE |
@ -4612,6 +4612,9 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
&crtc_state->scaler_state; &crtc_state->scaler_state;
struct intel_crtc *intel_crtc = struct intel_crtc *intel_crtc =
to_intel_crtc(crtc_state->base.crtc); to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
int need_scaling; int need_scaling;
/* /*
@ -4621,6 +4624,18 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
*/ */
need_scaling = src_w != dst_w || src_h != dst_h; need_scaling = src_w != dst_w || src_h != dst_h;
/*
* Scaling/fitting not supported in IF-ID mode in GEN9+
* TODO: Interlace fetch mode doesn't support YUV420 planar formats.
* Once NV12 is enabled, handle it here while allocating scaler
* for NV12.
*/
if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
return -EINVAL;
}
/* /*
* if plane is being disabled or scaler is no more required or force detach * if plane is being disabled or scaler is no more required or force detach
* - free scaler binded to this plane/crtc * - free scaler binded to this plane/crtc
@ -14765,6 +14780,17 @@ static void quirk_backlight_present(struct drm_device *dev)
DRM_INFO("applying backlight present quirk\n"); DRM_INFO("applying backlight present quirk\n");
} }
/* Toshiba Satellite P50-C-18C requires the T12 delay to be at least 800 ms,
 * which is 300 ms greater than the eDP spec T12 minimum.
*/
static void quirk_increase_t12_delay(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY;
DRM_INFO("Applying T12 delay quirk\n");
}
struct intel_quirk { struct intel_quirk {
int device; int device;
int subsystem_vendor; int subsystem_vendor;
@ -14848,6 +14874,9 @@ static struct intel_quirk intel_quirks[] = {
/* Dell Chromebook 11 (2015 version) */ /* Dell Chromebook 11 (2015 version) */
{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present }, { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
/* Toshiba Satellite P50-C-18C */
{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
}; };
static void intel_init_quirks(struct drm_device *dev) static void intel_init_quirks(struct drm_device *dev)


@ -4418,8 +4418,6 @@ static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
u32 bit; u32 bit;
switch (port->port) { switch (port->port) {
case PORT_A:
return true;
case PORT_B: case PORT_B:
bit = SDE_PORTB_HOTPLUG; bit = SDE_PORTB_HOTPLUG;
break; break;
@ -4443,8 +4441,6 @@ static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
u32 bit; u32 bit;
switch (port->port) { switch (port->port) {
case PORT_A:
return true;
case PORT_B: case PORT_B:
bit = SDE_PORTB_HOTPLUG_CPT; bit = SDE_PORTB_HOTPLUG_CPT;
break; break;
@ -4454,12 +4450,28 @@ static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
case PORT_D: case PORT_D:
bit = SDE_PORTD_HOTPLUG_CPT; bit = SDE_PORTD_HOTPLUG_CPT;
break; break;
default:
MISSING_CASE(port->port);
return false;
}
return I915_READ(SDEISR) & bit;
}
static bool spt_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
u32 bit;
switch (port->port) {
case PORT_A:
bit = SDE_PORTA_HOTPLUG_SPT;
break;
case PORT_E: case PORT_E:
bit = SDE_PORTE_HOTPLUG_SPT; bit = SDE_PORTE_HOTPLUG_SPT;
break; break;
default: default:
MISSING_CASE(port->port); return cpt_digital_port_connected(dev_priv, port);
return false;
} }
return I915_READ(SDEISR) & bit; return I915_READ(SDEISR) & bit;
@ -4511,6 +4523,42 @@ static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
return I915_READ(PORT_HOTPLUG_STAT) & bit; return I915_READ(PORT_HOTPLUG_STAT) & bit;
} }
static bool ilk_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
if (port->port == PORT_A)
return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
else
return ibx_digital_port_connected(dev_priv, port);
}
static bool snb_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
if (port->port == PORT_A)
return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
else
return cpt_digital_port_connected(dev_priv, port);
}
static bool ivb_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
if (port->port == PORT_A)
return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
else
return cpt_digital_port_connected(dev_priv, port);
}
static bool bdw_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
if (port->port == PORT_A)
return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
else
return cpt_digital_port_connected(dev_priv, port);
}
static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv, static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *intel_dig_port) struct intel_digital_port *intel_dig_port)
{ {
@ -4547,16 +4595,25 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
bool intel_digital_port_connected(struct drm_i915_private *dev_priv, bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port) struct intel_digital_port *port)
{ {
if (HAS_PCH_IBX(dev_priv)) if (HAS_GMCH_DISPLAY(dev_priv)) {
return ibx_digital_port_connected(dev_priv, port); if (IS_GM45(dev_priv))
else if (HAS_PCH_SPLIT(dev_priv))
return cpt_digital_port_connected(dev_priv, port);
else if (IS_GEN9_LP(dev_priv))
return bxt_digital_port_connected(dev_priv, port);
else if (IS_GM45(dev_priv))
return gm45_digital_port_connected(dev_priv, port); return gm45_digital_port_connected(dev_priv, port);
else else
return g4x_digital_port_connected(dev_priv, port); return g4x_digital_port_connected(dev_priv, port);
}
if (IS_GEN5(dev_priv))
return ilk_digital_port_connected(dev_priv, port);
else if (IS_GEN6(dev_priv))
return snb_digital_port_connected(dev_priv, port);
else if (IS_GEN7(dev_priv))
return ivb_digital_port_connected(dev_priv, port);
else if (IS_GEN8(dev_priv))
return bdw_digital_port_connected(dev_priv, port);
else if (IS_GEN9_LP(dev_priv))
return bxt_digital_port_connected(dev_priv, port);
else
return spt_digital_port_connected(dev_priv, port);
} }
static struct edid * static struct edid *
@ -5121,12 +5178,8 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
PANEL_POWER_DOWN_DELAY_SHIFT; PANEL_POWER_DOWN_DELAY_SHIFT;
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) { if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) {
u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >> seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
BXT_POWER_CYCLE_DELAY_SHIFT; BXT_POWER_CYCLE_DELAY_SHIFT) * 1000;
if (tmp > 0)
seq->t11_t12 = (tmp - 1) * 1000;
else
seq->t11_t12 = 0;
} else { } else {
seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
@ -5177,6 +5230,21 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
intel_pps_dump_state("cur", &cur); intel_pps_dump_state("cur", &cur);
vbt = dev_priv->vbt.edp.pps; vbt = dev_priv->vbt.edp.pps;
/* On the Toshiba Satellite P50-C-18C the VBT T12 delay
* of 500 ms appears to be too short. Occasionally the panel
* just fails to power back on. Increasing the delay to 800 ms
* seems sufficient to avoid this problem.
*/
if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
vbt.t11_t12 = max_t(u16, vbt.t11_t12, 800 * 10);
DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
vbt.t11_t12);
}
/* The T11_T12 delay is special: it is actually in units of 100 ms, and it is
 * zero based in the hw (so we need to add 100 ms). The sw VBT table has
 * already multiplied it by 1000 so that it is in units of 100 usec like the
 * other delays. */
vbt.t11_t12 += 100 * 10;
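As a hedged illustration of the unit juggling described in the comment above (100 us units on the software side, a 100 ms based register field on BXT/CNP), the following standalone sketch walks a hypothetical 500 ms VBT value through the quirk and the writeback rounding:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* software-side value in 100 us units: hypothetical VBT T12 of 500 ms */
	unsigned int t11_t12 = 500 * 10;

	/* quirk: enforce at least 800 ms */
	if (t11_t12 < 800 * 10)
		t11_t12 = 800 * 10;

	/* the hw field is zero based in 100 ms steps, so add 100 ms */
	t11_t12 += 100 * 10;

	/* the BXT/CNP power cycle delay field is written back in 100 ms units */
	printf("register field = %u (i.e. %u ms)\n",
	       DIV_ROUND_UP(t11_t12, 1000), DIV_ROUND_UP(t11_t12, 1000) * 100);
	return 0;
}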
/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
* our hw here, which are all in 100usec. */ * our hw here, which are all in 100usec. */
@ -5280,7 +5348,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) { if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) {
pp_div = I915_READ(regs.pp_ctrl); pp_div = I915_READ(regs.pp_ctrl);
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK; pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000) pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
<< BXT_POWER_CYCLE_DELAY_SHIFT); << BXT_POWER_CYCLE_DELAY_SHIFT);
} else { } else {
pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT; pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;


@ -98,13 +98,105 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
} }
} }
/*
* Set the PWM frequency divider to match the desired frequency from the VBT.
* The PWM frequency is calculated as 27 MHz / (F x P).
* - Where F = PWM Frequency Pre-Divider value programmed by field 7:0 of the
* EDP_BACKLIGHT_FREQ_SET register (DPCD Address 00728h)
* - Where P = 2^Pn, where Pn is the value programmed by field 4:0 of the
* EDP_PWMGEN_BIT_COUNT register (DPCD Address 00724h)
*/
static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1;
u8 pn, pn_min, pn_max;
/* Find desired value of (F x P)
* Note that if F x P is out of the supported range, the maximum or minimum
* value will be applied automatically, so there is no need to check for that.
*/
freq = dev_priv->vbt.backlight.pwm_freq_hz;
DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq);
if (!freq) {
DRM_DEBUG_KMS("Use panel default backlight frequency\n");
return false;
}
fxp = DIV_ROUND_CLOSEST(KHz(DP_EDP_BACKLIGHT_FREQ_BASE_KHZ), freq);
/* Use the highest possible value of Pn for finer granularity of brightness
 * adjustment while satisfying the conditions below.
 * - Pn is in the range of Pn_min to Pn_max
 * - F is in the range of 1 to 255
 * - FxP is within 25% of the desired value.
 * Note: 25% is an arbitrary value and may need some tweaking.
*/
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) {
DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n");
return false;
}
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) {
DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n");
return false;
}
pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) {
DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n");
return false;
}
for (pn = pn_max; pn >= pn_min; pn--) {
f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255);
fxp_actual = f << pn;
if (fxp_min <= fxp_actual && fxp_actual <= fxp_max)
break;
}
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) {
DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
return false;
}
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_BACKLIGHT_FREQ_SET, (u8) f) < 0) {
DRM_DEBUG_KMS("Failed to write aux backlight freq\n");
return false;
}
return true;
}
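A standalone sketch of the divider search described in the comment above; the 27 MHz base and the +/-25% window follow the code in this hunk, while the requested frequency and the Pn capability range in main() are hypothetical example values.

#include <stdio.h>

#define DIV_ROUND_CLOSEST(n, d)	(((n) + (d) / 2) / (d))

/* Pick Pn (power-of-two pre-divider exponent) and F so that F * 2^Pn
 * approximates 27 MHz / target_hz within +/-25%, preferring the largest
 * Pn for finer brightness steps. Returns 0 on success, -1 if nothing fits.
 */
static int pick_pwm_divider(unsigned int target_hz,
			    unsigned int pn_min, unsigned int pn_max,
			    unsigned int *pn_out, unsigned int *f_out)
{
	unsigned int fxp = DIV_ROUND_CLOSEST(27000000u, target_hz);
	unsigned int fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
	unsigned int fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
	unsigned int pn;

	for (pn = pn_max + 1; pn-- > pn_min; ) {
		unsigned int f = DIV_ROUND_CLOSEST(fxp, 1u << pn);

		if (f < 1)
			f = 1;
		if (f > 255)
			f = 255;
		if (fxp_min <= (f << pn) && (f << pn) <= fxp_max) {
			*pn_out = pn;
			*f_out = f;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	unsigned int pn, f;

	/* hypothetical panel: 200 Hz requested, Pn capability 2..10 */
	if (!pick_pwm_divider(200, 2, 10, &pn, &f))
		printf("Pn=%u F=%u -> %u Hz\n", pn, f, 27000000u / (f << pn));
	return 0;
}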
/*
* Set minimum / maximum dynamic brightness percentage. This value is expressed
* as the percentage of normal brightness in 5% increments.
*/
static bool
intel_dp_aux_set_dynamic_backlight_percent(struct intel_dp *intel_dp,
u32 min, u32 max)
{
u8 dbc[] = { DIV_ROUND_CLOSEST(min, 5), DIV_ROUND_CLOSEST(max, 5) };
if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET,
dbc, sizeof(dbc)) < 0) {
DRM_DEBUG_KMS("Failed to write aux DBC brightness level\n");
return false;
}
return true;
}
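For example, requesting min = 0% and max = 100% writes the two bytes { 0, 20 }, since each register step represents 5% of normal brightness.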
static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_state, static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state) const struct drm_connector_state *conn_state)
{ {
struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
uint8_t dpcd_buf = 0; uint8_t dpcd_buf, new_dpcd_buf, edp_backlight_mode;
uint8_t edp_backlight_mode = 0;
if (drm_dp_dpcd_readb(&intel_dp->aux, if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) { DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
@ -113,18 +205,15 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
return; return;
} }
new_dpcd_buf = dpcd_buf;
edp_backlight_mode = dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; edp_backlight_mode = dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
switch (edp_backlight_mode) { switch (edp_backlight_mode) {
case DP_EDP_BACKLIGHT_CONTROL_MODE_PWM: case DP_EDP_BACKLIGHT_CONTROL_MODE_PWM:
case DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET: case DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET:
case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT: case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT:
dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD;
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf) < 0) {
DRM_DEBUG_KMS("Failed to write aux backlight mode\n");
}
break; break;
/* Do nothing when it is already DPCD mode */ /* Do nothing when it is already DPCD mode */
@ -133,6 +222,25 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
break; break;
} }
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP)
if (intel_dp_aux_set_pwm_freq(connector))
new_dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE;
if (i915.enable_dbc &&
(intel_dp->edp_dpcd[2] & DP_EDP_DYNAMIC_BACKLIGHT_CAP)) {
if (intel_dp_aux_set_dynamic_backlight_percent(intel_dp, 0, 100)) {
new_dpcd_buf |= DP_EDP_DYNAMIC_BACKLIGHT_ENABLE;
DRM_DEBUG_KMS("Enable dynamic brightness.\n");
}
}
if (new_dpcd_buf != dpcd_buf) {
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) {
DRM_DEBUG_KMS("Failed to write aux backlight mode\n");
}
}
set_aux_backlight_enable(intel_dp, true); set_aux_backlight_enable(intel_dp, true);
intel_dp_aux_set_backlight(conn_state, connector->panel.backlight.level); intel_dp_aux_set_backlight(conn_state, connector->panel.backlight.level);
} }
@ -169,15 +277,66 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
/* Check the eDP Display control capabilities registers to determine if /* Check the eDP Display control capabilities registers to determine if
* the panel can support backlight control over the aux channel * the panel can support backlight control over the aux channel
*/ */
if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP && if ((intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP) &&
(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) && (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP)) {
!(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) {
DRM_DEBUG_KMS("AUX Backlight Control Supported!\n"); DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
return true; return true;
} }
return false; return false;
} }
/*
* Heuristic to decide whether we should use AUX for backlight adjustment or not.
*
* We should use AUX for backlight brightness adjustment if the panel doesn't
* support this via the PWM pin, or if using AUX is better than using the PWM pin.
*
* The heuristic to determine that using the AUX pin is better than using the
* PWM pin is that the panel supports any of the features listed here:
* - Regional backlight brightness adjustment
* - Backlight PWM frequency set
* - More than 8 bits resolution of brightness level
* - Backlight enablement via AUX and not by BL_ENABLE pin
*
* If none of the above is true, assume that using the PWM pin is better.
*/
static bool
intel_dp_aux_display_control_heuristic(struct intel_connector *connector)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
uint8_t reg_val;
/* Panel doesn't support adjusting backlight brightness via the PWM pin */
if (!(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP))
return true;
/* Panel supports regional backlight brightness adjustment */
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_GENERAL_CAP_3,
&reg_val) != 1) {
DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
DP_EDP_GENERAL_CAP_3);
return false;
}
if (reg_val > 0)
return true;
/* Panel supports backlight PWM frequency set */
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP)
return true;
/* Panel supports more than 8 bits resolution of brightness level */
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
return true;
/* Panel supports enabling backlight via AUX but not by BL_ENABLE pin */
if ((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
!(intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_PIN_ENABLE_CAP))
return true;
return false;
}
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
{ {
struct intel_panel *panel = &intel_connector->panel; struct intel_panel *panel = &intel_connector->panel;
@ -188,6 +347,10 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
if (!intel_dp_aux_display_control_capable(intel_connector)) if (!intel_dp_aux_display_control_capable(intel_connector))
return -ENODEV; return -ENODEV;
if (i915.enable_dpcd_backlight == -1 &&
!intel_dp_aux_display_control_heuristic(intel_connector))
return -ENODEV;
panel->backlight.setup = intel_dp_aux_setup_backlight; panel->backlight.setup = intel_dp_aux_setup_backlight;
panel->backlight.enable = intel_dp_aux_enable_backlight; panel->backlight.enable = intel_dp_aux_enable_backlight;
panel->backlight.disable = intel_dp_aux_disable_backlight; panel->backlight.disable = intel_dp_aux_disable_backlight;


@ -1858,9 +1858,8 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
void gen6_rps_busy(struct drm_i915_private *dev_priv); void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv); void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv, void gen6_rps_boost(struct drm_i915_gem_request *rq,
struct intel_rps_client *rps, struct intel_rps_client *rps);
unsigned long submitted);
void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req); void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
void g4x_wm_get_hw_state(struct drm_device *dev); void g4x_wm_get_hw_state(struct drm_device *dev);
void vlv_wm_get_hw_state(struct drm_device *dev); void vlv_wm_get_hw_state(struct drm_device *dev);


@ -149,6 +149,7 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
switch (INTEL_GEN(dev_priv)) { switch (INTEL_GEN(dev_priv)) {
default: default:
MISSING_CASE(INTEL_GEN(dev_priv)); MISSING_CASE(INTEL_GEN(dev_priv));
case 10:
case 9: case 9:
return GEN9_LR_CONTEXT_RENDER_SIZE; return GEN9_LR_CONTEXT_RENDER_SIZE;
case 8: case 8:
@ -291,11 +292,9 @@ cleanup:
*/ */
int intel_engines_init(struct drm_i915_private *dev_priv) int intel_engines_init(struct drm_i915_private *dev_priv)
{ {
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
enum intel_engine_id id, err_id; enum intel_engine_id id, err_id;
unsigned int mask = 0; int err;
int err = 0;
for_each_engine(engine, dev_priv, id) { for_each_engine(engine, dev_priv, id) {
const struct engine_class_info *class_info = const struct engine_class_info *class_info =
@ -306,41 +305,31 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
init = class_info->init_execlists; init = class_info->init_execlists;
else else
init = class_info->init_legacy; init = class_info->init_legacy;
if (!init) {
kfree(engine); err = -EINVAL;
dev_priv->engine[id] = NULL; err_id = id;
continue;
} if (GEM_WARN_ON(!init))
goto cleanup;
err = init(engine); err = init(engine);
if (err) { if (err)
err_id = id;
goto cleanup; goto cleanup;
}
GEM_BUG_ON(!engine->submit_request); GEM_BUG_ON(!engine->submit_request);
mask |= ENGINE_MASK(id);
} }
/*
* Catch failures to update intel_engines table when the new engines
* are added to the driver by a warning and disabling the forgotten
* engines.
*/
if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
device_info->ring_mask = mask;
device_info->num_rings = hweight32(mask);
return 0; return 0;
cleanup: cleanup:
for_each_engine(engine, dev_priv, id) { for_each_engine(engine, dev_priv, id) {
if (id >= err_id) if (id >= err_id) {
kfree(engine); kfree(engine);
else dev_priv->engine[id] = NULL;
} else {
dev_priv->gt.cleanup_engine(engine); dev_priv->gt.cleanup_engine(engine);
} }
}
return err; return err;
} }
@ -1340,6 +1329,7 @@ void intel_engines_mark_idle(struct drm_i915_private *i915)
for_each_engine(engine, i915, id) { for_each_engine(engine, i915, id) {
intel_engine_disarm_breadcrumbs(engine); intel_engine_disarm_breadcrumbs(engine);
i915_gem_batch_pool_fini(&engine->batch_pool); i915_gem_batch_pool_fini(&engine->batch_pool);
tasklet_kill(&engine->irq_tasklet);
engine->no_priolist = false; engine->no_priolist = false;
} }
} }


@ -813,7 +813,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
{ {
struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
if (ifbdev && ifbdev->vma) if (ifbdev)
drm_fb_helper_hotplug_event(&ifbdev->helper); drm_fb_helper_hotplug_event(&ifbdev->helper);
} }


@ -2071,7 +2071,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
* So to avoid that we reset the context images upon resume. For * So to avoid that we reset the context images upon resume. For
* simplicity, we just zero everything out. * simplicity, we just zero everything out.
*/ */
list_for_each_entry(ctx, &dev_priv->context_list, link) { list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
for_each_engine(engine, dev_priv, id) { for_each_engine(engine, dev_priv, id) {
struct intel_context *ce = &ctx->engine[engine->id]; struct intel_context *ce = &ctx->engine[engine->id];
u32 *reg; u32 *reg;


@ -3837,7 +3837,7 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
uint_fixed_16_16_t downscale_h, downscale_w; uint_fixed_16_16_t downscale_h, downscale_w;
if (WARN_ON(!intel_wm_plane_visible(cstate, pstate))) if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
return u32_to_fixed_16_16(0); return u32_to_fixed16(0);
/* n.b., src is 16.16 fixed point, dst is whole integer */ /* n.b., src is 16.16 fixed point, dst is whole integer */
if (plane->id == PLANE_CURSOR) { if (plane->id == PLANE_CURSOR) {
@ -3861,10 +3861,10 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
dst_h = drm_rect_height(&pstate->base.dst); dst_h = drm_rect_height(&pstate->base.dst);
} }
fp_w_ratio = fixed_16_16_div(src_w, dst_w); fp_w_ratio = div_fixed16(src_w, dst_w);
fp_h_ratio = fixed_16_16_div(src_h, dst_h); fp_h_ratio = div_fixed16(src_h, dst_h);
downscale_w = max_fixed_16_16(fp_w_ratio, u32_to_fixed_16_16(1)); downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
downscale_h = max_fixed_16_16(fp_h_ratio, u32_to_fixed_16_16(1)); downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
return mul_fixed16(downscale_w, downscale_h); return mul_fixed16(downscale_w, downscale_h);
} }
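For intuition, a minimal userspace sketch of what the renamed 16.16 fixed-point helpers compute (the driver's versions clamp and round slightly differently); the 1920x1080 source scaled to 1280x720 in main() is a hypothetical example plane.

#include <stdint.h>
#include <stdio.h>

/* Plain 16.16 fixed point: value = raw / 65536. */
typedef struct { uint32_t val; } fixed16;

static fixed16 u32_to_fixed16(uint32_t v)
{
	return (fixed16){ v << 16 };
}

static fixed16 div_fixed16(uint32_t num, uint32_t den)
{
	return (fixed16){ (uint32_t)(((uint64_t)num << 16) / den) };
}

static fixed16 mul_fixed16(fixed16 a, fixed16 b)
{
	return (fixed16){ (uint32_t)(((uint64_t)a.val * b.val) >> 16) };
}

static fixed16 max_fixed16(fixed16 a, fixed16 b)
{
	return a.val > b.val ? a : b;
}

int main(void)
{
	/* downscale ratio = max(src/dst, 1) per axis, then multiplied */
	fixed16 w = max_fixed16(div_fixed16(1920, 1280), u32_to_fixed16(1));
	fixed16 h = max_fixed16(div_fixed16(1080, 720), u32_to_fixed16(1));
	fixed16 d = mul_fixed16(w, h);

	printf("downscale amount = %f\n", d.val / 65536.0);	/* 2.25 */
	return 0;
}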
@ -3872,7 +3872,7 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
static uint_fixed_16_16_t static uint_fixed_16_16_t
skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state) skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
{ {
uint_fixed_16_16_t pipe_downscale = u32_to_fixed_16_16(1); uint_fixed_16_16_t pipe_downscale = u32_to_fixed16(1);
if (!crtc_state->base.enable) if (!crtc_state->base.enable)
return pipe_downscale; return pipe_downscale;
@ -3891,10 +3891,10 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
if (!dst_w || !dst_h) if (!dst_w || !dst_h)
return pipe_downscale; return pipe_downscale;
fp_w_ratio = fixed_16_16_div(src_w, dst_w); fp_w_ratio = div_fixed16(src_w, dst_w);
fp_h_ratio = fixed_16_16_div(src_h, dst_h); fp_h_ratio = div_fixed16(src_h, dst_h);
downscale_w = max_fixed_16_16(fp_w_ratio, u32_to_fixed_16_16(1)); downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
downscale_h = max_fixed_16_16(fp_h_ratio, u32_to_fixed_16_16(1)); downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
pipe_downscale = mul_fixed16(downscale_w, downscale_h); pipe_downscale = mul_fixed16(downscale_w, downscale_h);
} }
@ -3913,14 +3913,14 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
int crtc_clock, dotclk; int crtc_clock, dotclk;
uint32_t pipe_max_pixel_rate; uint32_t pipe_max_pixel_rate;
uint_fixed_16_16_t pipe_downscale; uint_fixed_16_16_t pipe_downscale;
uint_fixed_16_16_t max_downscale = u32_to_fixed_16_16(1); uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
if (!cstate->base.enable) if (!cstate->base.enable)
return 0; return 0;
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
uint_fixed_16_16_t plane_downscale; uint_fixed_16_16_t plane_downscale;
uint_fixed_16_16_t fp_9_div_8 = fixed_16_16_div(9, 8); uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8);
int bpp; int bpp;
if (!intel_wm_plane_visible(cstate, if (!intel_wm_plane_visible(cstate,
@ -3938,7 +3938,7 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
plane_downscale = mul_fixed16(plane_downscale, plane_downscale = mul_fixed16(plane_downscale,
fp_9_div_8); fp_9_div_8);
max_downscale = max_fixed_16_16(plane_downscale, max_downscale); max_downscale = max_fixed16(plane_downscale, max_downscale);
} }
pipe_downscale = skl_pipe_downscale_amount(cstate); pipe_downscale = skl_pipe_downscale_amount(cstate);
@ -4276,7 +4276,7 @@ static uint_fixed_16_16_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp,
return FP_16_16_MAX; return FP_16_16_MAX;
wm_intermediate_val = latency * pixel_rate * cpp; wm_intermediate_val = latency * pixel_rate * cpp;
ret = fixed_16_16_div_u64(wm_intermediate_val, 1000 * 512); ret = div_fixed16(wm_intermediate_val, 1000 * 512);
return ret; return ret;
} }
@ -4294,7 +4294,7 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
wm_intermediate_val = latency * pixel_rate; wm_intermediate_val = latency * pixel_rate;
wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val, wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
pipe_htotal * 1000); pipe_htotal * 1000);
ret = mul_u32_fixed_16_16(wm_intermediate_val, plane_blocks_per_line); ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
return ret; return ret;
} }
@ -4306,15 +4306,15 @@ intel_get_linetime_us(struct intel_crtc_state *cstate)
uint_fixed_16_16_t linetime_us; uint_fixed_16_16_t linetime_us;
if (!cstate->base.active) if (!cstate->base.active)
return u32_to_fixed_16_16(0); return u32_to_fixed16(0);
pixel_rate = cstate->pixel_rate; pixel_rate = cstate->pixel_rate;
if (WARN_ON(pixel_rate == 0)) if (WARN_ON(pixel_rate == 0))
return u32_to_fixed_16_16(0); return u32_to_fixed16(0);
crtc_htotal = cstate->base.adjusted_mode.crtc_htotal; crtc_htotal = cstate->base.adjusted_mode.crtc_htotal;
linetime_us = fixed_16_16_div_u64(crtc_htotal * 1000, pixel_rate); linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
return linetime_us; return linetime_us;
} }
@ -4361,7 +4361,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
uint32_t plane_bytes_per_line; uint32_t plane_bytes_per_line;
uint32_t res_blocks, res_lines; uint32_t res_blocks, res_lines;
uint8_t cpp; uint8_t cpp;
uint32_t width = 0, height = 0; uint32_t width = 0;
uint32_t plane_pixel_rate; uint32_t plane_pixel_rate;
uint_fixed_16_16_t y_tile_minimum; uint_fixed_16_16_t y_tile_minimum;
uint32_t y_min_scanlines; uint32_t y_min_scanlines;
@ -4390,7 +4390,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if (plane->id == PLANE_CURSOR) { if (plane->id == PLANE_CURSOR) {
width = intel_pstate->base.crtc_w; width = intel_pstate->base.crtc_w;
height = intel_pstate->base.crtc_h;
} else { } else {
/* /*
* Src coordinates are already rotated by 270 degrees for * Src coordinates are already rotated by 270 degrees for
@ -4398,16 +4397,13 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
* GTT mapping), hence no need to account for rotation here. * GTT mapping), hence no need to account for rotation here.
*/ */
width = drm_rect_width(&intel_pstate->base.src) >> 16; width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
} }
cpp = fb->format->cpp[0]; cpp = (fb->format->format == DRM_FORMAT_NV12) ? fb->format->cpp[1] :
fb->format->cpp[0];
plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
if (drm_rotation_90_or_270(pstate->rotation)) { if (drm_rotation_90_or_270(pstate->rotation)) {
int cpp = (fb->format->format == DRM_FORMAT_NV12) ?
fb->format->cpp[1] :
fb->format->cpp[0];
switch (cpp) { switch (cpp) {
case 1: case 1:
@ -4434,14 +4430,14 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if (y_tiled) { if (y_tiled) {
interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line * interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line *
y_min_scanlines, 512); y_min_scanlines, 512);
plane_blocks_per_line = fixed_16_16_div(interm_pbpl, plane_blocks_per_line = div_fixed16(interm_pbpl,
y_min_scanlines); y_min_scanlines);
} else if (x_tiled) { } else if (x_tiled) {
interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512); interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512);
plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl); plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
} else { } else {
interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1; interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl); plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
} }
method1 = skl_wm_method1(plane_pixel_rate, cpp, latency); method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
@ -4450,35 +4446,35 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
latency, latency,
plane_blocks_per_line); plane_blocks_per_line);
y_tile_minimum = mul_u32_fixed_16_16(y_min_scanlines, y_tile_minimum = mul_u32_fixed16(y_min_scanlines,
plane_blocks_per_line); plane_blocks_per_line);
if (y_tiled) { if (y_tiled) {
selected_result = max_fixed_16_16(method2, y_tile_minimum); selected_result = max_fixed16(method2, y_tile_minimum);
} else { } else {
uint32_t linetime_us; uint32_t linetime_us;
linetime_us = fixed_16_16_to_u32_round_up( linetime_us = fixed16_to_u32_round_up(
intel_get_linetime_us(cstate)); intel_get_linetime_us(cstate));
if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) && if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
(plane_bytes_per_line / 512 < 1)) (plane_bytes_per_line / 512 < 1))
selected_result = method2; selected_result = method2;
else if ((ddb_allocation && ddb_allocation / else if ((ddb_allocation && ddb_allocation /
fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1) fixed16_to_u32_round_up(plane_blocks_per_line)) >= 1)
selected_result = min_fixed_16_16(method1, method2); selected_result = min_fixed16(method1, method2);
else if (latency >= linetime_us) else if (latency >= linetime_us)
selected_result = min_fixed_16_16(method1, method2); selected_result = min_fixed16(method1, method2);
else else
selected_result = method1; selected_result = method1;
} }
res_blocks = fixed_16_16_to_u32_round_up(selected_result) + 1; res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
res_lines = div_round_up_fixed16(selected_result, res_lines = div_round_up_fixed16(selected_result,
plane_blocks_per_line); plane_blocks_per_line);
if (level >= 1 && level <= 7) { if (level >= 1 && level <= 7) {
if (y_tiled) { if (y_tiled) {
res_blocks += fixed_16_16_to_u32_round_up(y_tile_minimum); res_blocks += fixed16_to_u32_round_up(y_tile_minimum);
res_lines += y_min_scanlines; res_lines += y_min_scanlines;
} else { } else {
res_blocks++; res_blocks++;
@ -4563,8 +4559,7 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
if (is_fixed16_zero(linetime_us)) if (is_fixed16_zero(linetime_us))
return 0; return 0;
linetime_wm = fixed_16_16_to_u32_round_up(mul_u32_fixed_16_16(8, linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
linetime_us));
/* Display WA #1135: bxt. */ /* Display WA #1135: bxt. */
if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled) if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled)
@ -5852,7 +5847,7 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
* the hw runs at the minimal clock before selecting the desired * the hw runs at the minimal clock before selecting the desired
* frequency, if the down threshold expires in that window we will not * frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */ * receive a down interrupt. */
if (IS_GEN9(dev_priv)) { if (INTEL_GEN(dev_priv) >= 9) {
limits = (dev_priv->rps.max_freq_softlimit) << 23; limits = (dev_priv->rps.max_freq_softlimit) << 23;
if (val <= dev_priv->rps.min_freq_softlimit) if (val <= dev_priv->rps.min_freq_softlimit)
limits |= (dev_priv->rps.min_freq_softlimit) << 14; limits |= (dev_priv->rps.min_freq_softlimit) << 14;
@ -5994,7 +5989,7 @@ static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
if (val != dev_priv->rps.cur_freq) { if (val != dev_priv->rps.cur_freq) {
gen6_set_rps_thresholds(dev_priv, val); gen6_set_rps_thresholds(dev_priv, val);
if (IS_GEN9(dev_priv)) if (INTEL_GEN(dev_priv) >= 9)
I915_WRITE(GEN6_RPNSWREQ, I915_WRITE(GEN6_RPNSWREQ,
GEN9_FREQUENCY(val)); GEN9_FREQUENCY(val));
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@ -6126,47 +6121,35 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
gen6_sanitize_rps_pm_mask(dev_priv, ~0)); gen6_sanitize_rps_pm_mask(dev_priv, ~0));
} }
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
spin_lock(&dev_priv->rps.client_lock);
while (!list_empty(&dev_priv->rps.clients))
list_del_init(dev_priv->rps.clients.next);
spin_unlock(&dev_priv->rps.client_lock);
} }
void gen6_rps_boost(struct drm_i915_private *dev_priv, void gen6_rps_boost(struct drm_i915_gem_request *rq,
struct intel_rps_client *rps, struct intel_rps_client *rps)
unsigned long submitted)
{ {
struct drm_i915_private *i915 = rq->i915;
bool boost;
/* This is intentionally racy! We peek at the state here, then /* This is intentionally racy! We peek at the state here, then
* validate inside the RPS worker. * validate inside the RPS worker.
*/ */
if (!(dev_priv->gt.awake && if (!i915->rps.enabled)
dev_priv->rps.enabled &&
dev_priv->rps.cur_freq < dev_priv->rps.boost_freq))
return; return;
/* Force a RPS boost (and don't count it against the client) if boost = false;
* the GPU is severely congested. spin_lock_irq(&rq->lock);
*/ if (!rq->waitboost && !i915_gem_request_completed(rq)) {
if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES)) atomic_inc(&i915->rps.num_waiters);
rps = NULL; rq->waitboost = true;
boost = true;
spin_lock(&dev_priv->rps.client_lock);
if (rps == NULL || list_empty(&rps->link)) {
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->rps.interrupts_enabled) {
dev_priv->rps.client_boost = true;
schedule_work(&dev_priv->rps.work);
} }
spin_unlock_irq(&dev_priv->irq_lock); spin_unlock_irq(&rq->lock);
if (!boost)
return;
if (rps != NULL) { if (READ_ONCE(i915->rps.cur_freq) < i915->rps.boost_freq)
list_add(&rps->link, &dev_priv->rps.clients); schedule_work(&i915->rps.work);
rps->boosts++;
} else atomic_inc(rps ? &rps->boosts : &i915->rps.boosts);
dev_priv->rps.boosts++;
}
spin_unlock(&dev_priv->rps.client_lock);
} }
int intel_set_rps(struct drm_i915_private *dev_priv, u8 val) int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
@ -6365,7 +6348,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) || if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
IS_GEN9_BC(dev_priv)) { IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
u32 ddcc_status = 0; u32 ddcc_status = 0;
if (sandybridge_pcode_read(dev_priv, if (sandybridge_pcode_read(dev_priv,
@ -6378,7 +6361,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
dev_priv->rps.max_freq); dev_priv->rps.max_freq);
} }
if (IS_GEN9_BC(dev_priv)) { if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
/* Store the frequency values in 16.66 MHZ units, which is /* Store the frequency values in 16.66 MHZ units, which is
* the natural hardware unit for SKL * the natural hardware unit for SKL
*/ */
@ -6684,7 +6667,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
/* convert DDR frequency from units of 266.6MHz to bandwidth */ /* convert DDR frequency from units of 266.6MHz to bandwidth */
min_ring_freq = mult_frac(min_ring_freq, 8, 3); min_ring_freq = mult_frac(min_ring_freq, 8, 3);
if (IS_GEN9_BC(dev_priv)) { if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
/* Convert GT frequency to 50 HZ units */ /* Convert GT frequency to 50 HZ units */
min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER; min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER; max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
@ -6702,7 +6685,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
int diff = max_gpu_freq - gpu_freq; int diff = max_gpu_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0; unsigned int ia_freq = 0, ring_freq = 0;
if (IS_GEN9_BC(dev_priv)) { if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
/* /*
* ring_freq = 2 * GT. ring_freq is in 100MHz units * ring_freq = 2 * GT. ring_freq is in 100MHz units
* No floor required for ring frequency on SKL. * No floor required for ring frequency on SKL.
@ -7833,7 +7816,7 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
} else if (INTEL_GEN(dev_priv) >= 9) { } else if (INTEL_GEN(dev_priv) >= 9) {
gen9_enable_rc6(dev_priv); gen9_enable_rc6(dev_priv);
gen9_enable_rps(dev_priv); gen9_enable_rps(dev_priv);
if (IS_GEN9_BC(dev_priv)) if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
gen6_update_ring_freq(dev_priv); gen6_update_ring_freq(dev_priv);
} else if (IS_BROADWELL(dev_priv)) { } else if (IS_BROADWELL(dev_priv)) {
gen8_enable_rps(dev_priv); gen8_enable_rps(dev_priv);
@ -9078,7 +9061,7 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{ {
if (IS_GEN9(dev_priv)) if (INTEL_GEN(dev_priv) >= 9)
return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER, return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
GEN9_FREQ_SCALER); GEN9_FREQ_SCALER);
else if (IS_CHERRYVIEW(dev_priv)) else if (IS_CHERRYVIEW(dev_priv))
@ -9091,7 +9074,7 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{ {
if (IS_GEN9(dev_priv)) if (INTEL_GEN(dev_priv) >= 9)
return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER, return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
GT_FREQUENCY_MULTIPLIER); GT_FREQUENCY_MULTIPLIER);
else if (IS_CHERRYVIEW(dev_priv)) else if (IS_CHERRYVIEW(dev_priv))
@ -9113,7 +9096,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
struct drm_i915_gem_request *req = boost->req; struct drm_i915_gem_request *req = boost->req;
if (!i915_gem_request_completed(req)) if (!i915_gem_request_completed(req))
gen6_rps_boost(req->i915, NULL, req->emitted_jiffies); gen6_rps_boost(req, NULL);
i915_gem_request_put(req); i915_gem_request_put(req);
kfree(boost); kfree(boost);
@ -9142,11 +9125,10 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
void intel_pm_setup(struct drm_i915_private *dev_priv) void intel_pm_setup(struct drm_i915_private *dev_priv)
{ {
mutex_init(&dev_priv->rps.hw_lock); mutex_init(&dev_priv->rps.hw_lock);
spin_lock_init(&dev_priv->rps.client_lock);
INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work, INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
__intel_autoenable_gt_powersave); __intel_autoenable_gt_powersave);
INIT_LIST_HEAD(&dev_priv->rps.clients); atomic_set(&dev_priv->rps.num_waiters, 0);
dev_priv->pm.suspended = false; dev_priv->pm.suspended = false;
atomic_set(&dev_priv->pm.wakeref_count, 0); atomic_set(&dev_priv->pm.wakeref_count, 0);


@ -2140,7 +2140,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->emit_breadcrumb = gen6_sema_emit_breadcrumb; engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1; num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
if (INTEL_GEN(dev_priv) >= 8) { if (INTEL_GEN(dev_priv) >= 8) {
engine->emit_breadcrumb_sz += num_rings * 6; engine->emit_breadcrumb_sz += num_rings * 6;
} else { } else {
@ -2184,8 +2184,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
engine->semaphore.signal = gen8_rcs_signal; engine->semaphore.signal = gen8_rcs_signal;
num_rings = num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
engine->emit_breadcrumb_sz += num_rings * 8; engine->emit_breadcrumb_sz += num_rings * 8;
} }
} else if (INTEL_GEN(dev_priv) >= 6) { } else if (INTEL_GEN(dev_priv) >= 6) {


@ -121,6 +121,7 @@ struct intel_engine_hangcheck {
unsigned long action_timestamp; unsigned long action_timestamp;
int deadlock; int deadlock;
struct intel_instdone instdone; struct intel_instdone instdone;
struct drm_i915_gem_request *active_request;
bool stalled; bool stalled;
}; };


@ -341,6 +341,59 @@ static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
1 << PIPE_C | 1 << PIPE_B); 1 << PIPE_C | 1 << PIPE_B);
} }
static void gen9_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
int id = power_well->id;
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
WARN_ON(intel_wait_for_register(dev_priv,
HSW_PWR_WELL_DRIVER,
SKL_POWER_WELL_STATE(id),
SKL_POWER_WELL_STATE(id),
1));
}
static u32 gen9_power_well_requesters(struct drm_i915_private *dev_priv, int id)
{
u32 req_mask = SKL_POWER_WELL_REQ(id);
u32 ret;
ret = I915_READ(HSW_PWR_WELL_BIOS) & req_mask ? 1 : 0;
ret |= I915_READ(HSW_PWR_WELL_DRIVER) & req_mask ? 2 : 0;
ret |= I915_READ(HSW_PWR_WELL_KVMR) & req_mask ? 4 : 0;
ret |= I915_READ(HSW_PWR_WELL_DEBUG) & req_mask ? 8 : 0;
return ret;
}
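For example, a return value of 0x9 here has bits 0 and 3 set, meaning the BIOS and DEBUG request registers still assert this power well's request bit.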
static void gen9_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
int id = power_well->id;
bool disabled;
u32 reqs;
/*
* Bspec doesn't require waiting for PWs to get disabled, but still do
* this for paranoia. The known cases where a PW will be forced on:
* - a KVMR request on any power well via the KVMR request register
* - a DMC request on PW1 and MISC_IO power wells via the BIOS and
* DEBUG request registers
* Skip the wait in case any of the request bits are set and print a
* diagnostic message.
*/
wait_for((disabled = !(I915_READ(HSW_PWR_WELL_DRIVER) &
SKL_POWER_WELL_STATE(id))) ||
(reqs = gen9_power_well_requesters(dev_priv, id)), 1);
if (disabled)
return;
DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
power_well->name,
!!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
static void hsw_set_power_well(struct drm_i915_private *dev_priv, static void hsw_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable) struct i915_power_well *power_well, bool enable)
{ {
@ -549,7 +602,9 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
"DC9 already programmed to be enabled.\n"); "DC9 already programmed to be enabled.\n");
WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
"DC5 still not disabled to enable DC9.\n"); "DC5 still not disabled to enable DC9.\n");
WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n"); WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER) &
SKL_POWER_WELL_REQ(SKL_DISP_PW_2),
"Power well 2 on.\n");
WARN_ONCE(intel_irqs_enabled(dev_priv), WARN_ONCE(intel_irqs_enabled(dev_priv),
"Interrupts not disabled yet.\n"); "Interrupts not disabled yet.\n");
@ -744,45 +799,6 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
} }
static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
enum skl_disp_power_wells power_well_id = power_well->id;
u32 val;
u32 mask;
mask = SKL_POWER_WELL_REQ(power_well_id);
val = I915_READ(HSW_PWR_WELL_KVMR);
if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
power_well->name))
I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);
val = I915_READ(HSW_PWR_WELL_BIOS);
val |= I915_READ(HSW_PWR_WELL_DEBUG);
if (!(val & mask))
return;
/*
* DMC is known to force on the request bits for power well 1 on SKL
* and BXT and the misc IO power well on SKL but we don't expect any
* other request bits to be set, so WARN for those.
*/
if (power_well_id == SKL_DISP_PW_1 ||
(IS_GEN9_BC(dev_priv) &&
power_well_id == SKL_DISP_PW_MISC_IO))
DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
"by DMC\n", power_well->name);
else
WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
power_well->name);
I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
}
static void skl_set_power_well(struct drm_i915_private *dev_priv, static void skl_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable) struct i915_power_well *power_well, bool enable)
{ {
@ -846,6 +862,8 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("Enabling %s\n", power_well->name); DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
check_fuse_status = true; check_fuse_status = true;
} }
gen9_wait_for_power_well_enable(dev_priv, power_well);
} else { } else {
if (enable_requested) { if (enable_requested) {
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask); I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
@ -853,14 +871,9 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("Disabling %s\n", power_well->name); DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
} }
gen9_sanitize_power_well_requests(dev_priv, power_well); gen9_wait_for_power_well_disable(dev_priv, power_well);
} }
if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
1))
DRM_ERROR("%s %s timeout\n",
power_well->name, enable ? "enable" : "disable");
if (check_fuse_status) { if (check_fuse_status) {
if (power_well->id == SKL_DISP_PW_1) { if (power_well->id == SKL_DISP_PW_1) {
if (intel_wait_for_register(dev_priv, if (intel_wait_for_register(dev_priv,
@ -2479,7 +2492,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int requested_dc; int requested_dc;
int max_dc; int max_dc;
if (IS_GEN9_BC(dev_priv)) { if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
max_dc = 2; max_dc = 2;
mask = 0; mask = 0;
} else if (IS_GEN9_LP(dev_priv)) { } else if (IS_GEN9_LP(dev_priv)) {
@ -2694,13 +2707,18 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
mutex_lock(&power_domains->lock); mutex_lock(&power_domains->lock);
well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); /*
intel_power_well_disable(dev_priv, well); * BSpec says to keep the MISC IO power well enabled here, only
* remove our request for power well 1.
* Note that even though the driver's request is removed power well 1
* may stay enabled after this due to DMC's own request on it.
*/
well = lookup_power_well(dev_priv, SKL_DISP_PW_1); well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_disable(dev_priv, well); intel_power_well_disable(dev_priv, well);
mutex_unlock(&power_domains->lock); mutex_unlock(&power_domains->lock);
usleep_range(10, 30); /* 10 us delay per Bspec */
} }
void bxt_display_core_init(struct drm_i915_private *dev_priv, void bxt_display_core_init(struct drm_i915_private *dev_priv,
@ -2751,13 +2769,19 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
/* The spec doesn't call for removing the reset handshake flag */ /* The spec doesn't call for removing the reset handshake flag */
/* Disable PG1 */ /*
* Disable PW1 (PG1).
* Note that even though the driver's request is removed power well 1
* may stay enabled after this due to DMC's own request on it.
*/
mutex_lock(&power_domains->lock); mutex_lock(&power_domains->lock);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1); well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_disable(dev_priv, well); intel_power_well_disable(dev_priv, well);
mutex_unlock(&power_domains->lock); mutex_unlock(&power_domains->lock);
usleep_range(10, 30); /* 10 us delay per Bspec */
} }
#define CNL_PROCMON_IDX(val) \ #define CNL_PROCMON_IDX(val) \
@ -2821,7 +2845,10 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
val |= CL_POWER_DOWN_ENABLE; val |= CL_POWER_DOWN_ENABLE;
I915_WRITE(CNL_PORT_CL1CM_DW5, val); I915_WRITE(CNL_PORT_CL1CM_DW5, val);
/* 4. Enable Power Well 1 (PG1) and Aux IO Power */ /*
* 4. Enable Power Well 1 (PG1).
* The AUX IO power wells will be enabled on demand.
*/
mutex_lock(&power_domains->lock); mutex_lock(&power_domains->lock);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1); well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_enable(dev_priv, well); intel_power_well_enable(dev_priv, well);
@ -2853,12 +2880,18 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
/* 3. Disable CD clock */ /* 3. Disable CD clock */
cnl_uninit_cdclk(dev_priv); cnl_uninit_cdclk(dev_priv);
/* 4. Disable Power Well 1 (PG1) and Aux IO Power */ /*
* 4. Disable Power Well 1 (PG1).
* The AUX IO power wells are toggled on demand, so they are already
* disabled at this point.
*/
mutex_lock(&power_domains->lock); mutex_lock(&power_domains->lock);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1); well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_disable(dev_priv, well); intel_power_well_disable(dev_priv, well);
mutex_unlock(&power_domains->lock); mutex_unlock(&power_domains->lock);
usleep_range(10, 30); /* 10 us delay per Bspec */
/* 5. Disable Comp */ /* 5. Disable Comp */
val = I915_READ(CHICKEN_MISC_2); val = I915_READ(CHICKEN_MISC_2);
val |= COMP_PWR_DOWN; val |= COMP_PWR_DOWN;

View file

@ -1343,7 +1343,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
} }
if (INTEL_PCH_TYPE(dev_priv) >= PCH_CPT) if (HAS_PCH_CPT(dev_priv))
sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe); sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else else
sdvox |= SDVO_PIPE_SEL(crtc->pipe); sdvox |= SDVO_PIPE_SEL(crtc->pipe);


@ -262,7 +262,7 @@ skl_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (IS_GEMINILAKE(dev_priv)) { if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
PLANE_COLOR_PIPE_GAMMA_ENABLE | PLANE_COLOR_PIPE_GAMMA_ENABLE |
PLANE_COLOR_PIPE_CSC_ENABLE | PLANE_COLOR_PIPE_CSC_ENABLE |


@ -643,7 +643,7 @@ find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{ .start = (s), .end = (e), .domains = (d) } { .start = (s), .end = (e), .domains = (d) }
#define HAS_FWTABLE(dev_priv) \ #define HAS_FWTABLE(dev_priv) \
(IS_GEN9(dev_priv) || \ (INTEL_GEN(dev_priv) >= 9 || \
IS_CHERRYVIEW(dev_priv) || \ IS_CHERRYVIEW(dev_priv) || \
IS_VALLEYVIEW(dev_priv)) IS_VALLEYVIEW(dev_priv))
@ -1072,7 +1072,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL); dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
} }
if (IS_GEN9(dev_priv)) { if (INTEL_GEN(dev_priv) >= 9) {
dev_priv->uncore.funcs.force_wake_get = fw_domains_get; dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put; dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
@ -1719,6 +1719,17 @@ bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
return intel_get_gpu_reset(dev_priv) != NULL; return intel_get_gpu_reset(dev_priv) != NULL;
} }
/*
* When GuC submission is enabled, GuC manages ELSP and can initiate the
* engine reset too. For now, fall back to full GPU reset if it is enabled.
*/
bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
{
return (dev_priv->info.has_reset_engine &&
!dev_priv->guc.execbuf_client &&
i915.reset >= 2);
}
int intel_guc_reset(struct drm_i915_private *dev_priv) int intel_guc_reset(struct drm_i915_private *dev_priv)
{ {
int ret; int ret;
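
For reference, the intel_has_reset_engine() helper added above reduces the fallback policy to three conditions: the platform must advertise per-engine reset, GuC submission must not own ELSP (no execbuf_client), and the i915.reset module parameter must be at least 2. Below is a minimal standalone sketch of that decision, not part of the diff; the fake_i915 type and its field names are illustrative stand-ins for the real drm_i915_private fields.

/*
 * Illustrative sketch only (not part of the diff above): models the
 * intel_has_reset_engine() policy with stand-in types.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_i915 {
	bool has_reset_engine;     /* platform capability flag */
	bool guc_execbuf_client;   /* GuC submission owns ELSP */
	int  reset_modparam;       /* value of the i915.reset module parameter */
};

static bool fake_has_reset_engine(const struct fake_i915 *i915)
{
	/* same three-way check as the helper added in intel_uncore.c */
	return i915->has_reset_engine &&
	       !i915->guc_execbuf_client &&
	       i915->reset_modparam >= 2;
}

int main(void)
{
	struct fake_i915 i915 = {
		.has_reset_engine = true,
		.guc_execbuf_client = false,
		.reset_modparam = 2,
	};

	printf("per-engine reset allowed: %d\n", fake_has_reset_engine(&i915));
	return 0;
}
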
View file
@ -197,6 +197,9 @@ static int lowlevel_hole(struct drm_i915_private *i915,
{ {
I915_RND_STATE(seed_prng); I915_RND_STATE(seed_prng);
unsigned int size; unsigned int size;
struct i915_vma mock_vma;
memset(&mock_vma, 0, sizeof(struct i915_vma));
/* Keep creating larger objects until one cannot fit into the hole */ /* Keep creating larger objects until one cannot fit into the hole */
for (size = 12; (hole_end - hole_start) >> size; size++) { for (size = 12; (hole_end - hole_start) >> size; size++) {
@ -255,8 +258,11 @@ static int lowlevel_hole(struct drm_i915_private *i915,
vm->allocate_va_range(vm, addr, BIT_ULL(size))) vm->allocate_va_range(vm, addr, BIT_ULL(size)))
break; break;
vm->insert_entries(vm, obj->mm.pages, addr, mock_vma.pages = obj->mm.pages;
I915_CACHE_NONE, 0); mock_vma.node.size = BIT_ULL(size);
mock_vma.node.start = addr;
vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
} }
count = n; count = n;
View file
@ -186,16 +186,20 @@ static int igt_vma_create(void *arg)
goto end; goto end;
} }
list_for_each_entry_safe(ctx, cn, &contexts, link) list_for_each_entry_safe(ctx, cn, &contexts, link) {
list_del_init(&ctx->link);
mock_context_close(ctx); mock_context_close(ctx);
} }
}
end: end:
/* Final pass to lookup all created contexts */ /* Final pass to lookup all created contexts */
err = create_vmas(i915, &objects, &contexts); err = create_vmas(i915, &objects, &contexts);
out: out:
list_for_each_entry_safe(ctx, cn, &contexts, link) list_for_each_entry_safe(ctx, cn, &contexts, link) {
list_del_init(&ctx->link);
mock_context_close(ctx); mock_context_close(ctx);
}
list_for_each_entry_safe(obj, on, &objects, st_link) list_for_each_entry_safe(obj, on, &objects, st_link)
i915_gem_object_put(obj); i915_gem_object_put(obj);
View file
@ -316,6 +316,56 @@ static int igt_global_reset(void *arg)
GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags)); GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
wake_up_all(&i915->gpu_error.reset_queue);
if (i915_terminally_wedged(&i915->gpu_error))
err = -EIO;
return err;
}
static int igt_reset_engine(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int reset_count, reset_engine_count;
int err = 0;
/* Check that we can issue a global GPU and engine reset */
if (!intel_has_reset_engine(i915))
return 0;
for_each_engine(engine, i915, id) {
set_bit(I915_RESET_ENGINE + engine->id, &i915->gpu_error.flags);
reset_count = i915_reset_count(&i915->gpu_error);
reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
engine);
err = i915_reset_engine(engine);
if (err) {
pr_err("i915_reset_engine failed\n");
break;
}
if (i915_reset_count(&i915->gpu_error) != reset_count) {
pr_err("Full GPU reset recorded! (engine reset expected)\n");
err = -EINVAL;
break;
}
if (i915_reset_engine_count(&i915->gpu_error, engine) ==
reset_engine_count) {
pr_err("No %s engine reset recorded!\n", engine->name);
err = -EINVAL;
break;
}
clear_bit(I915_RESET_ENGINE + engine->id,
&i915->gpu_error.flags);
}
if (i915_terminally_wedged(&i915->gpu_error)) if (i915_terminally_wedged(&i915->gpu_error))
err = -EIO; err = -EIO;
@ -404,6 +454,7 @@ fini:
unlock: unlock:
mutex_unlock(&i915->drm.struct_mutex); mutex_unlock(&i915->drm.struct_mutex);
clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
wake_up_all(&i915->gpu_error.reset_queue);
if (i915_terminally_wedged(&i915->gpu_error)) if (i915_terminally_wedged(&i915->gpu_error))
return -EIO; return -EIO;
@ -519,6 +570,7 @@ fini:
unlock: unlock:
mutex_unlock(&i915->drm.struct_mutex); mutex_unlock(&i915->drm.struct_mutex);
clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
wake_up_all(&i915->gpu_error.reset_queue);
if (i915_terminally_wedged(&i915->gpu_error)) if (i915_terminally_wedged(&i915->gpu_error))
return -EIO; return -EIO;
@ -526,13 +578,120 @@ unlock:
return err; return err;
} }
static int igt_render_engine_reset_fallback(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine = i915->engine[RCS];
struct hang h;
struct drm_i915_gem_request *rq;
unsigned int reset_count, reset_engine_count;
int err = 0;
/* Check that we can issue a global GPU and engine reset */
if (!intel_has_reset_engine(i915))
return 0;
set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
if (err)
goto err_unlock;
rq = hang_create_request(&h, engine, i915->kernel_context);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_fini;
}
i915_gem_request_get(rq);
__i915_add_request(rq, true);
/* make reset engine fail */
rq->fence.error = -EIO;
if (!wait_for_hang(&h, rq)) {
pr_err("Failed to start request %x\n", rq->fence.seqno);
err = -EIO;
goto err_request;
}
reset_engine_count = i915_reset_engine_count(&i915->gpu_error, engine);
reset_count = fake_hangcheck(rq);
/* unlock since we'll call handle_error */
mutex_unlock(&i915->drm.struct_mutex);
clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
wake_up_all(&i915->gpu_error.reset_queue);
i915_handle_error(i915, intel_engine_flag(engine), "live test");
if (i915_reset_engine_count(&i915->gpu_error, engine) !=
reset_engine_count) {
pr_err("render engine reset recorded! (full reset expected)\n");
err = -EINVAL;
goto out_rq;
}
if (i915_reset_count(&i915->gpu_error) == reset_count) {
pr_err("No full GPU reset recorded!\n");
err = -EINVAL;
goto out_rq;
}
/*
* by using fence.error = -EIO, full reset sets the wedged flag, do one
* more full reset to re-enable the hw.
*/
if (i915_terminally_wedged(&i915->gpu_error)) {
set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
rq->fence.error = 0;
mutex_lock(&i915->drm.struct_mutex);
set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
i915_reset(i915);
GEM_BUG_ON(test_bit(I915_RESET_HANDOFF,
&i915->gpu_error.flags));
mutex_unlock(&i915->drm.struct_mutex);
if (i915_reset_count(&i915->gpu_error) == reset_count) {
pr_err("No full GPU reset recorded!\n");
err = -EINVAL;
goto out_rq;
}
}
out_rq:
i915_gem_request_put(rq);
hang_fini(&h);
out_backoff:
clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
wake_up_all(&i915->gpu_error.reset_queue);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
return err;
err_request:
i915_gem_request_put(rq);
err_fini:
hang_fini(&h);
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
goto out_backoff;
}
int intel_hangcheck_live_selftests(struct drm_i915_private *i915) int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
{ {
static const struct i915_subtest tests[] = { static const struct i915_subtest tests[] = {
SUBTEST(igt_hang_sanitycheck), SUBTEST(igt_hang_sanitycheck),
SUBTEST(igt_global_reset), SUBTEST(igt_global_reset),
SUBTEST(igt_reset_engine),
SUBTEST(igt_wait_reset), SUBTEST(igt_wait_reset),
SUBTEST(igt_reset_queue), SUBTEST(igt_reset_queue),
SUBTEST(igt_render_engine_reset_fallback),
}; };
if (!intel_has_gpu_reset(i915)) if (!intel_has_gpu_reset(i915))
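
The igt_reset_engine and igt_render_engine_reset_fallback tests above verify which reset path actually ran purely through bookkeeping: a per-engine reset must bump i915_reset_engine_count() while leaving i915_reset_count() unchanged, and the fallback case must do the opposite. The standalone sketch below models that counter-based check; reset_stats, do_engine_reset and do_full_reset are hypothetical stand-ins, not the real i915 structures.

/*
 * Illustrative sketch only (not part of the diff above): counter
 * bookkeeping pattern used by the hangcheck selftests.
 */
#include <assert.h>
#include <stdio.h>

struct reset_stats {
	unsigned int global_resets;     /* analogue of i915_reset_count() */
	unsigned int engine_resets[2];  /* analogue of i915_reset_engine_count() */
};

static void do_engine_reset(struct reset_stats *s, int engine)
{
	s->engine_resets[engine]++;     /* per-engine path: no global bump */
}

static void do_full_reset(struct reset_stats *s)
{
	s->global_resets++;             /* fallback path: global bump only */
}

int main(void)
{
	struct reset_stats s = { 0 };
	unsigned int global_before = s.global_resets;
	unsigned int engine_before = s.engine_resets[0];

	do_engine_reset(&s, 0);
	assert(s.global_resets == global_before);      /* no full GPU reset recorded */
	assert(s.engine_resets[0] != engine_before);   /* engine reset recorded */

	do_full_reset(&s);
	assert(s.global_resets != global_before);      /* fallback recorded as full reset */

	printf("reset bookkeeping checks passed\n");
	return 0;
}
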
View file
@ -48,7 +48,7 @@ mock_context(struct drm_i915_private *i915,
if (!ctx->vma_lut.ht) if (!ctx->vma_lut.ht)
goto err_free; goto err_free;
ret = ida_simple_get(&i915->context_hw_ida, ret = ida_simple_get(&i915->contexts.hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL); 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0) if (ret < 0)
goto err_vma_ht; goto err_vma_ht;
@ -86,3 +86,12 @@ void mock_context_close(struct i915_gem_context *ctx)
i915_gem_context_put(ctx); i915_gem_context_put(ctx);
} }
void mock_init_contexts(struct drm_i915_private *i915)
{
INIT_LIST_HEAD(&i915->contexts.list);
ida_init(&i915->contexts.hw_ida);
INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
init_llist_head(&i915->contexts.free_list);
}
View file
@ -25,6 +25,8 @@
#ifndef __MOCK_CONTEXT_H #ifndef __MOCK_CONTEXT_H
#define __MOCK_CONTEXT_H #define __MOCK_CONTEXT_H
void mock_init_contexts(struct drm_i915_private *i915);
struct i915_gem_context * struct i915_gem_context *
mock_context(struct drm_i915_private *i915, mock_context(struct drm_i915_private *i915,
const char *name); const char *name);
View file
@ -57,11 +57,12 @@ static void mock_device_release(struct drm_device *dev)
cancel_delayed_work_sync(&i915->gt.retire_work); cancel_delayed_work_sync(&i915->gt.retire_work);
cancel_delayed_work_sync(&i915->gt.idle_work); cancel_delayed_work_sync(&i915->gt.idle_work);
flush_workqueue(i915->wq);
mutex_lock(&i915->drm.struct_mutex); mutex_lock(&i915->drm.struct_mutex);
for_each_engine(engine, i915, id) for_each_engine(engine, i915, id)
mock_engine_free(engine); mock_engine_free(engine);
i915_gem_context_fini(i915); i915_gem_contexts_fini(i915);
mutex_unlock(&i915->drm.struct_mutex); mutex_unlock(&i915->drm.struct_mutex);
drain_workqueue(i915->wq); drain_workqueue(i915->wq);
@ -160,7 +161,7 @@ struct drm_i915_private *mock_gem_device(void)
INIT_LIST_HEAD(&i915->mm.unbound_list); INIT_LIST_HEAD(&i915->mm.unbound_list);
INIT_LIST_HEAD(&i915->mm.bound_list); INIT_LIST_HEAD(&i915->mm.bound_list);
ida_init(&i915->context_hw_ida); mock_init_contexts(i915);
INIT_DELAYED_WORK(&i915->gt.retire_work, mock_retire_work_handler); INIT_DELAYED_WORK(&i915->gt.retire_work, mock_retire_work_handler);
INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler); INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler);
View file
@ -33,8 +33,7 @@ static void mock_insert_page(struct i915_address_space *vm,
} }
static void mock_insert_entries(struct i915_address_space *vm, static void mock_insert_entries(struct i915_address_space *vm,
struct sg_table *st, struct i915_vma *vma,
u64 start,
enum i915_cache_level level, u32 flags) enum i915_cache_level level, u32 flags)
{ {
} }