Merge tag 'drm-intel-next-fixes-2019-11-14' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
- PMU "Frequency" is reported as accumulated cycles
- Avoid OOPS in dumb_create IOCTL when no CRTCs
- Mitigation for userptr put_pages deadlock with trylock_page
- Fix to avoid freeing heartbeat request too early
- Fix LRC coherency issue
- Fix Bugzilla #112212: Avoid screen corruption on MST
- Error path fix to unlock context on failed context VM SETPARAM
- Always consider holding preemption a privileged op in perf/OA
- Preload LUTs if the hw isn't currently using them to avoid color flash on VLV/CHV
- Protect context while grabbing its name for the request
- Don't resize aliasing ppGTT size
- Smaller fixes picked by tooling

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191114085213.GA6440@jlahtine-desk.ger.corp.intel.com
commit 2d0720f5a4
15 changed files with 185 additions and 83 deletions

drivers/gpu/drm/i915/display/intel_atomic.c
@@ -200,6 +200,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
 	crtc_state->update_wm_pre = false;
 	crtc_state->update_wm_post = false;
 	crtc_state->fifo_changed = false;
+	crtc_state->preload_luts = false;
 	crtc_state->wm.need_postvbl_update = false;
 	crtc_state->fb_bits = 0;
 	crtc_state->update_planes = 0;

drivers/gpu/drm/i915/display/intel_color.c
@@ -1022,6 +1022,55 @@ void intel_color_commit(const struct intel_crtc_state *crtc_state)
 	dev_priv->display.color_commit(crtc_state);
 }
 
+static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(new_crtc_state->base.state);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+
+	return !old_crtc_state->base.gamma_lut &&
+		!old_crtc_state->base.degamma_lut;
+}
+
+static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(new_crtc_state->base.state);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+
+	/*
+	 * CGM_PIPE_MODE is itself single buffered. We'd have to
+	 * somehow split it out from chv_load_luts() if we wanted
+	 * the ability to preload the CGM LUTs/CSC without tearing.
+	 */
+	if (old_crtc_state->cgm_mode || new_crtc_state->cgm_mode)
+		return false;
+
+	return !old_crtc_state->base.gamma_lut;
+}
+
+static bool glk_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(new_crtc_state->base.state);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+
+	/*
+	 * The hardware degamma is active whenever the pipe
+	 * CSC is active. Thus even if the old state has no
+	 * software degamma we need to avoid clobbering the
+	 * linear hardware degamma mid scanout.
+	 */
+	return !old_crtc_state->csc_enable &&
+		!old_crtc_state->base.gamma_lut;
+}
+
 int intel_color_check(struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

@@ -1165,6 +1214,8 @@ static int i9xx_color_check(struct intel_crtc_state *crtc_state)
 	if (ret)
 		return ret;
 
+	crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
 	return 0;
 }
 
@@ -1217,6 +1268,8 @@ static int chv_color_check(struct intel_crtc_state *crtc_state)
 	if (ret)
 		return ret;
 
+	crtc_state->preload_luts = chv_can_preload_luts(crtc_state);
+
 	return 0;
 }
 
@@ -1271,6 +1324,8 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state)
 	if (ret)
 		return ret;
 
+	crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
 	return 0;
 }
 
@@ -1328,6 +1383,8 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
 	if (ret)
 		return ret;
 
+	crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
 	return 0;
 }
 
@@ -1366,6 +1423,8 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
 	if (ret)
 		return ret;
 
+	crtc_state->preload_luts = glk_can_preload_luts(crtc_state);
+
 	return 0;
 }
 
@@ -1415,6 +1474,8 @@ static int icl_color_check(struct intel_crtc_state *crtc_state)
 
 	crtc_state->csc_mode = icl_csc_mode(crtc_state);
 
+	crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
 	return 0;
 }

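Taken together, the three predicates above encode one rule: the LUTs may be written ahead of the commit only while the hardware is guaranteed not to be reading any of them. A minimal userspace sketch of that rule, with stand-in struct and field names rather than the driver's types:

#include <stdbool.h>
#include <stdio.h>

struct lut_state {
	bool gamma_lut;   /* a gamma LUT is currently live in hw */
	bool degamma_lut; /* a degamma LUT is currently live in hw */
	bool csc_enable;  /* pipe CSC on (keeps glk hw degamma active) */
};

/* cf. intel_can_preload_luts(): safe only if no LUT is being read */
static bool can_preload(const struct lut_state *old)
{
	return !old->gamma_lut && !old->degamma_lut;
}

/* cf. glk_can_preload_luts(): an enabled CSC implies a live linear degamma */
static bool glk_can_preload(const struct lut_state *old)
{
	return !old->csc_enable && !old->gamma_lut;
}

int main(void)
{
	struct lut_state idle = {0};
	struct lut_state csc_on = { .csc_enable = true };

	printf("idle pipe:   preload=%d\n", can_preload(&idle));       /* 1 */
	printf("glk, csc on: preload=%d\n", glk_can_preload(&csc_on)); /* 0 */
	return 0;
}
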
drivers/gpu/drm/i915/display/intel_ddi.c
@@ -1794,10 +1794,8 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
 	 * of Color Encoding Format and Content Color Gamut] while sending
 	 * YCBCR 420, HDR BT.2020 signals we should program MSA MISC1 fields
 	 * which indicate VSC SDP for the Pixel Encoding/Colorimetry Format.
-	 *
-	 * FIXME MST doesn't pass in the conn_state
 	 */
-	if (conn_state && intel_dp_needs_vsc_sdp(crtc_state, conn_state))
+	if (intel_dp_needs_vsc_sdp(crtc_state, conn_state))
 		temp |= DP_MSA_MISC_COLOR_VSC_SDP;
 
 	I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);

@@ -3605,7 +3603,11 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 	else
 		hsw_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
 
-	intel_ddi_set_dp_msa(crtc_state, conn_state);
+	/* MST will call a setting of MSA after an allocating of Virtual Channel
+	 * from MST encoder pre_enable callback.
+	 */
+	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
+		intel_ddi_set_dp_msa(crtc_state, conn_state);
 }
 
 static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,

drivers/gpu/drm/i915/display/intel_display.c
@@ -66,6 +66,7 @@
 #include "intel_cdclk.h"
 #include "intel_color.h"
 #include "intel_display_types.h"
+#include "intel_dp_link_training.h"
 #include "intel_fbc.h"
 #include "intel_fbdev.h"
 #include "intel_fifo_underrun.h"

@@ -2528,6 +2529,9 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
 	 * the highest stride limits of them all.
 	 */
 	crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+	if (!crtc)
+		return 0;
+
 	plane = to_intel_plane(crtc->base.primary);
 
 	return plane->max_stride(plane, pixel_format, modifier,

@@ -14201,6 +14205,11 @@ static void intel_update_crtc(struct intel_crtc *crtc,
 		/* vblanks work again, re-enable pipe CRC. */
 		intel_crtc_enable_pipe_crc(crtc);
 	} else {
+		if (new_crtc_state->preload_luts &&
+		    (new_crtc_state->base.color_mgmt_changed ||
+		     new_crtc_state->update_pipe))
+			intel_color_load_luts(new_crtc_state);
+
 		intel_pre_plane_update(old_crtc_state, new_crtc_state);
 
 		if (new_crtc_state->update_pipe)

@@ -14713,6 +14722,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
 		if (new_crtc_state->base.active &&
 		    !needs_modeset(new_crtc_state) &&
+		    !new_crtc_state->preload_luts &&
 		    (new_crtc_state->base.color_mgmt_changed ||
 		     new_crtc_state->update_pipe))
 			intel_color_load_luts(new_crtc_state);

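The two LUT hunks above form a handshake with the color code: a CRTC whose state allows preloading gets its LUTs written early in intel_update_crtc(), and the post-vblank loop in intel_atomic_commit_tail() now skips exactly those CRTCs, so each pipe loads its LUTs once. A hypothetical, self-contained model of that either/or:

#include <stdbool.h>
#include <stdio.h>

struct crtc_state {
	bool preload_luts;
	bool color_mgmt_changed;
};

static void load_luts(const char *when)
{
	printf("LUTs loaded %s\n", when);
}

static void update_crtc(const struct crtc_state *st)
{
	if (st->preload_luts && st->color_mgmt_changed)
		load_luts("early (hw not reading them yet)");
	/* ... plane update, vblank evasion ... */
}

static void commit_tail(const struct crtc_state *st)
{
	if (!st->preload_luts && st->color_mgmt_changed)
		load_luts("after vblank");
}

int main(void)
{
	struct crtc_state a = { .preload_luts = true,  .color_mgmt_changed = true };
	struct crtc_state b = { .preload_luts = false, .color_mgmt_changed = true };

	update_crtc(&a); commit_tail(&a); /* loads early only */
	update_crtc(&b); commit_tail(&b); /* loads post-vblank only */
	return 0;
}
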
drivers/gpu/drm/i915/display/intel_display.h
@@ -27,7 +27,6 @@
 
 #include <drm/drm_util.h>
 #include <drm/i915_drm.h>
-#include "intel_dp_link_training.h"
 
 enum link_m_n_set;
 struct dpll;

drivers/gpu/drm/i915/display/intel_display_types.h
@@ -775,6 +775,7 @@ struct intel_crtc_state {
 	bool disable_cxsr;
 	bool update_wm_pre, update_wm_post; /* watermarks are updated */
 	bool fifo_changed; /* FIFO split is changed */
+	bool preload_luts;
 
 	/* Pipe source size (ie. panel fitter input size)
 	 * All planes will be positioned inside this space,

drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -331,6 +331,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
 	ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
 
 	intel_ddi_enable_pipe_clock(pipe_config);
+
+	intel_ddi_set_dp_msa(pipe_config, conn_state);
 }
 
 static void intel_mst_enable_dp(struct intel_encoder *encoder,

drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1141,7 +1141,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
 
 	if (i915_gem_context_is_closed(ctx)) {
 		err = -ENOENT;
-		goto out;
+		goto unlock;
 	}
 
 	if (vm == rcu_access_pointer(ctx->vm))

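The one-word change is the whole fix: the -ENOENT early exit is taken after the context lock has been acquired, so leaving through the out label skipped the unlock. A generic sketch of the pattern, with hypothetical names and pthreads standing in for the kernel mutex:

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static int ctx_closed;

static int set_vm(void)
{
	int err = 0;

	pthread_mutex_lock(&ctx_lock);
	if (ctx_closed) {
		err = -ENOENT;
		goto unlock;	/* not "goto out": the lock is held here */
	}
	/* ... install the new VM ... */
unlock:
	pthread_mutex_unlock(&ctx_lock);
	return err;
}

int main(void)
{
	return set_vm() ? 1 : 0;
}
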
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -646,8 +646,28 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 	obj->mm.dirty = false;
 
 	for_each_sgt_page(page, sgt_iter, pages) {
-		if (obj->mm.dirty)
+		if (obj->mm.dirty && trylock_page(page)) {
+			/*
+			 * As this may not be anonymous memory (e.g. shmem)
+			 * but exist on a real mapping, we have to lock
+			 * the page in order to dirty it -- holding
+			 * the page reference is not sufficient to
+			 * prevent the inode from being truncated.
+			 * Play safe and take the lock.
+			 *
+			 * However...!
+			 *
+			 * The mmu-notifier can be invalidated for a
+			 * migrate_page, that is alreadying holding the lock
+			 * on the page. Such a try_to_unmap() will result
+			 * in us calling put_pages() and so recursively try
+			 * to lock the page. We avoid that deadlock with
+			 * a trylock_page() and in exchange we risk missing
+			 * some page dirtying.
+			 */
 			set_page_dirty(page);
+			unlock_page(page);
+		}
 
 		mark_page_accessed(page);
 		put_page(page);

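The comment in the hunk describes the deadlock precisely, and its shape is easy to reproduce in userspace. In this hypothetical model, put_pages() can be reached while the page lock is already held (the migrate_page path), so it must trylock and tolerate occasionally missing a dirty update rather than block on itself:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
static bool page_dirty;

static void put_pages(void)
{
	/* May be reached with page_lock already held (migration path). */
	if (pthread_mutex_trylock(&page_lock) == 0) {
		page_dirty = true;	/* safe: we own the lock */
		pthread_mutex_unlock(&page_lock);
	}
	/* else: skip the dirtying rather than self-deadlock */
}

int main(void)
{
	pthread_mutex_lock(&page_lock);	/* simulate migrate_page holding it */
	put_pages();			/* a plain lock() would deadlock here */
	pthread_mutex_unlock(&page_lock);

	put_pages();			/* uncontended: dirtying succeeds */
	printf("dirty=%d\n", page_dirty);	/* 1 */
	return 0;
}
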
drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1372,6 +1372,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 		}
 
 		execlists_active_lock_bh(execlists);
+		rcu_read_lock();
 		for (port = execlists->active; (rq = *port); port++) {
 			char hdr[80];
 			int len;

@@ -1409,6 +1410,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 			if (tl)
 				intel_timeline_put(tl);
 		}
+		rcu_read_unlock();
 		execlists_active_unlock_bh(execlists);
 	} else if (INTEL_GEN(dev_priv) > 6) {
 		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",

drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -141,8 +141,8 @@ void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine)
 
 void intel_engine_park_heartbeat(struct intel_engine_cs *engine)
 {
-	cancel_delayed_work(&engine->heartbeat.work);
-	i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
+	if (cancel_delayed_work(&engine->heartbeat.work))
+		i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
 }
 
 void intel_engine_init_heartbeat(struct intel_engine_cs *engine)

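This is the "freeing heartbeat request too early" fix from the summary: cancel_delayed_work() returns true only when the pending work was revoked before it ran, i.e. only then does parking still own the reference that the worker would otherwise consume. A hypothetical userspace model of why the put must be conditional:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int refcount = 1;		/* the heartbeat request */
static atomic_bool work_pending = true;

static void request_put(void)
{
	if (atomic_fetch_sub(&refcount, 1) == 1)
		printf("request freed\n");
}

/* Models cancel_delayed_work(): true iff we revoked the pending work. */
static bool cancel_work(void)
{
	return atomic_exchange(&work_pending, false);
}

static void heartbeat_callback(void)	/* the timer side */
{
	if (atomic_exchange(&work_pending, false))
		request_put();		/* the callback consumes the ref */
}

static void park_heartbeat(void)	/* the fixed path */
{
	if (cancel_work())
		request_put();		/* only if the callback won't */
}

int main(void)
{
	heartbeat_callback();	/* work fires first ... */
	park_heartbeat();	/* ... so parking must not put again */
	return 0;
}
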
drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -990,63 +990,6 @@ static void intel_engine_context_out(struct intel_engine_cs *engine)
 	write_sequnlock_irqrestore(&engine->stats.lock, flags);
 }
 
-static inline struct intel_engine_cs *
-__execlists_schedule_in(struct i915_request *rq)
-{
-	struct intel_engine_cs * const engine = rq->engine;
-	struct intel_context * const ce = rq->hw_context;
-
-	intel_context_get(ce);
-
-	if (ce->tag) {
-		/* Use a fixed tag for OA and friends */
-		ce->lrc_desc |= (u64)ce->tag << 32;
-	} else {
-		/* We don't need a strict matching tag, just different values */
-		ce->lrc_desc &= ~GENMASK_ULL(47, 37);
-		ce->lrc_desc |=
-			(u64)(engine->context_tag++ % NUM_CONTEXT_TAG) <<
-			GEN11_SW_CTX_ID_SHIFT;
-		BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID);
-	}
-
-	intel_gt_pm_get(engine->gt);
-	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
-	intel_engine_context_in(engine);
-
-	return engine;
-}
-
-static inline struct i915_request *
-execlists_schedule_in(struct i915_request *rq, int idx)
-{
-	struct intel_context * const ce = rq->hw_context;
-	struct intel_engine_cs *old;
-
-	GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
-	trace_i915_request_in(rq, idx);
-
-	old = READ_ONCE(ce->inflight);
-	do {
-		if (!old) {
-			WRITE_ONCE(ce->inflight, __execlists_schedule_in(rq));
-			break;
-		}
-	} while (!try_cmpxchg(&ce->inflight, &old, ptr_inc(old)));
-
-	GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
-	return i915_request_get(rq);
-}
-
-static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
-{
-	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
-	struct i915_request *next = READ_ONCE(ve->request);
-
-	if (next && next->execution_mask & ~rq->execution_mask)
-		tasklet_schedule(&ve->base.execlists.tasklet);
-}
-
 static void restore_default_state(struct intel_context *ce,
 				  struct intel_engine_cs *engine)
 {

@@ -1100,19 +1043,82 @@ static void reset_active(struct i915_request *rq,
 	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
 }
 
+static inline struct intel_engine_cs *
+__execlists_schedule_in(struct i915_request *rq)
+{
+	struct intel_engine_cs * const engine = rq->engine;
+	struct intel_context * const ce = rq->hw_context;
+
+	intel_context_get(ce);
+
+	if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
+		reset_active(rq, engine);
+
+	if (ce->tag) {
+		/* Use a fixed tag for OA and friends */
+		ce->lrc_desc |= (u64)ce->tag << 32;
+	} else {
+		/* We don't need a strict matching tag, just different values */
+		ce->lrc_desc &= ~GENMASK_ULL(47, 37);
+		ce->lrc_desc |=
+			(u64)(engine->context_tag++ % NUM_CONTEXT_TAG) <<
+			GEN11_SW_CTX_ID_SHIFT;
+		BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID);
+	}
+
+	intel_gt_pm_get(engine->gt);
+	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+	intel_engine_context_in(engine);
+
+	return engine;
+}
+
+static inline struct i915_request *
+execlists_schedule_in(struct i915_request *rq, int idx)
+{
+	struct intel_context * const ce = rq->hw_context;
+	struct intel_engine_cs *old;
+
+	GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
+	trace_i915_request_in(rq, idx);
+
+	old = READ_ONCE(ce->inflight);
+	do {
+		if (!old) {
+			WRITE_ONCE(ce->inflight, __execlists_schedule_in(rq));
+			break;
+		}
+	} while (!try_cmpxchg(&ce->inflight, &old, ptr_inc(old)));
+
+	GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
+	return i915_request_get(rq);
+}
+
+static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
+{
+	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+	struct i915_request *next = READ_ONCE(ve->request);
+
+	if (next && next->execution_mask & ~rq->execution_mask)
+		tasklet_schedule(&ve->base.execlists.tasklet);
+}
+
 static inline void
 __execlists_schedule_out(struct i915_request *rq,
 			 struct intel_engine_cs * const engine)
 {
 	struct intel_context * const ce = rq->hw_context;
 
+	/*
+	 * NB process_csb() is not under the engine->active.lock and hence
+	 * schedule_out can race with schedule_in meaning that we should
+	 * refrain from doing non-trivial work here.
+	 */
+
 	intel_engine_context_out(engine);
 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
 	intel_gt_pm_put(engine->gt);
 
-	if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
-		reset_active(rq, engine);
-
 	/*
 	 * If this is part of a virtual engine, its next request may
 	 * have been blocked waiting for access to the active context.

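Most of this movement is mechanical; the functional change is that reset_active() now runs in __execlists_schedule_in() (serialized under the submission tasklet) rather than in schedule-out, which the new NB comment notes can race with process_csb(). The lock-free inflight bookkeeping that makes schedule-out so constrained is worth seeing in isolation; a compile-able model with illustrative names, not the driver's types:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define COUNT_MASK ((uintptr_t)15)	/* low bits free: engine is aligned */

static _Atomic uintptr_t inflight;	/* engine pointer | request count */
static _Alignas(16) char engine_obj;	/* stands in for the engine */

/* cf. execlists_schedule_in(): the first request publishes the engine
 * pointer, later ones just bump a count packed into the pointer's low
 * bits (ptr_inc() in the driver). Runs serialized under the tasklet. */
static void schedule_in(void)
{
	uintptr_t old = atomic_load(&inflight);

	do {
		if (!old) {
			atomic_store(&inflight, (uintptr_t)&engine_obj);
			break;
		}
	} while (!atomic_compare_exchange_weak(&inflight, &old, old + 1));
}

int main(void)
{
	schedule_in();
	schedule_in();
	schedule_in();
	printf("extra requests in flight: %ju\n",
	       (uintmax_t)(atomic_load(&inflight) & COUNT_MASK)); /* 2 */
	return 0;
}
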
drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2609,8 +2609,6 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
 	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
 	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
 
-	ppgtt->vm.total = ggtt->vm.total;
-
 	return 0;
 
 err_ppgtt:

drivers/gpu/drm/i915/i915_perf.c
@@ -3307,15 +3307,6 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
 		}
 	}
 
-	if (props->hold_preemption) {
-		if (!props->single_context) {
-			DRM_DEBUG("preemption disable with no context\n");
-			ret = -EINVAL;
-			goto err;
-		}
-		privileged_op = true;
-	}
-
 	/*
 	 * On Haswell the OA unit supports clock gating off for a specific
 	 * context and in this mode there's no visibility of metrics for the

@@ -3335,12 +3326,21 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
 	 * doesn't request global stream access (i.e. query based sampling
 	 * using MI_RECORD_PERF_COUNT.
 	 */
-	if (IS_HASWELL(perf->i915) && specific_ctx && !props->hold_preemption)
+	if (IS_HASWELL(perf->i915) && specific_ctx)
 		privileged_op = false;
 	else if (IS_GEN(perf->i915, 12) && specific_ctx &&
 		 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
 		privileged_op = false;
 
+	if (props->hold_preemption) {
+		if (!props->single_context) {
+			DRM_DEBUG("preemption disable with no context\n");
+			ret = -EINVAL;
+			goto err;
+		}
+		privileged_op = true;
+	}
+
 	/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
 	 * we check a dev.i915.perf_stream_paranoid sysctl option
 	 * to determine if it's ok to access system wide OA counters

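The reordering, not the predicate, is the fix: when the hold_preemption check ran first, the later per-context relaxations could clear privileged_op again and let an unprivileged client hold preemption. Evaluating it last makes the flag sticky. A hypothetical condensation of that control flow:

#include <stdbool.h>
#include <stdio.h>

static bool compute_privileged(bool specific_ctx, bool hold_preemption)
{
	bool privileged_op = true;

	/* per-context relaxations (the Haswell / Gen12 cases above) */
	if (specific_ctx)
		privileged_op = false;

	/* evaluated last, so nothing can undo it */
	if (hold_preemption)
		privileged_op = true;

	return privileged_op;
}

int main(void)
{
	printf("ctx-filtered, no preemption hold: %d\n",
	       compute_privileged(true, false));	/* 0: unprivileged ok */
	printf("ctx-filtered, holding preemption: %d\n",
	       compute_privileged(true, true));		/* 1: always privileged */
	return 0;
}
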
drivers/gpu/drm/i915/i915_pmu.c
@@ -878,8 +878,8 @@ create_event_attributes(struct i915_pmu *pmu)
 		const char *name;
 		const char *unit;
 	} events[] = {
-		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"),
-		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"),
+		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
+		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
 		__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
 		__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
 	};