Merge tag 'perf_urgent_for_v5.15_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Borislav Petkov:

 - Make sure the destroy callback is reset when an event initialization
   fails

 - Update the event constraints for Icelake

 - Make sure the active time of an event is updated even for inactive
   events

* tag 'perf_urgent_for_v5.15_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: fix userpage->time_enabled of inactive events
  perf/x86/intel: Update event constraints for ICX
  perf/x86: Reset destroy callback on event init failure
commit 3a399a2bc4

4 changed files with 35 additions and 5 deletions
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2465,6 +2465,7 @@ static int x86_pmu_event_init(struct perf_event *event)
 	if (err) {
 		if (event->destroy)
 			event->destroy(event);
+		event->destroy = NULL;
 	}
 
 	if (READ_ONCE(x86_pmu.attr_rdpmc) &&
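Editor's note: without the added reset, a failed init leaves event->destroy pointing at a callback that has already run, and the caller's cleanup path would invoke it a second time. A minimal user-space sketch of the invoke-then-clear pattern (illustrative types and names, not kernel code):

#include <stdio.h>
#include <stddef.h>

struct event {
	void (*destroy)(struct event *);
};

static void release_resources(struct event *e)
{
	printf("releasing resources for %p\n", (void *)e);
}

static int init_event(struct event *e, int fail)
{
	e->destroy = release_resources;
	if (fail) {
		if (e->destroy)
			e->destroy(e);
		e->destroy = NULL;	/* the fix: later cleanup won't re-run it */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct event e;

	if (init_event(&e, 1) != 0 && e.destroy)
		e.destroy(&e);		/* safely skipped: destroy was cleared */
	return 0;
}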
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -263,6 +263,7 @@ static struct event_constraint intel_icl_event_constraints[] = {
 	INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
 	INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
 	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
+	INTEL_EVENT_CONSTRAINT(0xef, 0xf),
 	INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
 	EVENT_CONSTRAINT_END
 };
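Editor's note: the trailing 0xf in these macros is a counter bitmask restricting on which general-purpose PMCs the event (0xef here) may be scheduled; 0xf selects counters 0-3. A stand-alone decoder for such a mask (plain C, nothing kernel-specific assumed):

#include <stdio.h>

int main(void)
{
	unsigned int cntmsk = 0xf;	/* counter mask, as in the macros above */
	int c;

	for (c = 0; c < 8; c++)
		if (cntmsk & (1u << c))
			printf("event may be scheduled on counter %d\n", c);
	return 0;
}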
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -683,7 +683,9 @@ struct perf_event {
 	/*
 	 * timestamp shadows the actual context timing but it can
 	 * be safely used in NMI interrupt context. It reflects the
-	 * context time as it was when the event was last scheduled in.
+	 * context time as it was when the event was last scheduled in,
+	 * or when ctx_sched_in failed to schedule the event because we
+	 * run out of PMC.
 	 *
 	 * ctx_time already accounts for ctx->timestamp. Therefore to
 	 * compute ctx_time for a sample, simply add perf_clock().
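Editor's note: this comment is load-bearing because user space reads these times lock-free from the mmap()ed event page. A sketch of the seqlock-style retry loop documented in the UAPI header (struct perf_event_mmap_page and its lock/time_enabled fields are real; the helper name is ours):

#include <linux/perf_event.h>
#include <stdint.h>

/* Retry until the kernel's lock counter is stable around the reads. */
static uint64_t read_time_enabled(volatile struct perf_event_mmap_page *pc)
{
	uint32_t seq;
	uint64_t enabled;

	do {
		seq = pc->lock;
		__sync_synchronize();	/* read barrier */
		enabled = pc->time_enabled;
		__sync_synchronize();
	} while (pc->lock != seq);	/* kernel updated it: retry */

	return enabled;
}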
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3707,6 +3707,29 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
 	return 0;
 }
 
+static inline bool event_update_userpage(struct perf_event *event)
+{
+	if (likely(!atomic_read(&event->mmap_count)))
+		return false;
+
+	perf_event_update_time(event);
+	perf_set_shadow_time(event, event->ctx);
+	perf_event_update_userpage(event);
+
+	return true;
+}
+
+static inline void group_update_userpage(struct perf_event *group_event)
+{
+	struct perf_event *event;
+
+	if (!event_update_userpage(group_event))
+		return;
+
+	for_each_sibling_event(event, group_event)
+		event_update_userpage(event);
+}
+
 static int merge_sched_in(struct perf_event *event, void *data)
 {
 	struct perf_event_context *ctx = event->ctx;
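Editor's note: the two new helpers gate all work on the leader's mmap_count and only then fan out to the siblings. The same control flow in stand-alone C, with a singly linked list standing in for the kernel's sibling list (all names illustrative):

#include <stdbool.h>
#include <stddef.h>

struct ev {
	int mmap_count;			/* stand-in for the atomic mmap_count */
	struct ev *next_sibling;	/* stand-in for the sibling list */
};

static bool update_one(struct ev *e)
{
	if (e->mmap_count == 0)		/* nobody mapped the page: skip */
		return false;
	/* ... refresh times and publish them to the userpage ... */
	return true;
}

static void update_group(struct ev *leader)
{
	struct ev *s;

	if (!update_one(leader))	/* leader not mapped: skip the group */
		return;
	for (s = leader->next_sibling; s; s = s->next_sibling)
		update_one(s);
}

int main(void)
{
	struct ev sib = { 1, NULL };
	struct ev leader = { 1, &sib };

	update_group(&leader);
	return 0;
}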
@@ -3725,14 +3748,15 @@ static int merge_sched_in(struct perf_event *event, void *data)
 	}
 
 	if (event->state == PERF_EVENT_STATE_INACTIVE) {
+		*can_add_hw = 0;
 		if (event->attr.pinned) {
 			perf_cgroup_event_disable(event, ctx);
 			perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+		} else {
+			ctx->rotate_necessary = 1;
+			perf_mux_hrtimer_restart(cpuctx);
+			group_update_userpage(event);
 		}
-
-		*can_add_hw = 0;
-		ctx->rotate_necessary = 1;
-		perf_mux_hrtimer_restart(cpuctx);
 	}
 
 	return 0;
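Editor's note: the new else branch is the multiplexed-out case: the event stays INACTIVE, rotation is kicked, and with this series its userpage times keep advancing. That is what makes the classic count-scaling idiom below give sane answers. A minimal self-monitoring sketch using perf_event_open(2); error handling trimmed, Linux-only:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

struct read_format {
	uint64_t value;		/* raw count */
	uint64_t time_enabled;	/* time the event was enabled */
	uint64_t time_running;	/* time it actually held a counter */
};

int main(void)
{
	struct perf_event_attr attr;
	struct read_format rf;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;

	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* ... run the workload being measured ... */

	if (read(fd, &rf, sizeof(rf)) == sizeof(rf) && rf.time_running)
		printf("scaled count: %llu\n",
		       (unsigned long long)(rf.value * rf.time_enabled /
					    rf.time_running));
	close(fd);
	return 0;
}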
@@ -6324,6 +6348,8 @@ accounting:
 
 		ring_buffer_attach(event, rb);
 
+		perf_event_update_time(event);
+		perf_set_shadow_time(event, event->ctx);
 		perf_event_init_userpage(event);
 		perf_event_update_userpage(event);
 	} else {
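Editor's note: the two added calls make the freshly attached userpage carry current enabled/running times, so the first lock-free read after mmap() is already consistent. The matching user-space step, mapping the metadata page of a perf fd (fd as returned by perf_event_open(2) in the sketch above; helper name ours):

#include <linux/perf_event.h>
#include <sys/mman.h>
#include <unistd.h>

/* Map only the metadata page; the ring buffer would need 1 + 2^n pages. */
static volatile struct perf_event_mmap_page *map_userpage(int fd)
{
	void *p = mmap(NULL, (size_t)sysconf(_SC_PAGESIZE), PROT_READ,
		       MAP_SHARED, fd, 0);

	return p == MAP_FAILED ? NULL
			       : (volatile struct perf_event_mmap_page *)p;
}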