Mirror of https://github.com/Fishwaldo/Star64_linux.git, synced 2025-06-21 22:21:21 +00:00
perf: Optimize throttling code
By pre-computing the maximum number of samples per tick we can avoid a
multiplication and a conditional since MAX_INTERRUPTS >
max_samples_per_tick.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 4979d2729a
commit 163ec4354a

3 changed files with 29 additions and 20 deletions
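The diff below is easier to follow with the old and new throttle checks side by side. The old path multiplies the per-tick interrupt count by HZ on every overflow and needs a separate branch for an event that is already throttled; the new path compares against a per-tick budget computed once, and because MAX_INTERRUPTS is far larger than any realistic budget the already-throttled case falls out of the same comparison. The following standalone user-space sketch illustrates that equivalence; HZ, the sample rate, the MAX_INTERRUPTS value and the tested counts are assumptions chosen for the demo, not taken from any particular kernel configuration.

/*
 * Standalone illustration of the overflow throttling check before and after
 * this commit (user-space sketch; HZ, the rate and MAX_INTERRUPTS below are
 * assumed values). Build: cc -o throttle-demo throttle-demo.c && ./throttle-demo
 */
#include <stdio.h>
#include <stdint.h>

#define HZ			1000
#define MAX_INTERRUPTS		(~0U)	/* sentinel, far above any per-tick budget */
#define SAMPLE_RATE		100000	/* stands in for sysctl_perf_event_sample_rate */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static const unsigned int max_samples_per_tick = DIV_ROUND_UP(SAMPLE_RATE, HZ);

/* Old check: increment, multiply by HZ, plus an extra branch when throttled. */
static int should_throttle_old(unsigned int interrupts)
{
	if (interrupts == MAX_INTERRUPTS)
		return 1;			/* already throttled, keep disabling */
	return (uint64_t)HZ * (interrupts + 1) > SAMPLE_RATE;
}

/* New check: a single comparison against the precomputed per-tick budget. */
static int should_throttle_new(unsigned int interrupts)
{
	return interrupts >= max_samples_per_tick;
}

int main(void)
{
	unsigned int counts[] = { 0, 50, 99, 100, 101, MAX_INTERRUPTS };
	unsigned int i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
		printf("interrupts=%10u  old=%d  new=%d\n", counts[i],
		       should_throttle_old(counts[i]),
		       should_throttle_new(counts[i]));
	return 0;
}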
include/linux/perf_event.h

@@ -1110,6 +1110,10 @@ extern int sysctl_perf_event_paranoid;
 extern int sysctl_perf_event_mlock;
 extern int sysctl_perf_event_sample_rate;
 
+extern int perf_proc_update_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos);
+
 static inline bool perf_paranoid_tracepoint_raw(void)
 {
 	return sysctl_perf_event_paranoid > -1;
kernel/perf_event.c

@@ -150,7 +150,24 @@ int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
 /*
  * max perf event sample rate
  */
-int sysctl_perf_event_sample_rate __read_mostly = 100000;
+#define DEFAULT_MAX_SAMPLE_RATE 100000
+int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
+static int max_samples_per_tick __read_mostly =
+	DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
+
+int perf_proc_update_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret = proc_dointvec(table, write, buffer, lenp, ppos);
+
+	if (ret || !write)
+		return ret;
+
+	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
+
+	return 0;
+}
 
 static atomic64_t perf_event_id;
 
@@ -4941,26 +4958,14 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
 	if (unlikely(!is_sampling_event(event)))
 		return 0;
 
-	if (!throttle) {
-		hwc->interrupts++;
-	} else {
-		if (hwc->interrupts != MAX_INTERRUPTS) {
-			hwc->interrupts++;
-			if (HZ * hwc->interrupts >
-					(u64)sysctl_perf_event_sample_rate) {
-				hwc->interrupts = MAX_INTERRUPTS;
-				perf_log_throttle(event, 0);
-				ret = 1;
-			}
-		} else {
-			/*
-			 * Keep re-disabling events even though on the previous
-			 * pass we disabled it - just in case we raced with a
-			 * sched-in and the event got enabled again:
-			 */
-			ret = 1;
-		}
-	}
+	if (unlikely(hwc->interrupts >= max_samples_per_tick)) {
+		if (throttle) {
+			hwc->interrupts = MAX_INTERRUPTS;
+			perf_log_throttle(event, 0);
+			ret = 1;
+		}
+	} else
+		hwc->interrupts++;
 
 	if (event->attr.freq) {
 		u64 now = perf_clock();
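Two properties of the rewritten branch are worth spelling out. First, pinning hwc->interrupts to MAX_INTERRUPTS means every later overflow in the same tick still satisfies hwc->interrupts >= max_samples_per_tick, so the event keeps being reported as throttled, exactly as the removed "keep re-disabling" comment demanded. Second, when the caller does not request throttling (throttle == 0) the counter simply stops advancing once the budget is reached. The per-tick counter is reset, and throttled events are re-enabled, on the next timer tick. The trace below is a user-space sketch of the new path with an assumed budget of 4 and throttle treated as always set; it is not kernel code.

/*
 * User-space trace of the new throttling path over a burst of overflows in
 * one tick. The budget of 4 is an assumed value and throttle is taken as
 * always set; real callers pass throttle per overflow.
 * Build: cc -o burst-demo burst-demo.c && ./burst-demo
 */
#include <stdio.h>

#define MAX_INTERRUPTS		(~0U)
static const unsigned int max_samples_per_tick = 4;	/* assumed budget */

int main(void)
{
	unsigned int interrupts = 0;	/* models hwc->interrupts, reset each tick */
	int overflow;

	for (overflow = 1; overflow <= 7; overflow++) {
		int ret = 0;

		if (interrupts >= max_samples_per_tick) {
			interrupts = MAX_INTERRUPTS;	/* pin: later overflows stay throttled */
			ret = 1;			/* tell the caller to throttle */
		} else {
			interrupts++;
		}
		printf("overflow %d: interrupts=%u throttled=%d\n",
		       overflow, interrupts, ret);
	}
	return 0;
}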
kernel/sysctl.c

@@ -948,7 +948,7 @@ static struct ctl_table kern_table[] = {
 		.data		= &sysctl_perf_event_sample_rate,
 		.maxlen		= sizeof(sysctl_perf_event_sample_rate),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= perf_proc_update_handler,
 	},
 #endif
 #ifdef CONFIG_KMEMCHECK
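The one-line kernel/sysctl.c change above ties the pieces together: writes to the sample-rate sysctl now go through perf_proc_update_handler (added in kernel/perf_event.c earlier in this diff), which lets proc_dointvec parse and store the new rate and then refreshes the cached max_samples_per_tick, while reads return early (ret || !write) and behave as before. For a feel of the numbers the handler produces, the sketch below evaluates the per-tick budget for the default rate at a few HZ settings; the DIV_ROUND_UP definition matches the kernel macro, and the HZ values are merely examples since HZ is fixed at build time.

/*
 * Per-tick sample budget for the default rate at a few example HZ values
 * (standalone sketch; HZ is a kernel build-time constant, the values here
 * are only illustrative). Build: cc -o budget-demo budget-demo.c && ./budget-demo
 */
#include <stdio.h>

#define DEFAULT_MAX_SAMPLE_RATE	100000
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int hz_values[] = { 100, 250, 300, 1000 };
	int i;

	for (i = 0; i < (int)(sizeof(hz_values) / sizeof(hz_values[0])); i++)
		printf("HZ=%-4d -> max_samples_per_tick=%d\n", hz_values[i],
		       DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, hz_values[i]));
	return 0;
}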