arm_pmu: Clean up maximum period handling
Each PMU defines its max_period as the maximum value the counter can count. Since all PMU backends support 32-bit counters by default, remove the redundant field. No functional changes.

Cc: Will Deacon <will.deacon@arm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 64b2f02571
commit 8d3e994241
6 changed files with 12 additions and 11 deletions
@@ -28,6 +28,11 @@
 static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
 static DEFINE_PER_CPU(int, cpu_irq);
 
+static inline u64 arm_pmu_max_period(void)
+{
+	return (1ULL << 32) - 1;
+}
+
 static int
 armpmu_map_cache_event(const unsigned (*cache_map)
 				      [PERF_COUNT_HW_CACHE_MAX]
@@ -114,8 +119,10 @@ int armpmu_event_set_period(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	s64 left = local64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
+	u64 max_period;
 	int ret = 0;
 
+	max_period = arm_pmu_max_period();
 	if (unlikely(left <= -period)) {
 		left = period;
 		local64_set(&hwc->period_left, left);
@@ -136,8 +143,8 @@ int armpmu_event_set_period(struct perf_event *event)
 	 * effect we are reducing max_period to account for
 	 * interrupt latency (and we are being very conservative).
 	 */
-	if (left > (armpmu->max_period >> 1))
-		left = armpmu->max_period >> 1;
+	if (left > (max_period >> 1))
+		left = (max_period >> 1);
 
 	local64_set(&hwc->prev_count, (u64)-left);
 
@@ -153,6 +160,7 @@ u64 armpmu_event_update(struct perf_event *event)
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
 	u64 delta, prev_raw_count, new_raw_count;
+	u64 max_period = arm_pmu_max_period();
 
 again:
 	prev_raw_count = local64_read(&hwc->prev_count);
@@ -162,7 +170,7 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
-	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
+	delta = (new_raw_count - prev_raw_count) & max_period;
 
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
@@ -402,7 +410,7 @@ __hw_perf_event_init(struct perf_event *event)
 	 * is far less likely to overtake the previous one unless
 	 * you have some serious IRQ latency issues.
 	 */
-	hwc->sample_period = armpmu->max_period >> 1;
+	hwc->sample_period = arm_pmu_max_period() >> 1;
 	hwc->last_period = hwc->sample_period;
 	local64_set(&hwc->period_left, hwc->sample_period);
 }
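For readers following the arithmetic in armpmu_event_update() above, here is a minimal, standalone userspace sketch (not part of this commit; the function names merely mirror the diff and are not kernel APIs) showing why masking the difference of two counter readings with a fixed 32-bit maximum period yields the correct delta even when the hardware counter wraps:

/*
 * Standalone illustration, assuming plain hosted C with stdint/stdio.
 * arm_pmu_max_period() and counter_delta() are illustrative stand-ins
 * for the logic in the diff, not kernel interfaces.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t arm_pmu_max_period(void)
{
	/* All PMU backends use 32-bit counters by default. */
	return (1ULL << 32) - 1;
}

/* Same arithmetic as: delta = (new_raw_count - prev_raw_count) & max_period; */
static uint64_t counter_delta(uint64_t prev_raw, uint64_t new_raw)
{
	return (new_raw - prev_raw) & arm_pmu_max_period();
}

int main(void)
{
	/* No wraparound: prints 50. */
	printf("%llu\n", (unsigned long long)counter_delta(100, 150));

	/* Counter wrapped past 2^32 - 1: reading went 0xfffffff0 -> 0x10, prints 32. */
	printf("%llu\n", (unsigned long long)counter_delta(0xfffffff0ULL, 0x10ULL));

	return 0;
}

Because the mask depends only on the counter width, a single shared arm_pmu_max_period() helper can stand in for the per-PMU max_period field once every backend agrees on 32-bit counters.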