Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer updates from Ingo Molnar:
 "The main changes in this cycle were:

   - rework hrtimer expiry calculation in hrtimer_interrupt(): the
     previous code had a subtle bug where expiry caching would miss an
     expiry, resulting in occasional bogus (late) expiry of hrtimers.

   - continuing Y2038 fixes

   - ktime division optimization

   - misc smaller fixes and cleanups"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  hrtimer: Make __hrtimer_get_next_event() static
  rtc: Convert rtc_set_ntp_time() to use timespec64
  rtc: Remove redundant rtc_valid_tm() from rtc_hctosys()
  rtc: Modify rtc_hctosys() to address y2038 issues
  rtc: Update rtc-dev to use y2038-safe time interfaces
  rtc: Update interface.c to use y2038-safe time interfaces
  time: Expose get_monotonic_boottime64 for in-kernel use
  time: Expose getboottime64 for in-kernel uses
  ktime: Optimize ktime_divns for constant divisors
  hrtimer: Prevent stale expiry time in hrtimer_interrupt()
  ktime.h: Introduce ktime_ms_delta
commit 0ba97bc4b4
11 changed files with 120 additions and 104 deletions
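One item in the summary above is the ktime division optimization. In the hunks below it is visible only as the rename of ktime_divns() to __ktime_divns(), which frees the ktime_divns name for an inline wrapper that lets the compiler handle compile-time-constant divisors directly. The standalone sketch below only illustrates that idea; the stand-in types, the wrapper name ktime_divns_sketch and main() are illustrative assumptions, not the kernel implementation.

/* Illustrative only: a userspace approximation of the constant-divisor idea. */
#include <stdint.h>
#include <stdio.h>

typedef int64_t  s64;
typedef uint64_t u64;

/* Stand-in for the out-of-line helper (the __ktime_divns rename below). */
static u64 __ktime_divns(s64 kt_ns, s64 div)
{
	/* Simplified fallback: a plain 64-bit division. */
	return (u64)kt_ns / (u64)div;
}

/*
 * Hypothetical wrapper: compile-time-constant divisors that fit in 32 bits
 * stay inline, so the compiler can lower them to a multiply and shift.
 */
static inline u64 ktime_divns_sketch(s64 kt_ns, s64 div)
{
	if (__builtin_constant_p(div) && !(div >> 32))
		return (u64)kt_ns / (u64)div;
	return __ktime_divns(kt_ns, div);
}

int main(void)
{
	s64 delta_ns = 1500000000LL;	/* 1.5 s expressed in nanoseconds */

	/* 1500000000 ns / 1000000 (nanoseconds per millisecond) = 1500 ms */
	printf("%llu ms\n",
	       (unsigned long long)ktime_divns_sketch(delta_ns, 1000000));
	return 0;
}

The kernel diff itself follows.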
@@ -266,7 +266,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 /*
  * Divide a ktime value by a nanosecond value
  */
-u64 ktime_divns(const ktime_t kt, s64 div)
+u64 __ktime_divns(const ktime_t kt, s64 div)
 {
 	u64 dclc;
 	int sft = 0;
@@ -282,7 +282,7 @@ u64 ktime_divns(const ktime_t kt, s64 div)
 
 	return dclc;
 }
-EXPORT_SYMBOL_GPL(ktime_divns);
+EXPORT_SYMBOL_GPL(__ktime_divns);
 #endif /* BITS_PER_LONG >= 64 */
 
 /*
@@ -440,6 +440,37 @@ static inline void debug_deactivate(struct hrtimer *timer)
 	trace_hrtimer_cancel(timer);
 }
 
+#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
+static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+{
+	struct hrtimer_clock_base *base = cpu_base->clock_base;
+	ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
+	int i;
+
+	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
+		struct timerqueue_node *next;
+		struct hrtimer *timer;
+
+		next = timerqueue_getnext(&base->active);
+		if (!next)
+			continue;
+
+		timer = container_of(next, struct hrtimer, node);
+		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+		if (expires.tv64 < expires_next.tv64)
+			expires_next = expires;
+	}
+	/*
+	 * clock_was_set() might have changed base->offset of any of
+	 * the clock bases so the result might be negative. Fix it up
+	 * to prevent a false positive in clockevents_program_event().
+	 */
+	if (expires_next.tv64 < 0)
+		expires_next.tv64 = 0;
+	return expires_next;
+}
+#endif
+
 /* High resolution timer related functions */
 #ifdef CONFIG_HIGH_RES_TIMERS
 
@@ -488,32 +519,7 @@ static inline int hrtimer_hres_active(void)
 static void
 hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 {
-	int i;
-	struct hrtimer_clock_base *base = cpu_base->clock_base;
-	ktime_t expires, expires_next;
-
-	expires_next.tv64 = KTIME_MAX;
-
-	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
-		struct hrtimer *timer;
-		struct timerqueue_node *next;
-
-		next = timerqueue_getnext(&base->active);
-		if (!next)
-			continue;
-		timer = container_of(next, struct hrtimer, node);
-
-		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
-		/*
-		 * clock_was_set() has changed base->offset so the
-		 * result might be negative. Fix it up to prevent a
-		 * false positive in clockevents_program_event()
-		 */
-		if (expires.tv64 < 0)
-			expires.tv64 = 0;
-		if (expires.tv64 < expires_next.tv64)
-			expires_next = expires;
-	}
+	ktime_t expires_next = __hrtimer_get_next_event(cpu_base);
 
 	if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
 		return;
@@ -586,6 +592,15 @@ static int hrtimer_reprogram(struct hrtimer *timer,
 	if (expires.tv64 >= cpu_base->expires_next.tv64)
 		return 0;
 
+	/*
+	 * When the target cpu of the timer is currently executing
+	 * hrtimer_interrupt(), then we do not touch the clock event
+	 * device. hrtimer_interrupt() will reevaluate all clock bases
+	 * before reprogramming the device.
+	 */
+	if (cpu_base->in_hrtirq)
+		return 0;
+
 	/*
 	 * If a hang was detected in the last timer interrupt then we
 	 * do not schedule a timer which is earlier than the expiry
@@ -1104,29 +1119,14 @@ EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
 ktime_t hrtimer_get_next_event(void)
 {
 	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
-	struct hrtimer_clock_base *base = cpu_base->clock_base;
-	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
+	ktime_t mindelta = { .tv64 = KTIME_MAX };
 	unsigned long flags;
-	int i;
 
 	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
-	if (!hrtimer_hres_active()) {
-		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
-			struct hrtimer *timer;
-			struct timerqueue_node *next;
-
-			next = timerqueue_getnext(&base->active);
-			if (!next)
-				continue;
-
-			timer = container_of(next, struct hrtimer, node);
-			delta.tv64 = hrtimer_get_expires_tv64(timer);
-			delta = ktime_sub(delta, base->get_time());
-			if (delta.tv64 < mindelta.tv64)
-				mindelta.tv64 = delta.tv64;
-		}
-	}
+	if (!hrtimer_hres_active())
+		mindelta = ktime_sub(__hrtimer_get_next_event(cpu_base),
+				     ktime_get());
 
 	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
@@ -1253,7 +1253,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	raw_spin_lock(&cpu_base->lock);
 	entry_time = now = hrtimer_update_base(cpu_base);
 retry:
-	expires_next.tv64 = KTIME_MAX;
+	cpu_base->in_hrtirq = 1;
 	/*
 	 * We set expires_next to KTIME_MAX here with cpu_base->lock
 	 * held to prevent that a timer is enqueued in our queue via
@@ -1291,28 +1291,20 @@ retry:
 			 * are right-of a not yet expired timer, because that
 			 * timer will have to trigger a wakeup anyway.
 			 */
-
-			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
-				ktime_t expires;
-
-				expires = ktime_sub(hrtimer_get_expires(timer),
-						    base->offset);
-				if (expires.tv64 < 0)
-					expires.tv64 = KTIME_MAX;
-				if (expires.tv64 < expires_next.tv64)
-					expires_next = expires;
+			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
 				break;
-			}
 
 			__run_hrtimer(timer, &basenow);
 		}
 	}
-
+	/* Reevaluate the clock bases for the next expiry */
+	expires_next = __hrtimer_get_next_event(cpu_base);
 	/*
 	 * Store the new expiry value so the migration code can verify
 	 * against it.
 	 */
 	cpu_base->expires_next = expires_next;
+	cpu_base->in_hrtirq = 0;
 	raw_spin_unlock(&cpu_base->lock);
 
 	/* Reprogramming necessary ? */