Mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-06-22 22:51:37 +00:00)
Merge back earlier cpuidle material for v4.10.
commit 4e28ec3d5f
13 changed files with 143 additions and 81 deletions
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3380,6 +3380,7 @@ M: Daniel Lezcano <daniel.lezcano@linaro.org>
 L: linux-pm@vger.kernel.org
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+B: https://bugzilla.kernel.org
 F: drivers/cpuidle/*
 F: include/linux/cpuidle.h
 
@@ -6289,9 +6290,11 @@ S: Maintained
 F: drivers/platform/x86/intel-vbtn.c
 
 INTEL IDLE DRIVER
+M: Jacob Pan <jacob.jun.pan@linux.intel.com>
 M: Len Brown <lenb@kernel.org>
 L: linux-pm@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git
+B: https://bugzilla.kernel.org
 S: Supported
 F: drivers/idle/intel_idle.c
 
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -22,7 +22,7 @@
 
 #define POWERNV_THRESHOLD_LATENCY_NS 200000
 
-struct cpuidle_driver powernv_idle_driver = {
+static struct cpuidle_driver powernv_idle_driver = {
 	.name = "powernv_idle",
 	.owner = THIS_MODULE,
 };
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -97,7 +97,17 @@ static int find_deepest_state(struct cpuidle_driver *drv,
 	return ret;
 }
 
-#ifdef CONFIG_SUSPEND
+/* Set the current cpu to use the deepest idle state, override governors */
+void cpuidle_use_deepest_state(bool enable)
+{
+	struct cpuidle_device *dev;
+
+	preempt_disable();
+	dev = cpuidle_get_device();
+	dev->use_deepest_state = enable;
+	preempt_enable();
+}
+
 /**
  * cpuidle_find_deepest_state - Find the deepest available idle state.
  * @drv: cpuidle driver for the given CPU.
@@ -109,6 +119,7 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 	return find_deepest_state(drv, dev, UINT_MAX, 0, false);
 }
 
+#ifdef CONFIG_SUSPEND
 static void enter_freeze_proper(struct cpuidle_driver *drv,
 				struct cpuidle_device *dev, int index)
 {
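The new helper only flips a per-CPU flag; while it is set, cpuidle_idle_call() bypasses the governors and always picks the deepest available state. A minimal sketch of the intended pairing follows — forced_idle_section() is a hypothetical caller, the in-tree user being play_idle() added later in this commit:

#include <linux/cpuidle.h>

static void forced_idle_section(void)
{
	/* From here on, state selection ignores the governors. */
	cpuidle_use_deepest_state(true);

	/* ... idle this CPU for a bounded period ... */

	/* Hand state selection back to the governors. */
	cpuidle_use_deepest_state(false);
}

The preempt_disable()/preempt_enable() pair inside the helper exists because cpuidle_get_device() reads a per-CPU pointer, so the flag update must not migrate to another CPU half-way through.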
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -38,6 +38,12 @@ static int init_state_node(struct cpuidle_state *idle_state,
 	 * state enter function.
 	 */
 	idle_state->enter = match_id->data;
+	/*
+	 * Since this is not a "coupled" state, it's safe to assume interrupts
+	 * won't be enabled when it exits allowing the tick to be frozen
+	 * safely. So enter() can be also enter_freeze() callback.
+	 */
+	idle_state->enter_freeze = match_id->data;
 
 	err = of_property_read_u32(state_node, "wakeup-latency-us",
 				   &idle_state->exit_latency);
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -9,7 +9,6 @@
  */
 
 #include <linux/mutex.h>
-#include <linux/module.h>
 #include <linux/cpuidle.h>
 
 #include "cpuidle.h"
@@ -53,14 +52,11 @@ int cpuidle_switch_governor(struct cpuidle_governor *gov)
 	if (cpuidle_curr_governor) {
 		list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
 			cpuidle_disable_device(dev);
-		module_put(cpuidle_curr_governor->owner);
 	}
 
 	cpuidle_curr_governor = gov;
 
 	if (gov) {
-		if (!try_module_get(cpuidle_curr_governor->owner))
-			return -EINVAL;
 		list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
 			cpuidle_enable_device(dev);
 		cpuidle_install_idle_handler();
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -15,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/cpuidle.h>
 #include <linux/pm_qos.h>
-#include <linux/module.h>
 #include <linux/jiffies.h>
 #include <linux/tick.h>
 
@@ -177,7 +176,6 @@ static struct cpuidle_governor ladder_governor = {
 	.enable = ladder_enable_device,
 	.select = ladder_select_state,
 	.reflect = ladder_reflect,
-	.owner = THIS_MODULE,
 };
 
 /**
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -19,7 +19,6 @@
 #include <linux/tick.h>
 #include <linux/sched.h>
 #include <linux/math64.h>
-#include <linux/module.h>
 
 /*
  * Please note when changing the tuning values:
@@ -484,7 +483,6 @@ static struct cpuidle_governor menu_governor = {
 	.enable = menu_enable_device,
 	.select = menu_select,
 	.reflect = menu_reflect,
-	.owner = THIS_MODULE,
 };
 
 /**
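Since cpuidle governors are never built as modules, the module_put()/try_module_get() calls and the .owner field were dead weight. A governor definition under the resulting API reduces to the sketch below; all example_* names are hypothetical and exist only to make the sketch self-contained:

#include <linux/cpuidle.h>
#include <linux/init.h>

static int example_enable(struct cpuidle_driver *drv,
			  struct cpuidle_device *dev)
{
	return 0;
}

static int example_select(struct cpuidle_driver *drv,
			  struct cpuidle_device *dev)
{
	return 0;	/* always pick the shallowest state */
}

static struct cpuidle_governor example_governor = {
	.name	= "example",
	.rating	= 10,
	.enable	= example_enable,
	.select	= example_select,
	/* no .owner, no module refcounting */
};

static int __init init_example_governor(void)
{
	return cpuidle_register_governor(&example_governor);
}
postcore_initcall(init_example_governor);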
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -245,6 +245,8 @@ void arch_cpu_idle_dead(void);
 int cpu_report_state(int cpu);
 int cpu_check_up_prepare(int cpu);
 void cpu_set_state_online(int cpu);
+void play_idle(unsigned long duration_ms);
+
 #ifdef CONFIG_HOTPLUG_CPU
 bool cpu_wait_death(unsigned int cpu, int seconds);
 bool cpu_report_death(void);
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -74,6 +74,7 @@ struct cpuidle_driver_kobj;
 struct cpuidle_device {
 	unsigned int registered:1;
 	unsigned int enabled:1;
+	unsigned int use_deepest_state:1;
 	unsigned int cpu;
 
 	int last_residency;
@@ -192,11 +193,12 @@ static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
 static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
 #endif
 
-#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
+#ifdef CONFIG_CPU_IDLE
 extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 				      struct cpuidle_device *dev);
 extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
 				struct cpuidle_device *dev);
+extern void cpuidle_use_deepest_state(bool enable);
 #else
 static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 					     struct cpuidle_device *dev)
@@ -204,6 +206,9 @@ static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
 				       struct cpuidle_device *dev)
 {return -ENODEV; }
+static inline void cpuidle_use_deepest_state(bool enable)
+{
+}
 #endif
 
 /* kernel/sched/idle.c */
@@ -235,8 +240,6 @@ struct cpuidle_governor {
 	int  (*select) (struct cpuidle_driver *drv,
 			struct cpuidle_device *dev);
 	void (*reflect) (struct cpuidle_device *dev, int index);
-
-	struct module *owner;
 };
 
 #ifdef CONFIG_CPU_IDLE
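The empty static inline stub in the #else branch keeps callers free of #ifdef CONFIG_CPU_IDLE guards of their own — for example (set_forced_idle() is a hypothetical caller, not part of this commit):

#include <linux/cpuidle.h>

/* Builds either way; with cpuidle off, the stub compiles to nothing. */
static void set_forced_idle(bool on)
{
	cpuidle_use_deepest_state(on);
}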
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2254,6 +2254,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 /*
  * Per process flags
  */
+#define PF_IDLE		0x00000002	/* I am an IDLE thread */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
@@ -2611,7 +2612,7 @@ extern struct task_struct *idle_task(int cpu);
  */
 static inline bool is_idle_task(const struct task_struct *p)
 {
-	return p->pid == 0;
+	return !!(p->flags & PF_IDLE);
 }
 extern struct task_struct *curr_task(int cpu);
 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1540,7 +1540,7 @@ static __latent_entropy struct task_struct *copy_process(
 		goto bad_fork_cleanup_count;
 
 	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
-	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
+	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
 	p->flags |= PF_FORKNOEXEC;
 	INIT_LIST_HEAD(&p->children);
 	INIT_LIST_HEAD(&p->sibling);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5279,6 +5279,7 @@ void init_idle(struct task_struct *idle, int cpu)
 	__sched_fork(0, idle);
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
+	idle->flags |= PF_IDLE;
 
 	kasan_unpoison_task_stack(idle);
 
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -164,11 +164,14 @@ static void cpuidle_idle_call(void)
 	 * timekeeping to prevent timer interrupts from kicking us out of idle
 	 * until a proper wakeup interrupt happens.
 	 */
-	if (idle_should_freeze()) {
-		entered_state = cpuidle_enter_freeze(drv, dev);
-		if (entered_state > 0) {
-			local_irq_enable();
-			goto exit_idle;
+	if (idle_should_freeze() || dev->use_deepest_state) {
+		if (idle_should_freeze()) {
+			entered_state = cpuidle_enter_freeze(drv, dev);
+			if (entered_state > 0) {
+				local_irq_enable();
+				goto exit_idle;
+			}
 		}
 
 		next_state = cpuidle_find_deepest_state(drv, dev);
@@ -202,76 +205,65 @@ exit_idle:
  *
  * Called with polling cleared.
  */
-static void cpu_idle_loop(void)
+static void do_idle(void)
 {
-	int cpu = smp_processor_id();
-
-	while (1) {
-		/*
-		 * If the arch has a polling bit, we maintain an invariant:
-		 *
-		 * Our polling bit is clear if we're not scheduled (i.e. if
-		 * rq->curr != rq->idle). This means that, if rq->idle has
-		 * the polling bit set, then setting need_resched is
-		 * guaranteed to cause the cpu to reschedule.
-		 */
-
-		__current_set_polling();
-		quiet_vmstat();
-		tick_nohz_idle_enter();
-
-		while (!need_resched()) {
-			check_pgt_cache();
-			rmb();
-
-			if (cpu_is_offline(cpu)) {
-				cpuhp_report_idle_dead();
-				arch_cpu_idle_dead();
-			}
-
-			local_irq_disable();
-			arch_cpu_idle_enter();
-
-			/*
-			 * In poll mode we reenable interrupts and spin.
-			 *
-			 * Also if we detected in the wakeup from idle
-			 * path that the tick broadcast device expired
-			 * for us, we don't want to go deep idle as we
-			 * know that the IPI is going to arrive right
-			 * away
-			 */
-			if (cpu_idle_force_poll || tick_check_broadcast_expired())
-				cpu_idle_poll();
-			else
-				cpuidle_idle_call();
-
-			arch_cpu_idle_exit();
-		}
-
-		/*
-		 * Since we fell out of the loop above, we know
-		 * TIF_NEED_RESCHED must be set, propagate it into
-		 * PREEMPT_NEED_RESCHED.
-		 *
-		 * This is required because for polling idle loops we will
-		 * not have had an IPI to fold the state for us.
-		 */
-		preempt_set_need_resched();
-		tick_nohz_idle_exit();
-		__current_clr_polling();
-
-		/*
-		 * We promise to call sched_ttwu_pending and reschedule
-		 * if need_resched is set while polling is set. That
-		 * means that clearing polling needs to be visible
-		 * before doing these things.
-		 */
-		smp_mb__after_atomic();
-
-		sched_ttwu_pending();
-		schedule_preempt_disabled();
+	/*
+	 * If the arch has a polling bit, we maintain an invariant:
+	 *
+	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
+	 * rq->idle). This means that, if rq->idle has the polling bit set,
+	 * then setting need_resched is guaranteed to cause the CPU to
+	 * reschedule.
+	 */
+
+	__current_set_polling();
+	tick_nohz_idle_enter();
+
+	while (!need_resched()) {
+		check_pgt_cache();
+		rmb();
+
+		if (cpu_is_offline(smp_processor_id())) {
+			cpuhp_report_idle_dead();
+			arch_cpu_idle_dead();
+		}
+
+		local_irq_disable();
+		arch_cpu_idle_enter();
+
+		/*
+		 * In poll mode we reenable interrupts and spin. Also if we
+		 * detected in the wakeup from idle path that the tick
+		 * broadcast device expired for us, we don't want to go deep
+		 * idle as we know that the IPI is going to arrive right away.
+		 */
+		if (cpu_idle_force_poll || tick_check_broadcast_expired())
+			cpu_idle_poll();
+		else
+			cpuidle_idle_call();
+		arch_cpu_idle_exit();
 	}
+
+	/*
+	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
+	 * be set, propagate it into PREEMPT_NEED_RESCHED.
+	 *
+	 * This is required because for polling idle loops we will not have had
+	 * an IPI to fold the state for us.
+	 */
+	preempt_set_need_resched();
+	tick_nohz_idle_exit();
+	__current_clr_polling();
+
+	/*
+	 * We promise to call sched_ttwu_pending() and reschedule if
+	 * need_resched() is set while polling is set. That means that clearing
+	 * polling needs to be visible before doing these things.
+	 */
+	smp_mb__after_atomic();
+
+	sched_ttwu_pending();
+	schedule_preempt_disabled();
 }
 
 bool cpu_in_idle(unsigned long pc)
@@ -280,6 +272,56 @@ bool cpu_in_idle(unsigned long pc)
 	       pc < (unsigned long)__cpuidle_text_end;
 }
 
+struct idle_timer {
+	struct hrtimer timer;
+	int done;
+};
+
+static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
+{
+	struct idle_timer *it = container_of(timer, struct idle_timer, timer);
+
+	WRITE_ONCE(it->done, 1);
+	set_tsk_need_resched(current);
+
+	return HRTIMER_NORESTART;
+}
+
+void play_idle(unsigned long duration_ms)
+{
+	struct idle_timer it;
+
+	/*
+	 * Only FIFO tasks can disable the tick since they don't need the forced
+	 * preemption.
+	 */
+	WARN_ON_ONCE(current->policy != SCHED_FIFO);
+	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
+	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
+	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
+	WARN_ON_ONCE(!duration_ms);
+
+	rcu_sleep_check();
+	preempt_disable();
+	current->flags |= PF_IDLE;
+	cpuidle_use_deepest_state(true);
+
+	it.done = 0;
+	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	it.timer.function = idle_inject_timer_fn;
+	hrtimer_start(&it.timer, ms_to_ktime(duration_ms), HRTIMER_MODE_REL_PINNED);
+
+	while (!READ_ONCE(it.done))
+		do_idle();
+
+	cpuidle_use_deepest_state(false);
+	current->flags &= ~PF_IDLE;
+
+	preempt_fold_need_resched();
+	preempt_enable();
+}
+EXPORT_SYMBOL_GPL(play_idle);
+
 void cpu_startup_entry(enum cpuhp_state state)
 {
 	/*
@@ -299,5 +341,6 @@ void cpu_startup_entry(enum cpuhp_state state)
 #endif
 	arch_cpu_idle_prepare();
 	cpuhp_online_idle(state);
-	cpu_idle_loop();
+	while (1)
+		do_idle();
 }
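play_idle() is the piece that ties the series together: it marks the caller PF_IDLE, forces the deepest state, and runs do_idle() until the on-stack hrtimer fires. The WARN_ON_ONCE() checks mean it expects to be called from a per-CPU SCHED_FIFO kernel thread, a pattern along the lines of the intel_powerclamp idle-injection driver. A sketch of such a caller, with all idle_inject_* names illustrative rather than part of this commit:

#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int idle_inject_fn(void *data)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };

	/* Satisfy the SCHED_FIFO check before the first play_idle(). */
	sched_setscheduler(current, SCHED_FIFO, &param);

	while (!kthread_should_stop()) {
		play_idle(24);	/* force roughly 24 ms of deepest idle */
		schedule_timeout_interruptible(msecs_to_jiffies(76));
	}
	return 0;
}

/*
 * kthread_create_on_cpu() binds the thread to one CPU, which satisfies
 * the nr_cpus_allowed == 1 and PF_NO_SETAFFINITY checks in play_idle().
 */
static struct task_struct *start_idle_injection(unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_cpu(idle_inject_fn, NULL, cpu, "idle_inject/%u");
	if (!IS_ERR(tsk))
		wake_up_process(tsk);
	return tsk;
}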