mirror of https://github.com/Fishwaldo/linux-bl808.git
Merge branch 'rcu-tasks.2014.09.10a' into HEAD
rcu-tasks.2014.09.10a: Add RCU-tasks flavor of RCU.
commit 96b4672703
24 changed files with 613 additions and 93 deletions
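
This merge brings in the RCU-tasks flavor of RCU, which waits until every task has been seen to pass through a voluntary context switch, user-mode execution, or the idle loop, without relying on explicit read-side markers. Its intended use is freeing code trampolines (e.g. for ftrace and kprobes) that some task might still be executing. The new API mirrors the existing flavors: call_rcu_tasks(), synchronize_rcu_tasks(), and rcu_barrier_tasks(). A minimal usage sketch follows; the trampoline structure and helper names are hypothetical illustrations, not code from this commit:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct trampoline {
        struct rcu_head rh;
        /* executable code would follow in the real use case */
};

/* Invoked only once every task has passed through a voluntary context
 * switch (or user mode or idle), so no task can still be executing
 * inside the trampoline. */
static void trampoline_free_cb(struct rcu_head *rhp)
{
        kfree(container_of(rhp, struct trampoline, rh));
}

static void trampoline_retire(struct trampoline *tp)
{
        call_rcu_tasks(&tp->rh, trampoline_free_cb);    /* asynchronous */
        /* or, synchronously: synchronize_rcu_tasks(); kfree(tp); */
}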
kernel/rcu/tree.c
@@ -197,22 +197,24 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
  * one since the start of the grace period, this just sets a flag.
  * The caller must have disabled preemption.
  */
-void rcu_sched_qs(int cpu)
+void rcu_sched_qs(void)
 {
-        struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
-
-        if (rdp->passed_quiesce == 0)
-                trace_rcu_grace_period(TPS("rcu_sched"), rdp->gpnum, TPS("cpuqs"));
-        rdp->passed_quiesce = 1;
+        if (!__this_cpu_read(rcu_sched_data.passed_quiesce)) {
+                trace_rcu_grace_period(TPS("rcu_sched"),
+                                       __this_cpu_read(rcu_sched_data.gpnum),
+                                       TPS("cpuqs"));
+                __this_cpu_write(rcu_sched_data.passed_quiesce, 1);
+        }
 }

-void rcu_bh_qs(int cpu)
+void rcu_bh_qs(void)
 {
-        struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
-
-        if (rdp->passed_quiesce == 0)
-                trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs"));
-        rdp->passed_quiesce = 1;
+        if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
+                trace_rcu_grace_period(TPS("rcu_bh"),
+                                       __this_cpu_read(rcu_bh_data.gpnum),
+                                       TPS("cpuqs"));
+                __this_cpu_write(rcu_bh_data.passed_quiesce, 1);
+        }
 }
+
+static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
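
The quiescent-state functions lose their cpu argument: they are only ever invoked on the CPU whose state they record, so the table-walking per_cpu(var, cpu) accesses can become cheaper local __this_cpu operations, which is also why the comment requires preemption to be disabled. A standalone illustration of the pattern (example variable and functions, not from the patch):

#include <linux/percpu.h>

DEFINE_PER_CPU(int, example_flag);

/* Old style: cpu passed in, any CPU's instance reachable. */
static void set_flag(int cpu)
{
        per_cpu(example_flag, cpu) = 1;
}

/* New style: implicitly the local CPU; preemption must be disabled
 * so the task cannot migrate between the read and the write. */
static void set_flag_local(void)
{
        if (!__this_cpu_read(example_flag))
                __this_cpu_write(example_flag, 1);
}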
@@ -287,7 +289,7 @@ static void rcu_momentary_dyntick_idle(void)
 void rcu_note_context_switch(int cpu)
 {
         trace_rcu_utilization(TPS("Start context switch"));
-        rcu_sched_qs(cpu);
+        rcu_sched_qs();
         rcu_preempt_note_context_switch(cpu);
         if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
                 rcu_momentary_dyntick_idle();
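
The context-switch path now also checks rcu_sched_qs_mask (declared above): when a grace period has been waiting too long on this CPU, the flag is set and the next context switch calls rcu_momentary_dyntick_idle(), which advances the CPU's dynticks counter by two so that the grace-period kthread's next comparison against its earlier snapshot concludes the CPU passed through dyntick-idle. A simplified sketch of that trick, not the kernel's exact code:

#include <linux/atomic.h>

/* Advancing the dynticks counter by two makes the grace-period kthread's
 * snapshot comparison conclude that this CPU has passed through
 * dyntick-idle (a full quiescent state) since the counter was sampled. */
static void momentary_idle_sketch(atomic_t *dynticks)
{
        smp_mb__before_atomic();   /* Order prior read-side critical sections. */
        atomic_add(2, dynticks);   /* Looks like an idle sojourn to remote observers. */
        smp_mb__after_atomic();    /* Order subsequent read-side critical sections. */
}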
@@ -535,6 +537,7 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
         atomic_inc(&rdtp->dynticks);
         smp_mb__after_atomic(); /* Force ordering with next sojourn. */
         WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+        rcu_dynticks_task_enter();

         /*
          * It is illegal to enter an extended quiescent state while
@@ -651,6 +654,7 @@ void rcu_irq_exit(void)
 static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
                                int user)
 {
+        rcu_dynticks_task_exit();
         smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
         atomic_inc(&rdtp->dynticks);
         /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
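
The rcu_dynticks_task_enter()/rcu_dynticks_task_exit() hooks added to the extended-quiescent-state entry and exit paths let RCU-tasks handle NO_HZ_FULL CPUs, which can run in user mode indefinitely without scheduler ticks. Elsewhere in this merge they are defined roughly as follows (a sketch from the series, not part of these hunks):

static void rcu_dynticks_task_enter(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
        ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
#endif
}

static void rcu_dynticks_task_exit(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
        ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
#endif
}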
@@ -1656,7 +1660,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
                                             rnp->level, rnp->grplo,
                                             rnp->grphi, rnp->qsmask);
                 raw_spin_unlock_irq(&rnp->lock);
-                cond_resched();
+                cond_resched_rcu_qs();
         }

         mutex_unlock(&rsp->onoff_mutex);
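
This hunk and several later ones replace cond_resched() with cond_resched_rcu_qs() in RCU's own long-running loops, so that the grace-period machinery itself reports quiescent states (including to RCU-tasks) rather than merely offering to reschedule. The helper is defined in this series roughly as:

#define cond_resched_rcu_qs() \
do { \
        rcu_note_voluntary_context_switch(current); \
        cond_resched(); \
} while (0)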
@@ -1746,7 +1750,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
                 /* smp_mb() provided by prior unlock-lock pair. */
                 nocb += rcu_future_gp_cleanup(rsp, rnp);
                 raw_spin_unlock_irq(&rnp->lock);
-                cond_resched();
+                cond_resched_rcu_qs();
         }
         rnp = rcu_get_root(rsp);
         raw_spin_lock_irq(&rnp->lock);
@@ -1795,7 +1799,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                         /* Locking provides needed memory barrier. */
                         if (rcu_gp_init(rsp))
                                 break;
-                        cond_resched();
+                        cond_resched_rcu_qs();
                         WARN_ON(signal_pending(current));
                         trace_rcu_grace_period(rsp->name,
                                                ACCESS_ONCE(rsp->gpnum),
@@ -1838,10 +1842,10 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                 trace_rcu_grace_period(rsp->name,
                                                        ACCESS_ONCE(rsp->gpnum),
                                                        TPS("fqsend"));
-                                cond_resched();
+                                cond_resched_rcu_qs();
                         } else {
                                 /* Deal with stray signal. */
-                                cond_resched();
+                                cond_resched_rcu_qs();
                                 WARN_ON(signal_pending(current));
                                 trace_rcu_grace_period(rsp->name,
                                                        ACCESS_ONCE(rsp->gpnum),
@@ -2401,8 +2405,8 @@ void rcu_check_callbacks(int cpu, int user)
                  * at least not while the corresponding CPU is online.
                  */

-                rcu_sched_qs(cpu);
-                rcu_bh_qs(cpu);
+                rcu_sched_qs();
+                rcu_bh_qs();

         } else if (!in_softirq()) {

@@ -2413,11 +2417,13 @@ void rcu_check_callbacks(int cpu, int user)
                  * critical section, so note it.
                  */

-                rcu_bh_qs(cpu);
+                rcu_bh_qs();
         }
         rcu_preempt_check_callbacks(cpu);
         if (rcu_pending(cpu))
                 invoke_rcu_core();
+        if (user)
+                rcu_note_voluntary_context_switch(current);
         trace_rcu_utilization(TPS("End scheduler-tick"));
 }

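
The new if (user) check reports an RCU-tasks quiescent state from the scheduler tick: a tick taken in user mode proves the task is not currently executing kernel code such as a trampoline. rcu_note_voluntary_context_switch() simply clears the task's holdout flag once the grace-period kthread has marked it; it is defined in this series roughly as (a sketch, hedged against minor differences in the merged code):

#ifdef CONFIG_TASKS_RCU
#define rcu_note_voluntary_context_switch(t) \
do { \
        if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
                ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
} while (0)
#else
#define rcu_note_voluntary_context_switch(t)    do { } while (0)
#endif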
@@ -2440,7 +2446,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
         struct rcu_node *rnp;

         rcu_for_each_leaf_node(rsp, rnp) {
-                cond_resched();
+                cond_resched_rcu_qs();
                 mask = 0;
                 raw_spin_lock_irqsave(&rnp->lock, flags);
                 smp_mb__after_unlock_lock();