rcu: Simplify rcu_pending()/rcu_check_callbacks() API
All calls from outside RCU are of the form:

        if (rcu_pending(cpu))
                rcu_check_callbacks(cpu, user);

This is silly; instead, we put a call to rcu_pending() in
rcu_check_callbacks() and then make the outside calls be to
rcu_check_callbacks(). This cuts down on the code a bit and also
gives the compiler a better chance of optimizing.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josht@linux.vnet.ibm.com
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
LKML-Reference: <125097461311-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 22f00b69f6
commit a157229cab

6 changed files with 14 additions and 9 deletions
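To make the shape of the change concrete, here is a small standalone sketch contrasting the old and new caller patterns. The tick_before()/tick_after() helpers and the stub bodies are hypothetical illustrations, not the kernel sources; only the rcu_pending()/rcu_check_callbacks() names come from the patch.

#include <stdio.h>

/* Stub standing in for the real per-CPU "anything pending?" check. */
static int rcu_pending(int cpu)
{
        return cpu >= 0;        /* toy condition */
}

/* After this commit, the pending check lives inside the function itself. */
static void rcu_check_callbacks(int cpu, int user)
{
        if (!rcu_pending(cpu))
                return;         /* nothing for RCU to do */
        printf("processing RCU callbacks: cpu=%d user=%d\n", cpu, user);
}

/* Old caller shape: each tick path paired the two calls by hand. */
static void tick_before(int cpu, int user)
{
        if (rcu_pending(cpu))
                rcu_check_callbacks(cpu, user);
}

/* New caller shape: call unconditionally; the check is internal. */
static void tick_after(int cpu, int user)
{
        rcu_check_callbacks(cpu, user);
}

int main(void)
{
        tick_before(0, 1);
        tick_after(0, 1);
        return 0;
}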
arch/ia64/xen/time.c
@@ -133,7 +133,6 @@ consider_steal_time(unsigned long new_itm)
                 account_idle_ticks(blocked);
                 run_local_timers();
 
-                if (rcu_pending(cpu))
-                        rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
+                rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
 
                 scheduler_tick();
include/linux/rcupreempt.h
@@ -66,7 +66,6 @@ extern void call_rcu_sched(struct rcu_head *head,
 
 extern void __rcu_read_lock(void);
 extern void __rcu_read_unlock(void);
-extern int rcu_pending(int cpu);
 extern int rcu_needs_cpu(int cpu);
 
 #define __rcu_read_lock_bh()    { rcu_read_lock(); local_bh_disable(); }
include/linux/rcutree.h
@@ -33,7 +33,6 @@
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
 
-extern int rcu_pending(int cpu);
 extern int rcu_needs_cpu(int cpu);
 
 static inline void __rcu_read_lock(void)
kernel/rcupreempt.c
@@ -159,6 +159,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched
         .dynticks = 1,
 };
 
+static int rcu_pending(int cpu);
+
 void rcu_sched_qs(int cpu)
 {
         struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
@@ -961,7 +963,10 @@ static void rcu_check_mb(int cpu)
 void rcu_check_callbacks(int cpu, int user)
 {
         unsigned long flags;
-        struct rcu_data *rdp = RCU_DATA_CPU(cpu);
+        struct rcu_data *rdp;
+
+        if (!rcu_pending(cpu))
+                return; /* if nothing for RCU to do. */
 
         /*
          * If this CPU took its interrupt from user mode or from the
@@ -976,6 +981,7 @@ void rcu_check_callbacks(int cpu, int user)
          * CPUs to happen after any such write.
          */
 
+        rdp = RCU_DATA_CPU(cpu);
         if (user ||
             (idle_cpu(cpu) && !in_softirq() &&
              hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
@@ -1382,7 +1388,7 @@ int rcu_needs_cpu(int cpu)
                 rdp->waitschedlist != NULL);
 }
 
-int rcu_pending(int cpu)
+static int rcu_pending(int cpu)
 {
         struct rcu_data *rdp = RCU_DATA_CPU(cpu);
 
kernel/rcutree.c
@@ -111,6 +111,7 @@ static int qhimark = 10000;  /* If this many pending, ignore blimit. */
 static int qlowmark = 100;   /* Once only this many pending, use blimit. */
 
 static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
+static int rcu_pending(int cpu);
 
 /*
  * Return the number of RCU-sched batches processed thus far for debug & stats.
@@ -974,6 +975,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
  */
 void rcu_check_callbacks(int cpu, int user)
 {
+        if (!rcu_pending(cpu))
+                return; /* if nothing for RCU to do. */
         if (user ||
             (idle_cpu(cpu) && rcu_scheduler_active &&
              !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
@@ -1329,7 +1332,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
  * by the current CPU, returning 1 if so. This function is part of the
  * RCU implementation; it is -not- an exported member of the RCU API.
  */
-int rcu_pending(int cpu)
+static int rcu_pending(int cpu)
 {
         return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
                __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu));
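A side note on the rcu_pending() demotion visible in kernel/rcupreempt.c and kernel/rcutree.c above: the function becomes static, so the extern declarations are dropped from the headers, and because rcu_check_callbacks() is defined earlier in each file than rcu_pending(), a file-scope forward declaration is added near the top of each file. A minimal standalone sketch of that pattern follows; the stub bodies and main() are hypothetical, and only the function names mirror the patch.

#include <stdio.h>

/* Forward declaration: the static definition appears after its first use. */
static int rcu_pending(int cpu);

void rcu_check_callbacks(int cpu, int user)
{
        if (!rcu_pending(cpu))
                return;         /* if nothing for RCU to do. */
        printf("cpu %d has RCU work (user=%d)\n", cpu, user);
}

/* Defined later in the file; 'static' keeps it private to this file,
 * which is why the header declarations could be removed. */
static int rcu_pending(int cpu)
{
        return (cpu % 2) == 0;  /* toy stand-in for the real per-CPU checks */
}

int main(void)
{
        rcu_check_callbacks(2, 0);
        return 0;
}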
kernel/timer.c
@@ -1156,7 +1156,6 @@ void update_process_times(int user_tick)
         /* Note: this timer irq context must be accounted for as well. */
         account_process_tick(p, user_tick);
         run_local_timers();
-        if (rcu_pending(cpu))
-                rcu_check_callbacks(cpu, user_tick);
+        rcu_check_callbacks(cpu, user_tick);
         printk_tick();
         scheduler_tick();