Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RCU updates from Ingo Molnar:

 - add RCU torture scripts/tooling
 - static analysis improvements
 - update RCU documentation
 - miscellaneous fixes

* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (52 commits)
  rcu: Remove "extern" from function declarations in kernel/rcu/rcu.h
  rcu: Remove "extern" from function declarations in include/linux/*rcu*.h
  rcu/torture: Dynamically allocate SRCU output buffer to avoid overflow
  rcu: Don't activate RCU core on NO_HZ_FULL CPUs
  rcu: Warn on allegedly impossible rcu_read_unlock_special() from irq
  rcu: Add an RCU_INITIALIZER for global RCU-protected pointers
  rcu: Make rcu_assign_pointer's assignment volatile and type-safe
  bonding: Use RCU_INIT_POINTER() for better overhead and for sparse
  rcu: Add comment on evaluate-once properties of rcu_assign_pointer().
  rcu: Provide better diagnostics for blocking in RCU callback functions
  rcu: Improve SRCU's grace-period comments
  rcu: Fix CONFIG_RCU_FANOUT_EXACT for odd fanout/leaf values
  rcu: Fix coccinelle warnings
  rcutorture: Stop tracking FSF's postal address
  rcutorture: Move checkarg to functions.sh
  rcutorture: Flag errors and warnings with color coding
  rcutorture: Record results from repeated runs of the same test scenario
  rcutorture: Test summary at end of run with less chattiness
  rcutorture: Update comment in kvm.sh listing typical RCU trace events
  rcutorture: Add tracing-enabled version of TREE08
  ...
commit a693c46e14
127 changed files with 3768 additions and 198 deletions
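Several commits in this series (RCU_INITIALIZER, the volatile and type-safe rcu_assign_pointer(), bonding's switch to RCU_INIT_POINTER()) touch the same small pointer-publication API. The sketch below shows how these primitives are typically used together; the struct foo and gp names are hypothetical and not taken from this merge.

#include <linux/rcupdate.h>

struct foo {
	int val;
};

static struct foo default_foo;

/* RCU_INITIALIZER() allows static initialization of a global __rcu
 * pointer without sparse address-space complaints. */
static struct foo __rcu *gp = RCU_INITIALIZER(&default_foo);

static void publish_foo(struct foo *newp)
{
	/* rcu_assign_pointer() orders initialization of *newp before the
	 * pointer becomes visible to readers; after this series the macro
	 * also rejects assignment of an incompatible pointer type. */
	rcu_assign_pointer(gp, newp);
}

static void install_foo_before_readers(struct foo *newp)
{
	/* RCU_INIT_POINTER() omits the ordering and is legal only when no
	 * reader can yet reach gp, which is the case bonding exploits. */
	RCU_INIT_POINTER(gp, newp);
}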
kernel/rcu/tree.c

@@ -369,6 +369,9 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 				 bool user)
 {
+	struct rcu_state *rsp;
+	struct rcu_data *rdp;
+
 	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
 	if (!user && !is_idle_task(current)) {
 		struct task_struct *idle __maybe_unused =
@@ -380,6 +383,10 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 			  current->pid, current->comm,
 			  idle->pid, idle->comm); /* must be idle task! */
 	}
+	for_each_rcu_flavor(rsp) {
+		rdp = this_cpu_ptr(rsp->rda);
+		do_nocb_deferred_wakeup(rdp);
+	}
 	rcu_prepare_for_idle(smp_processor_id());
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
 	smp_mb__before_atomic_inc();  /* See above. */
@@ -411,11 +418,12 @@ static void rcu_eqs_enter(bool user)
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
-	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
+	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
 		rdtp->dynticks_nesting = 0;
-	else
+		rcu_eqs_enter_common(rdtp, oldval, user);
+	} else {
 		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
-	rcu_eqs_enter_common(rdtp, oldval, user);
+	}
 }
 
 /**
@@ -533,11 +541,12 @@ static void rcu_eqs_exit(bool user)
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE(oldval < 0);
-	if (oldval & DYNTICK_TASK_NEST_MASK)
+	if (oldval & DYNTICK_TASK_NEST_MASK) {
 		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
-	else
+	} else {
 		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-	rcu_eqs_exit_common(rdtp, oldval, user);
+		rcu_eqs_exit_common(rdtp, oldval, user);
+	}
 }
 
 /**
@@ -716,7 +725,7 @@ bool rcu_lockdep_current_cpu_online(void)
 	bool ret;
 
 	if (in_nmi())
-		return 1;
+		return true;
 	preempt_disable();
 	rdp = this_cpu_ptr(&rcu_sched_data);
 	rnp = rdp->mynode;
@@ -754,6 +763,12 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
 	return (rdp->dynticks_snap & 0x1) == 0;
 }
 
+/*
+ * This function really isn't for public consumption, but RCU is special in
+ * that context switches can allow the state machine to make progress.
+ */
+extern void resched_cpu(int cpu);
+
 /*
  * Return true if the specified CPU has passed through a quiescent
  * state by virtue of being in or having passed through an dynticks
@@ -812,16 +827,34 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 	 */
 	rcu_kick_nohz_cpu(rdp->cpu);
 
+	/*
+	 * Alternatively, the CPU might be running in the kernel
+	 * for an extended period of time without a quiescent state.
+	 * Attempt to force the CPU through the scheduler to gain the
+	 * needed quiescent state, but only if the grace period has gone
+	 * on for an uncommonly long time.  If there are many stuck CPUs,
+	 * we will beat on the first one until it gets unstuck, then move
+	 * to the next.  Only do this for the primary flavor of RCU.
+	 */
+	if (rdp->rsp == rcu_state &&
+	    ULONG_CMP_GE(ACCESS_ONCE(jiffies), rdp->rsp->jiffies_resched)) {
+		rdp->rsp->jiffies_resched += 5;
+		resched_cpu(rdp->cpu);
+	}
+
 	return 0;
 }
 
 static void record_gp_stall_check_time(struct rcu_state *rsp)
 {
 	unsigned long j = ACCESS_ONCE(jiffies);
+	unsigned long j1;
 
 	rsp->gp_start = j;
 	smp_wmb(); /* Record start time before stall time. */
-	rsp->jiffies_stall = j + rcu_jiffies_till_stall_check();
+	j1 = rcu_jiffies_till_stall_check();
+	rsp->jiffies_stall = j + j1;
+	rsp->jiffies_resched = j + j1 / 2;
 }
 
 /*
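For a feel of the schedule that record_gp_stall_check_time() now sets up, here is a standalone arithmetic sketch. It assumes HZ=1000 and the default 21-second RCU CPU stall timeout; both are configuration-dependent assumptions, not values taken from this diff.

#include <stdio.h>

#define HZ 1000UL	/* assumed tick rate */

int main(void)
{
	unsigned long j = 0;		/* pretend the grace period starts at jiffy 0 */
	unsigned long j1 = 21 * HZ;	/* stand-in for rcu_jiffies_till_stall_check() */
	unsigned long jiffies_stall = j + j1;		/* stall warning at ~21s */
	unsigned long jiffies_resched = j + j1 / 2;	/* first prod at ~10.5s */

	printf("stall warning due at jiffy %lu\n", jiffies_stall);
	printf("resched_cpu() prodding starts at jiffy %lu, then every 5 jiffies\n",
	       jiffies_resched);
	return 0;
}

So a CPU looping in the kernel gets prodded through the scheduler well before the stall warning would fire, matching the comment in rcu_implicit_dynticks_qs() above.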
@@ -1517,6 +1550,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		rdp = this_cpu_ptr(rsp->rda);
 		if (rnp == rdp->mynode)
 			__note_gp_changes(rsp, rnp, rdp);
+		/* smp_mb() provided by prior unlock-lock pair. */
 		nocb += rcu_future_gp_cleanup(rsp, rnp);
 		raw_spin_unlock_irq(&rnp->lock);
 		cond_resched();
@@ -1562,6 +1596,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 			wait_event_interruptible(rsp->gp_wq,
 						 ACCESS_ONCE(rsp->gp_flags) &
 						 RCU_GP_FLAG_INIT);
+			/* Locking provides needed memory barrier. */
 			if (rcu_gp_init(rsp))
 				break;
 			cond_resched();
@@ -1591,6 +1626,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 					(!ACCESS_ONCE(rnp->qsmask) &&
 					 !rcu_preempt_blocked_readers_cgp(rnp)),
 					j);
+			/* Locking provides needed memory barriers. */
 			/* If grace period done, leave loop. */
 			if (!ACCESS_ONCE(rnp->qsmask) &&
 			    !rcu_preempt_blocked_readers_cgp(rnp))
@@ -1912,13 +1948,13 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
  * Adopt the RCU callbacks from the specified rcu_state structure's
  * orphanage.  The caller must hold the ->orphan_lock.
  */
-static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
+static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
 {
 	int i;
 	struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
 
 	/* No-CBs CPUs are handled specially. */
-	if (rcu_nocb_adopt_orphan_cbs(rsp, rdp))
+	if (rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
 		return;
 
 	/* Do the accounting first. */
@@ -1997,7 +2033,7 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 
 	/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
 	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
-	rcu_adopt_orphan_cbs(rsp);
+	rcu_adopt_orphan_cbs(rsp, flags);
 
 	/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
 	mask = rdp->grpmask;	/* rnp->grplo is constant. */
@@ -2318,6 +2354,9 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 	/* If there are callbacks ready, invoke them. */
 	if (cpu_has_callbacks_ready_to_invoke(rdp))
 		invoke_rcu_callbacks(rsp, rdp);
+
+	/* Do any needed deferred wakeups of rcuo kthreads. */
+	do_nocb_deferred_wakeup(rdp);
 }
 
 /*
@@ -2453,7 +2492,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 
 		if (cpu != -1)
 			rdp = per_cpu_ptr(rsp->rda, cpu);
-		offline = !__call_rcu_nocb(rdp, head, lazy);
+		offline = !__call_rcu_nocb(rdp, head, lazy, flags);
 		WARN_ON_ONCE(offline);
 		/* _call_rcu() is illegal on offline CPU; leak the callback. */
 		local_irq_restore(flags);
@@ -2773,6 +2812,10 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 	/* Check for CPU stalls, if enabled. */
 	check_cpu_stall(rsp, rdp);
 
+	/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
+	if (rcu_nohz_full_cpu(rsp))
+		return 0;
+
 	/* Is the RCU core waiting for a quiescent state from this CPU? */
 	if (rcu_scheduler_fully_active &&
 	    rdp->qs_pending && !rdp->passed_quiesce) {
@@ -2806,6 +2849,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 		return 1;
 	}
 
+	/* Does this CPU need a deferred NOCB wakeup? */
+	if (rcu_nocb_need_deferred_wakeup(rdp)) {
+		rdp->n_rp_nocb_defer_wakeup++;
+		return 1;
+	}
+
 	/* nothing to do */
 	rdp->n_rp_need_nothing++;
 	return 0;
@@ -3230,9 +3279,9 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
 {
 	int i;
 
-	for (i = rcu_num_lvls - 1; i > 0; i--)
+	rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
+	for (i = rcu_num_lvls - 2; i >= 0; i--)
 		rsp->levelspread[i] = CONFIG_RCU_FANOUT;
-	rsp->levelspread[0] = rcu_fanout_leaf;
 }
 #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
 static void __init rcu_init_levelspread(struct rcu_state *rsp)
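The rcu_init_levelspread() hunk above is easiest to see with concrete numbers. The standalone sketch below replays the old and fixed loops for an assumed two-level tree with CONFIG_RCU_FANOUT=64 and rcu_fanout_leaf=16; these values are illustrative, not from the merge.

#include <stdio.h>

#define NUM_LVLS    2
#define FANOUT      64	/* stands in for CONFIG_RCU_FANOUT */
#define FANOUT_LEAF 16	/* stands in for rcu_fanout_leaf */

int main(void)
{
	int oldspread[NUM_LVLS], newspread[NUM_LVLS], i;

	/* Old loop: the leaf fanout ended up at level 0 (the root). */
	for (i = NUM_LVLS - 1; i > 0; i--)
		oldspread[i] = FANOUT;
	oldspread[0] = FANOUT_LEAF;

	/* Fixed loop: the leaf fanout belongs at the last (leaf) level. */
	newspread[NUM_LVLS - 1] = FANOUT_LEAF;
	for (i = NUM_LVLS - 2; i >= 0; i--)
		newspread[i] = FANOUT;

	for (i = 0; i < NUM_LVLS; i++)
		printf("level %d: old=%d fixed=%d\n",
		       i, oldspread[i], newspread[i]);
	return 0;
}

With these inputs the old code yields {16, 64} and the fixed code {64, 16}; that is, the leaf and interior spreads were swapped whenever the fanout and leaf values differed.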
@@ -3362,6 +3411,8 @@ static void __init rcu_init_geometry(void)
 	if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF &&
 	    nr_cpu_ids == NR_CPUS)
 		return;
+	pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n",
+		rcu_fanout_leaf, nr_cpu_ids);
 
 	/*
 	 * Compute number of nodes that can be handled an rcu_node tree