mirror of
https://github.com/Fishwaldo/linux-bl808.git
synced 2025-06-17 20:25:19 +00:00
Merge branches 'doc.2021.01.06a', 'fixes.2021.01.04b', 'kfree_rcu.2021.01.04a', 'mmdumpobj.2021.01.22a', 'nocb.2021.01.06a', 'rt.2021.01.04a', 'stall.2021.01.06a', 'torture.2021.01.12a' and 'tortureall.2021.01.06a' into HEAD
doc.2021.01.06a: Documentation updates. fixes.2021.01.04b: Miscellaneous fixes. kfree_rcu.2021.01.04a: kfree_rcu() updates. mmdumpobj.2021.01.22a: Dump allocation point for memory blocks. nocb.2021.01.06a: RCU callback offload updates and cblist segment lengths. rt.2021.01.04a: Real-time updates. stall.2021.01.06a: RCU CPU stall warning updates. torture.2021.01.12a: Torture-test updates and polling SRCU grace-period API. tortureall.2021.01.06a: Torture-test script updates.
This commit is contained in:
parent
81ad58be2f
c26165efac
5ea5d1ed57
3375efeddf
147c6852d3
36221e109e
683954e55c
d945f797e4
e3e1a99787
commit
0d2460ba61
57 changed files with 2651 additions and 349 deletions
|
@ -807,6 +807,46 @@ static void srcu_leak_callback(struct rcu_head *rhp)
|
|||
{
|
||||
}
|
||||
|
||||
/*
 * Start an SRCU grace period, and also queue the callback if non-NULL.
 *
 * Returns a grace-period sequence-number snapshot ("cookie") taken after
 * the callback (if any) was enqueued; the needed grace period has either
 * been started by this call or was already in progress.  The cookie can
 * later be checked against ->srcu_gp_seq (see poll_state_synchronize_srcu()).
 */
static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
					     struct rcu_head *rhp, bool do_norm)
{
	unsigned long flags;
	int idx;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(ssp);
	/* Hold an SRCU read lock so that ->sda cannot go away under us. */
	idx = srcu_read_lock(ssp);
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rhp)
		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
	/* Advance callbacks past any already-completed grace periods. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	/* Snapshot the GP sequence number the new work must wait for. */
	s = rcu_seq_snap(&ssp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;	/* This CPU has not yet requested GP "s". */
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;	/* Expedited GP "s" not yet requested. */
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);
	/* Funnel the request up the srcu_node tree only when needed. */
	if (needgp)
		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(ssp, sdp->mynode, s);
	srcu_read_unlock(ssp, idx);
	return s;
}
|
||||
|
||||
/*
|
||||
* Enqueue an SRCU callback on the srcu_data structure associated with
|
||||
* the current CPU and the specified srcu_struct structure, initiating
|
||||
|
@ -838,14 +878,6 @@ static void srcu_leak_callback(struct rcu_head *rhp)
|
|||
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
|
||||
rcu_callback_t func, bool do_norm)
|
||||
{
|
||||
unsigned long flags;
|
||||
int idx;
|
||||
bool needexp = false;
|
||||
bool needgp = false;
|
||||
unsigned long s;
|
||||
struct srcu_data *sdp;
|
||||
|
||||
check_init_srcu_struct(ssp);
|
||||
if (debug_rcu_head_queue(rhp)) {
|
||||
/* Probable double call_srcu(), so leak the callback. */
|
||||
WRITE_ONCE(rhp->func, srcu_leak_callback);
|
||||
|
@ -853,28 +885,7 @@ static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
|
|||
return;
|
||||
}
|
||||
rhp->func = func;
|
||||
idx = srcu_read_lock(ssp);
|
||||
sdp = raw_cpu_ptr(ssp->sda);
|
||||
spin_lock_irqsave_rcu_node(sdp, flags);
|
||||
rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
|
||||
rcu_segcblist_advance(&sdp->srcu_cblist,
|
||||
rcu_seq_current(&ssp->srcu_gp_seq));
|
||||
s = rcu_seq_snap(&ssp->srcu_gp_seq);
|
||||
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
|
||||
if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
|
||||
sdp->srcu_gp_seq_needed = s;
|
||||
needgp = true;
|
||||
}
|
||||
if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
|
||||
sdp->srcu_gp_seq_needed_exp = s;
|
||||
needexp = true;
|
||||
}
|
||||
spin_unlock_irqrestore_rcu_node(sdp, flags);
|
||||
if (needgp)
|
||||
srcu_funnel_gp_start(ssp, sdp, s, do_norm);
|
||||
else if (needexp)
|
||||
srcu_funnel_exp_start(ssp, sdp->mynode, s);
|
||||
srcu_read_unlock(ssp, idx);
|
||||
(void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1003,6 +1014,77 @@ void synchronize_srcu(struct srcu_struct *ssp)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(synchronize_srcu);
|
||||
|
||||
/**
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  It is the caller's responsibility
 * to make sure that grace period happens, for example, by invoking
 * call_srcu() after return from get_state_synchronize_srcu().
 *
 * Return: opaque cookie (a snapshot of ->srcu_gp_seq).
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
	// Any prior manipulation of SRCU-protected data must happen
	// before the load from ->srcu_gp_seq.
	smp_mb();
	return rcu_seq_snap(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
|
||||
|
||||
/**
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  Unlike get_state_synchronize_srcu(),
 * this function also ensures that any needed SRCU grace period will be
 * started.  This convenience does come at a cost in terms of CPU overhead.
 *
 * Return: opaque cookie (a snapshot of ->srcu_gp_seq).
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
	/* No callback to queue; do_norm=true forgoes the expedited path. */
	return srcu_gp_start_if_needed(ssp, NULL, true);
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
|
||||
|
||||
/**
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 * @ssp: srcu_struct to provide cookie for.
 * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
 *
 * This function takes the cookie that was returned from either
 * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
 * returns @true if an SRCU grace period elapsed since the time that the
 * cookie was created.
 *
 * Because cookies are finite in size, wrapping/overflow is possible.
 * This is more pronounced on 32-bit systems where cookies are 32 bits,
 * where in theory wrapping could happen in about 14 hours assuming
 * 25-microsecond expedited SRCU grace periods.  However, a more likely
 * overflow lower bound is on the order of 24 days in the case of
 * one-millisecond SRCU grace periods.  Of course, wrapping in a 64-bit
 * system requires geologic timespans, as in more than seven million years
 * even for expedited SRCU grace periods.
 *
 * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems
 * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU.  This uses
 * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
 * few minutes.  If this proves to be a problem, this counter will be
 * expanded to the same size as for Tree SRCU.
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
	if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
		return false;
	// Ensure that the end of the SRCU grace period happens before
	// any subsequent code that the caller might execute.
	smp_mb(); // Pairs with implicit barriers at grace-period end.
	return true;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
|
||||
|
||||
/*
|
||||
* Callback function for srcu_barrier() use.
|
||||
*/
|
||||
|
@ -1160,6 +1242,7 @@ static void srcu_advance_state(struct srcu_struct *ssp)
|
|||
*/
|
||||
static void srcu_invoke_callbacks(struct work_struct *work)
|
||||
{
|
||||
long len;
|
||||
bool more;
|
||||
struct rcu_cblist ready_cbs;
|
||||
struct rcu_head *rhp;
|
||||
|
@ -1182,6 +1265,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
|
|||
/* We are on the job! Extract and invoke ready callbacks. */
|
||||
sdp->srcu_cblist_invoking = true;
|
||||
rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
|
||||
len = ready_cbs.len;
|
||||
spin_unlock_irq_rcu_node(sdp);
|
||||
rhp = rcu_cblist_dequeue(&ready_cbs);
|
||||
for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
|
||||
|
@ -1190,13 +1274,14 @@ static void srcu_invoke_callbacks(struct work_struct *work)
|
|||
rhp->func(rhp);
|
||||
local_bh_enable();
|
||||
}
|
||||
WARN_ON_ONCE(ready_cbs.len);
|
||||
|
||||
/*
|
||||
* Update counts, accelerate new callbacks, and if needed,
|
||||
* schedule another round of callback invocation.
|
||||
*/
|
||||
spin_lock_irq_rcu_node(sdp);
|
||||
rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
|
||||
rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
|
||||
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
|
||||
rcu_seq_snap(&ssp->srcu_gp_seq));
|
||||
sdp->srcu_cblist_invoking = false;
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue