mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-22 14:41:27 +00:00
srcu: Avoid local_irq_save() before acquiring spinlock_t
SRCU disables interrupts to get a stable per-CPU pointer and then acquires the spinlock which is in the per-CPU data structure. The release uses spin_unlock_irqrestore(). While this is correct on a non-RT kernel, this conflicts with the RT semantics because the spinlock is converted to a 'sleeping' spinlock. Sleeping locks can obviously not be acquired with interrupts disabled. Acquire the per-CPU pointer `ssp->sda' without disabling preemption and then acquire the spinlock_t of the per-CPU data structure. The lock will ensure that the data is consistent. The added call to check_init_srcu_struct() is now needed because a statically defined srcu_struct may remain uninitialized until this point and the newly introduced locking operation requires an initialized spinlock_t. This change was tested for four hours with 8*SRCU-N and 8*SRCU-P without causing any warnings. Cc: Lai Jiangshan <jiangshanlai@gmail.com> Cc: "Paul E. McKenney" <paulmck@kernel.org> Cc: Josh Triplett <josh@joshtriplett.org> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Cc: rcu@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
This commit is contained in:
parent
7fef6cff8f
commit
bde50d8ff8
1 changed file with 7 additions and 7 deletions
|
@ -777,14 +777,15 @@ static bool srcu_might_be_idle(struct srcu_struct *ssp)
|
||||||
unsigned long t;
|
unsigned long t;
|
||||||
unsigned long tlast;
|
unsigned long tlast;
|
||||||
|
|
||||||
|
check_init_srcu_struct(ssp);
|
||||||
/* If the local srcu_data structure has callbacks, not idle. */
|
/* If the local srcu_data structure has callbacks, not idle. */
|
||||||
local_irq_save(flags);
|
sdp = raw_cpu_ptr(ssp->sda);
|
||||||
sdp = this_cpu_ptr(ssp->sda);
|
spin_lock_irqsave_rcu_node(sdp, flags);
|
||||||
if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
|
if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
|
||||||
local_irq_restore(flags);
|
spin_unlock_irqrestore_rcu_node(sdp, flags);
|
||||||
return false; /* Callbacks already present, so not idle. */
|
return false; /* Callbacks already present, so not idle. */
|
||||||
}
|
}
|
||||||
local_irq_restore(flags);
|
spin_unlock_irqrestore_rcu_node(sdp, flags);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* No local callbacks, so probabalistically probe global state.
|
* No local callbacks, so probabalistically probe global state.
|
||||||
|
@ -864,9 +865,8 @@ static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
|
||||||
}
|
}
|
||||||
rhp->func = func;
|
rhp->func = func;
|
||||||
idx = srcu_read_lock(ssp);
|
idx = srcu_read_lock(ssp);
|
||||||
local_irq_save(flags);
|
sdp = raw_cpu_ptr(ssp->sda);
|
||||||
sdp = this_cpu_ptr(ssp->sda);
|
spin_lock_irqsave_rcu_node(sdp, flags);
|
||||||
spin_lock_rcu_node(sdp);
|
|
||||||
rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
|
rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
|
||||||
rcu_segcblist_advance(&sdp->srcu_cblist,
|
rcu_segcblist_advance(&sdp->srcu_cblist,
|
||||||
rcu_seq_current(&ssp->srcu_gp_seq));
|
rcu_seq_current(&ssp->srcu_gp_seq));
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue