[PATCH] Deprecate synchronize_kernel, GPL replacement
The synchronize_kernel() primitive is used for quite a few different purposes: waiting for RCU readers, waiting for NMIs, waiting for interrupts, and so on. This makes RCU code harder to read, since synchronize_kernel() might or might not have matching rcu_read_lock()s. This patch creates a new synchronize_rcu() that is to be used for RCU readers and a new synchronize_sched() that is used for the rest. These two new primitives currently have the same implementation, but this might well change with additional real-time support. Both new primitives are GPL-only; the old primitive is deprecated.

Signed-off-by: Paul E. McKenney <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 512345be25
commit 9b06e81898
2 changed files with 34 additions and 5 deletions
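To make the intended split concrete, here is a minimal updater-side sketch; it is not part of this patch, and struct foo, foo_list, foo_lock, foo_reader_sum() and foo_delete() are invented for illustration. Readers that take rcu_read_lock() are waited for with the new synchronize_rcu(), in code that previously would have called synchronize_kernel().

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {				/* hypothetical RCU-protected element */
	struct list_head list;
	int data;
};

static LIST_HEAD(foo_list);		/* hypothetical RCU-protected list */
static DEFINE_SPINLOCK(foo_lock);	/* serializes updaters only */

/* Reader: marks its critical section with rcu_read_lock()/rcu_read_unlock(). */
static int foo_reader_sum(void)
{
	struct foo *p;
	int sum = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(p, &foo_list, list)
		sum += p->data;
	rcu_read_unlock();

	return sum;
}

/* Updater: waits for those readers with synchronize_rcu(), where it
 * previously would have called synchronize_kernel(). */
static void foo_delete(struct foo *p)
{
	spin_lock(&foo_lock);
	list_del_rcu(&p->list);
	spin_unlock(&foo_lock);

	synchronize_rcu();	/* all pre-existing rcu_read_lock() readers done */
	kfree(p);
}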
include/linux/rcupdate.h

@@ -157,9 +157,9 @@ static inline int rcu_pending(int cpu)
 /**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
  *
- * When synchronize_kernel() is invoked on one CPU while other CPUs
+ * When synchronize_rcu() is invoked on one CPU while other CPUs
  * are within RCU read-side critical sections, then the
- * synchronize_kernel() is guaranteed to block until after all the other
+ * synchronize_rcu() is guaranteed to block until after all the other
  * CPUs exit their critical sections. Similarly, if call_rcu() is invoked
  * on one CPU while other CPUs are within RCU read-side critical
  * sections, invocation of the corresponding RCU callback is deferred
@@ -256,6 +256,21 @@ static inline int rcu_pending(int cpu)
 	(p) = (v); \
 })
 
+/**
+ * synchronize_sched - block until all CPUs have exited any non-preemptive
+ * kernel code sequences.
+ *
+ * This means that all preempt_disable code sequences, including NMI and
+ * hardware-interrupt handlers, in progress on entry will have completed
+ * before this primitive returns. However, this does not guarantee that
+ * softirq handlers will have completed, since in some kernels
+ *
+ * This primitive provides the guarantees made by the (deprecated)
+ * synchronize_kernel() API. In contrast, synchronize_rcu() only
+ * guarantees that rcu_read_lock() sections will have completed.
+ */
+#define synchronize_sched() synchronize_rcu()
+
 extern void rcu_init(void);
 extern void rcu_check_callbacks(int cpu, int user);
 extern void rcu_restart_cpu(int cpu);
@@ -265,7 +280,9 @@ extern void FASTCALL(call_rcu(struct rcu_head *head,
 				void (*func)(struct rcu_head *head)));
 extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
 				void (*func)(struct rcu_head *head)));
-extern void synchronize_kernel(void);
+extern __deprecated_for_modules void synchronize_kernel(void);
+extern void synchronize_rcu(void);
+void synchronize_idle(void);
 
 #endif /* __KERNEL__ */
 #endif /* __LINUX_RCUPDATE_H */
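For the other half of the split, a hedged sketch of the kind of code synchronize_sched() is meant for; hook_fn, run_hook() and unregister_hook() are invented names, and memory-ordering details are omitted for brevity. Sections that rely only on preemption or interrupts being disabled have no rcu_read_lock() for synchronize_rcu() to pair with, so the updater uses synchronize_sched(), which per the new kernel-doc above also waits out hardware-interrupt and NMI handlers.

#include <linux/preempt.h>
#include <linux/rcupdate.h>

static int (*hook_fn)(int);	/* hypothetical hook, registered elsewhere */

/* Caller side: protected only by disabling preemption, no rcu_read_lock(). */
static int run_hook(int arg)
{
	int (*fn)(int);
	int ret = 0;

	preempt_disable();
	fn = hook_fn;
	if (fn)
		ret = fn(arg);
	preempt_enable();

	return ret;
}

/* Teardown side: synchronize_sched() waits for every preempt-disabled
 * region (and interrupt/NMI handlers) in flight to complete, which is
 * what synchronize_kernel() used to be called for here. */
static void unregister_hook(void)
{
	hook_fn = NULL;
	synchronize_sched();	/* was: synchronize_kernel() */
}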