mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-07-23 23:32:14 +00:00)
rcu: Abstract out rcu_irq_enter_check_tick() from rcu_nmi_enter()
There will likely be exception handlers that can sleep, which rules out the usual approach of invoking rcu_nmi_enter() on entry and also rcu_nmi_exit() on all exit paths. However, the alternative approach of just not calling anything can prevent RCU from coaxing quiescent states from nohz_full CPUs that are looping in the kernel: RCU must instead IPI them explicitly. It would be better to enable the scheduler tick on such CPUs to interact with RCU in a lighter-weight manner, and this enabling is one of the things that rcu_nmi_enter() currently does.

What is needed is something that helps RCU coax quiescent states while not preventing subsequent sleeps. This commit therefore splits out the nohz_full scheduler-tick enabling from the rest of the rcu_nmi_enter() logic into a new function named rcu_irq_enter_check_tick().

[ tglx: Renamed the function and made it a nop when context tracking is off ]

[ mingo: Fixed a CONFIG_NO_HZ_FULL assumption, harmonized and fixed all the comment blocks and cleaned up rcu_nmi_enter()/exit() definitions. ]

Suggested-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20200521202116.996113173@linutronix.de
This commit is contained in:
parent b1fcf9b83c
commit aaf2bc50df

2 changed files with 80 additions and 33 deletions
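The commit message above explains why a sleepable exception handler cannot use the usual rcu_nmi_enter()/rcu_nmi_exit() bracketing. As a rough sketch of the intended usage, not code from this commit, and with an invented handler name, such an entry path would only poke the new helper on the way in and stay free to sleep afterwards:

#include <linux/hardirq.h>	/* rcu_irq_enter_check_tick() */
#include <linux/ptrace.h>	/* struct pt_regs */

/* exc_sleepable_example() is a made-up handler used purely for illustration. */
void exc_sleepable_example(struct pt_regs *regs)
{
	/*
	 * Let RCU turn the scheduler tick back on if this is a nohz_full
	 * CPU from which RCU is trying to coax a quiescent state.  Unlike
	 * rcu_nmi_enter(), no matching exit call is required, so the rest
	 * of the handler may block.
	 */
	rcu_irq_enter_check_tick();

	/* ... handler body, which is allowed to sleep ... */
}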
include/linux/hardirq.h
@@ -2,31 +2,28 @@
 #ifndef LINUX_HARDIRQ_H
 #define LINUX_HARDIRQ_H
 
+#include <linux/context_tracking_state.h>
 #include <linux/preempt.h>
 #include <linux/lockdep.h>
 #include <linux/ftrace_irq.h>
 #include <linux/vtime.h>
 #include <asm/hardirq.h>
 
-
 extern void synchronize_irq(unsigned int irq);
 extern bool synchronize_hardirq(unsigned int irq);
 
-#if defined(CONFIG_TINY_RCU)
-
-static inline void rcu_nmi_enter(void)
-{
-}
-
-static inline void rcu_nmi_exit(void)
-{
-}
-
-#else
-extern void rcu_nmi_enter(void);
-extern void rcu_nmi_exit(void);
-#endif
+#ifdef CONFIG_NO_HZ_FULL
+void __rcu_irq_enter_check_tick(void);
+#else
+static inline void __rcu_irq_enter_check_tick(void) { }
+#endif
+
+static __always_inline void rcu_irq_enter_check_tick(void)
+{
+	if (context_tracking_enabled())
+		__rcu_irq_enter_check_tick();
+}
 
 /*
  * It is safe to do non-atomic ops on ->hardirq_context,
  * because NMI handlers may not preempt and the ops are
@@ -65,6 +62,14 @@ extern void irq_exit(void);
 #define arch_nmi_exit() do { } while (0)
 #endif
 
+#ifdef CONFIG_TINY_RCU
+static inline void rcu_nmi_enter(void) { }
+static inline void rcu_nmi_exit(void) { }
+#else
+extern void rcu_nmi_enter(void);
+extern void rcu_nmi_exit(void);
+#endif
+
 /*
  * NMI vs Tracing
  * --------------
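For readers skimming the hunk above: rcu_irq_enter_check_tick() is an __always_inline wrapper gated on context_tracking_enabled(), and the out-of-line __rcu_irq_enter_check_tick() only exists on CONFIG_NO_HZ_FULL kernels, so the whole thing reduces to almost nothing when those options are off. A minimal userspace analogue of that compile-time/runtime gating pattern, with invented names standing in for the kernel symbols, might look like this:

#include <stdbool.h>
#include <stdio.h>

#ifdef FEATURE_FULL				/* stands in for CONFIG_NO_HZ_FULL */
void __feature_check(void);			/* out-of-line helper, defined elsewhere */
#else
static inline void __feature_check(void) { }	/* stub that compiles away */
#endif

/* Stands in for context_tracking_enabled(); the kernel uses a static key here. */
static bool feature_runtime_enabled(void)
{
	return false;
}

static inline void feature_check(void)
{
	if (feature_runtime_enabled())
		__feature_check();	/* only reached when the feature is on */
}

int main(void)
{
	feature_check();		/* a no-op in this configuration */
	puts("wrapper returned without calling the helper");
	return 0;
}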