Mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-06-23 23:21:46 +00:00)
Merge branch 'rcu/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu into core/urgent
commit 29f742f88a

6 changed files with 74 additions and 103 deletions
Documentation/RCU/trace.txt

@@ -99,18 +99,11 @@ o       "qp" indicates that RCU still expects a quiescent state from
 o       "dt" is the current value of the dyntick counter that is incremented
         when entering or leaving dynticks idle state, either by the
-        scheduler or by irq.  The number after the "/" is the interrupt
-        nesting depth when in dyntick-idle state, or one greater than
-        the interrupt-nesting depth otherwise.
-
-        This field is displayed only for CONFIG_NO_HZ kernels.
-
-o       "dn" is the current value of the dyntick counter that is incremented
-        when entering or leaving dynticks idle state via NMI.  If both
-        the "dt" and "dn" values are even, then this CPU is in dynticks
-        idle mode and may be ignored by RCU.  If either of these two
-        counters is odd, then RCU must be alert to the possibility of
-        an RCU read-side critical section running on this CPU.
+        scheduler or by irq.  This number is even if the CPU is in
+        dyntick idle mode and odd otherwise.  The number after the first
+        "/" is the interrupt nesting depth when in dyntick-idle state,
+        or one greater than the interrupt-nesting depth otherwise.
+        The number after the second "/" is the NMI nesting depth.
 
         This field is displayed only for CONFIG_NO_HZ kernels.
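The parity rule described above can be illustrated with a small stand-alone program. This is a sketch only; decode_dt() and its arguments are hypothetical helpers, not kernel code.

#include <stdio.h>

/* Illustrative decoder for the "dt=X/Y/Z" triple described above. */
static void decode_dt(int dynticks, int nesting, int nmi_nesting)
{
        int idle = (dynticks & 0x1) == 0;       /* even counter => dynticks-idle */

        printf("dt=%d/%d/%d: %s, irq nesting %d, NMI nesting %d\n",
               dynticks, nesting, nmi_nesting,
               idle ? "dynticks-idle, RCU may ignore this CPU"
                    : "non-idle, RCU must watch this CPU",
               idle ? nesting : nesting - 1,    /* per the rule above */
               nmi_nesting);
}

int main(void)
{
        decode_dt(85, 1, 0);    /* odd counter: CPU is not idle */
        decode_dt(92, 0, 0);    /* even counter: CPU in dynticks idle */
        return 0;
}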
kernel/rcutree.c (130 lines changed)
@@ -163,7 +163,7 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 #ifdef CONFIG_NO_HZ
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
         .dynticks_nesting = 1,
-        .dynticks = 1,
+        .dynticks = ATOMIC_INIT(1),
 };
 #endif /* #ifdef CONFIG_NO_HZ */
 
@@ -322,13 +322,25 @@ void rcu_enter_nohz(void)
         unsigned long flags;
         struct rcu_dynticks *rdtp;
 
-        smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
         local_irq_save(flags);
         rdtp = &__get_cpu_var(rcu_dynticks);
-        rdtp->dynticks++;
-        rdtp->dynticks_nesting--;
-        WARN_ON_ONCE(rdtp->dynticks & 0x1);
+        if (--rdtp->dynticks_nesting) {
+                local_irq_restore(flags);
+                return;
+        }
+        /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+        smp_mb__before_atomic_inc();  /* See above. */
+        atomic_inc(&rdtp->dynticks);
+        smp_mb__after_atomic_inc();  /* Force ordering with next sojourn. */
+        WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
         local_irq_restore(flags);
+
+        /* If the interrupt queued a callback, get out of dyntick mode. */
+        if (in_irq() &&
+            (__get_cpu_var(rcu_sched_data).nxtlist ||
+             __get_cpu_var(rcu_bh_data).nxtlist ||
+             rcu_preempt_needs_cpu(smp_processor_id())))
+                set_need_resched();
 }
 
 /*
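For readers following the new protocol, here is a minimal user-space model of the nesting-plus-parity scheme this hunk introduces. It assumes C11 atomics in place of the kernel's atomic_t and barrier primitives; all names are illustrative, not kernel API.

#include <stdatomic.h>
#include <assert.h>

/* Illustrative per-CPU dyntick state after this change (not kernel code). */
struct dynticks_model {
        int nesting;            /* cf. dynticks_nesting, touched with irqs off */
        atomic_int dynticks;    /* cf. atomic_t dynticks: even=idle, odd=non-idle */
};

static struct dynticks_model dt = { .nesting = 1, .dynticks = 1 };

static void model_enter_nohz(void)              /* cf. rcu_enter_nohz() */
{
        if (--dt.nesting)
                return;                         /* still nested: stay non-idle */
        /* The seq_cst RMW stands in for the smp_mb__before/after_atomic_inc()
         * pair around atomic_inc() in the hunk above. */
        atomic_fetch_add(&dt.dynticks, 1);
        assert((atomic_load(&dt.dynticks) & 0x1) == 0); /* now even: idle */
}

static void model_exit_nohz(void)               /* cf. rcu_exit_nohz() */
{
        if (dt.nesting++)
                return;                         /* was already non-idle */
        atomic_fetch_add(&dt.dynticks, 1);
        assert(atomic_load(&dt.dynticks) & 0x1);        /* now odd: non-idle */
}

int main(void)
{
        model_enter_nohz();     /* outermost exit to idle: counter 1 -> 2 */
        model_exit_nohz();      /* leave idle again: counter 2 -> 3 */
        return 0;
}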
@@ -344,11 +356,16 @@ void rcu_exit_nohz(void)
 
         local_irq_save(flags);
         rdtp = &__get_cpu_var(rcu_dynticks);
-        rdtp->dynticks++;
-        rdtp->dynticks_nesting++;
-        WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
+        if (rdtp->dynticks_nesting++) {
+                local_irq_restore(flags);
+                return;
+        }
+        smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
+        atomic_inc(&rdtp->dynticks);
+        /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+        smp_mb__after_atomic_inc();  /* See above. */
+        WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
         local_irq_restore(flags);
-        smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
 
 /**
@@ -362,11 +379,15 @@ void rcu_nmi_enter(void)
 {
         struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
 
-        if (rdtp->dynticks & 0x1)
+        if (rdtp->dynticks_nmi_nesting == 0 &&
+            (atomic_read(&rdtp->dynticks) & 0x1))
                 return;
-        rdtp->dynticks_nmi++;
-        WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
-        smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+        rdtp->dynticks_nmi_nesting++;
+        smp_mb__before_atomic_inc();  /* Force delay from prior write. */
+        atomic_inc(&rdtp->dynticks);
+        /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+        smp_mb__after_atomic_inc();  /* See above. */
+        WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 }
 
 /**
@@ -380,11 +401,14 @@ void rcu_nmi_exit(void)
 {
         struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
 
-        if (rdtp->dynticks & 0x1)
+        if (rdtp->dynticks_nmi_nesting == 0 ||
+            --rdtp->dynticks_nmi_nesting != 0)
                 return;
-        smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
-        rdtp->dynticks_nmi++;
-        WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
+        /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+        smp_mb__before_atomic_inc();  /* See above. */
+        atomic_inc(&rdtp->dynticks);
+        smp_mb__after_atomic_inc();  /* Force delay to next write. */
+        WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 }
 
 /**
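The NMI-side bookkeeping in the two hunks above can be modeled the same way. A sketch under the same assumptions as before (C11 atomics, illustrative names, NMIs assumed not to nest, matching the usage these counters see):

#include <stdatomic.h>
#include <assert.h>

static int nmi_nesting;                 /* cf. dynticks_nmi_nesting */
static atomic_int dynticks;             /* cf. atomic_t dynticks */

static void model_nmi_enter(void)       /* cf. rcu_nmi_enter() */
{
        /* If no NMI is in progress and the counter is already odd,
         * the CPU is non-idle and RCU is watching: nothing to do. */
        if (nmi_nesting == 0 && (atomic_load(&dynticks) & 0x1))
                return;
        nmi_nesting++;
        atomic_fetch_add(&dynticks, 1);         /* odd: mark non-idle */
        assert(atomic_load(&dynticks) & 0x1);
}

static void model_nmi_exit(void)        /* cf. rcu_nmi_exit() */
{
        /* Only the outermost NMI that bumped the counter flips it back. */
        if (nmi_nesting == 0 || --nmi_nesting != 0)
                return;
        atomic_fetch_add(&dynticks, 1);         /* even: idle again */
        assert((atomic_load(&dynticks) & 0x1) == 0);
}

int main(void)
{
        atomic_store(&dynticks, 2);     /* CPU in dynticks-idle */
        model_nmi_enter();              /* NMI from idle: 2 -> 3 */
        model_nmi_exit();               /* outermost exit: 3 -> 4 */
        return 0;
}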
@@ -395,13 +419,7 @@ void rcu_nmi_exit(void)
  */
 void rcu_irq_enter(void)
 {
-        struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
-
-        if (rdtp->dynticks_nesting++)
-                return;
-        rdtp->dynticks++;
-        WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
-        smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+        rcu_exit_nohz();
 }
 
 /**
@@ -413,18 +431,7 @@ void rcu_irq_enter(void)
  */
 void rcu_irq_exit(void)
 {
-        struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
-
-        if (--rdtp->dynticks_nesting)
-                return;
-        smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
-        rdtp->dynticks++;
-        WARN_ON_ONCE(rdtp->dynticks & 0x1);
-
-        /* If the interrupt queued a callback, get out of dyntick mode. */
-        if (__this_cpu_read(rcu_sched_data.nxtlist) ||
-            __this_cpu_read(rcu_bh_data.nxtlist))
-                set_need_resched();
+        rcu_enter_nohz();
 }
 
 #ifdef CONFIG_SMP
@@ -436,19 +443,8 @@ void rcu_irq_exit(void)
  */
 static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
-        int ret;
-        int snap;
-        int snap_nmi;
-
-        snap = rdp->dynticks->dynticks;
-        snap_nmi = rdp->dynticks->dynticks_nmi;
-        smp_mb(); /* Order sampling of snap with end of grace period. */
-        rdp->dynticks_snap = snap;
-        rdp->dynticks_nmi_snap = snap_nmi;
-        ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0);
-        if (ret)
-                rdp->dynticks_fqs++;
-        return ret;
+        rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
+        return 0;
 }
 
 /*
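In the kernel's atomic API, value-returning atomic operations imply full memory barriers before and after, so atomic_add_return(0, &v) is a read of v that doubles as a full barrier; that is why the explicit smp_mb() and the separate snapshot variables above can go away. A rough user-space analogue in C11, illustrative only:

#include <stdatomic.h>
#include <stdio.h>

/* Rough analogue of atomic_add_return(0, &v): a read that is also a
 * full barrier, because it is a sequentially consistent RMW of +0.
 * The previous value it returns equals the current value, since
 * adding zero changes nothing. */
static int fully_ordered_read(atomic_int *v)
{
        return atomic_fetch_add(v, 0);
}

int main(void)
{
        atomic_int counter = 42;

        printf("%d\n", fully_ordered_read(&counter));   /* prints 42 */
        return 0;
}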
@@ -459,16 +455,11 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
  */
 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 {
-        long curr;
-        long curr_nmi;
-        long snap;
-        long snap_nmi;
+        unsigned long curr;
+        unsigned long snap;
 
-        curr = rdp->dynticks->dynticks;
-        snap = rdp->dynticks_snap;
-        curr_nmi = rdp->dynticks->dynticks_nmi;
-        snap_nmi = rdp->dynticks_nmi_snap;
-        smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
+        curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
+        snap = (unsigned long)rdp->dynticks_snap;
 
         /*
          * If the CPU passed through or entered a dynticks idle phase with
@@ -478,8 +469,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
          * read-side critical section that started before the beginning
          * of the current RCU grace period.
          */
-        if ((curr != snap || (curr & 0x1) == 0) &&
-            (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) {
+        if ((curr & 0x1) == 0 || ULONG_CMP_GE(curr, snap + 2)) {
                 rdp->dynticks_fqs++;
                 return 1;
         }
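ULONG_CMP_GE() is RCU's wraparound-tolerant unsigned "a >= b" comparison (roughly, ULONG_MAX / 2 >= (a) - (b)). The new test then reads: the CPU is idle right now (even counter), or the counter advanced by at least two since the snapshot, meaning the CPU passed through a full idle sojourn in between. A stand-alone sketch of that logic:

#include <limits.h>
#include <stdio.h>

/* Wraparound-tolerant "a >= b" for unsigned counters (cf. ULONG_CMP_GE). */
#define ULONG_CMP_GE(a, b)      (ULONG_MAX / 2 >= (a) - (b))

/* Did this CPU pass through a quiescent state since "snap" was taken? */
static int passed_quiescent(unsigned long curr, unsigned long snap)
{
        return (curr & 0x1) == 0 || ULONG_CMP_GE(curr, snap + 2);
}

int main(void)
{
        printf("%d\n", passed_quiescent(5, 5));         /* 0: still odd, unchanged */
        printf("%d\n", passed_quiescent(6, 5));         /* 1: now even => idle */
        printf("%d\n", passed_quiescent(7, 5));         /* 1: passed through idle */
        printf("%d\n", passed_quiescent(1, ULONG_MAX)); /* 1: survives counter wrap */
        return 0;
}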
@@ -908,6 +898,12 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
         unsigned long gp_duration;
 
         WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
+
+        /*
+         * Ensure that all grace-period and pre-grace-period activity
+         * is seen before the assignment to rsp->completed.
+         */
+        smp_mb(); /* See above block comment. */
         gp_duration = jiffies - rsp->gp_start;
         if (gp_duration > rsp->gp_max)
                 rsp->gp_max = gp_duration;
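The smp_mb() added here is the classic publish pattern: finish all grace-period bookkeeping before other CPUs can observe the updated completion number. A user-space sketch of the same idea with C11 fences; the names are stand-ins, not the kernel's:

#include <stdatomic.h>

static int gp_work_done;        /* stand-in for grace-period bookkeeping */
static atomic_long completed;   /* stand-in for rsp->completed */

static void publish_grace_period(long gpnum)
{
        gp_work_done = 1;       /* complete all grace-period updates first... */
        /* ...then a full fence, so no observer can see the new completed
         * number while still seeing stale grace-period state. */
        atomic_thread_fence(memory_order_seq_cst);
        atomic_store_explicit(&completed, gpnum, memory_order_relaxed);
}

int main(void)
{
        publish_grace_period(1);
        return 0;
}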
@@ -1455,25 +1451,11 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 static void rcu_process_callbacks(void)
 {
-        /*
-         * Memory references from any prior RCU read-side critical sections
-         * executed by the interrupted code must be seen before any RCU
-         * grace-period manipulations below.
-         */
-        smp_mb(); /* See above block comment. */
-
         __rcu_process_callbacks(&rcu_sched_state,
                                 &__get_cpu_var(rcu_sched_data));
         __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
         rcu_preempt_process_callbacks();
 
-        /*
-         * Memory references from any later RCU read-side critical sections
-         * executed by the interrupted code must be seen after any RCU
-         * grace-period manipulations above.
-         */
-        smp_mb(); /* See above block comment. */
-
         /* If we are last CPU on way to dyntick-idle mode, accelerate it. */
         rcu_needs_cpu_flush();
 }
kernel/rcutree.h

@@ -84,11 +84,9 @@
  * Dynticks per-CPU state.
  */
 struct rcu_dynticks {
-        int dynticks_nesting;   /* Track nesting level, sort of. */
-        int dynticks;           /* Even value for dynticks-idle, else odd. */
-        int dynticks_nmi;       /* Even value for either dynticks-idle or */
-                                /*  not in nmi handler, else odd.  So this */
-                                /*  remains even for nmi from irq handler. */
+        int dynticks_nesting;   /* Track irq/process nesting level. */
+        int dynticks_nmi_nesting; /* Track NMI nesting level. */
+        atomic_t dynticks;      /* Even value for dynticks-idle, else odd. */
 };
 
 /* RCU's kthread states for tracing. */
@@ -284,7 +282,6 @@ struct rcu_data {
         /* 3) dynticks interface. */
         struct rcu_dynticks *dynticks;  /* Shared per-CPU dynticks state. */
         int dynticks_snap;              /* Per-GP tracking for dynticks. */
-        int dynticks_nmi_snap;          /* Per-GP tracking for dynticks_nmi. */
 #endif /* #ifdef CONFIG_NO_HZ */
 
         /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
kernel/rcutree_plugin.h

@@ -1520,7 +1520,6 @@ int rcu_needs_cpu(int cpu)
 {
         int c = 0;
         int snap;
-        int snap_nmi;
         int thatcpu;
 
         /* Check for being in the holdoff period. */
@@ -1531,10 +1530,10 @@ int rcu_needs_cpu(int cpu)
         for_each_online_cpu(thatcpu) {
                 if (thatcpu == cpu)
                         continue;
-                snap = per_cpu(rcu_dynticks, thatcpu).dynticks;
-                snap_nmi = per_cpu(rcu_dynticks, thatcpu).dynticks_nmi;
+                snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
+                                                     thatcpu).dynticks);
                 smp_mb(); /* Order sampling of snap with end of grace period. */
-                if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) {
+                if ((snap & 0x1) != 0) {
                         per_cpu(rcu_dyntick_drain, cpu) = 0;
                         per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
                         return rcu_needs_cpu_quick_check(cpu);
kernel/rcutree_trace.c

@@ -69,10 +69,10 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
                    rdp->passed_quiesc, rdp->passed_quiesc_completed,
                    rdp->qs_pending);
 #ifdef CONFIG_NO_HZ
-        seq_printf(m, " dt=%d/%d dn=%d df=%lu",
-                   rdp->dynticks->dynticks,
+        seq_printf(m, " dt=%d/%d/%d df=%lu",
+                   atomic_read(&rdp->dynticks->dynticks),
                    rdp->dynticks->dynticks_nesting,
-                   rdp->dynticks->dynticks_nmi,
+                   rdp->dynticks->dynticks_nmi_nesting,
                    rdp->dynticks_fqs);
 #endif /* #ifdef CONFIG_NO_HZ */
         seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
@@ -141,9 +141,9 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
                    rdp->qs_pending);
 #ifdef CONFIG_NO_HZ
         seq_printf(m, ",%d,%d,%d,%lu",
-                   rdp->dynticks->dynticks,
+                   atomic_read(&rdp->dynticks->dynticks),
                    rdp->dynticks->dynticks_nesting,
-                   rdp->dynticks->dynticks_nmi,
+                   rdp->dynticks->dynticks_nmi_nesting,
                    rdp->dynticks_fqs);
 #endif /* #ifdef CONFIG_NO_HZ */
         seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
@@ -167,7 +167,7 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
 {
         seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",");
 #ifdef CONFIG_NO_HZ
-        seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
+        seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
 #endif /* #ifdef CONFIG_NO_HZ */
         seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n");
 #ifdef CONFIG_TREE_PREEMPT_RCU
lib/locking-selftest.c

@@ -144,7 +144,7 @@ static void init_shared_classes(void)
 
 #define HARDIRQ_ENTER()                         \
         local_irq_disable();                    \
-        irq_enter();                            \
+        __irq_enter();                          \
         WARN_ON(!in_irq());
 
 #define HARDIRQ_EXIT()                          \