mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-03-30 11:04:25 +00:00
locking/lockdep: Improve 'invalid wait context' splat
The 'invalid wait context' splat doesn't print all the information required to reconstruct / validate the error, specifically the irq-context state is missing. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
a13f58a0ca
commit
9a019db0b6
1 changed file with 31 additions and 20 deletions
|
@ -3952,10 +3952,36 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline short task_wait_context(struct task_struct *curr)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* Set appropriate wait type for the context; for IRQs we have to take
|
||||||
|
* into account force_irqthread as that is implied by PREEMPT_RT.
|
||||||
|
*/
|
||||||
|
if (curr->hardirq_context) {
|
||||||
|
/*
|
||||||
|
* Check if force_irqthreads will run us threaded.
|
||||||
|
*/
|
||||||
|
if (curr->hardirq_threaded || curr->irq_config)
|
||||||
|
return LD_WAIT_CONFIG;
|
||||||
|
|
||||||
|
return LD_WAIT_SPIN;
|
||||||
|
} else if (curr->softirq_context) {
|
||||||
|
/*
|
||||||
|
* Softirqs are always threaded.
|
||||||
|
*/
|
||||||
|
return LD_WAIT_CONFIG;
|
||||||
|
}
|
||||||
|
|
||||||
|
return LD_WAIT_MAX;
|
||||||
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
print_lock_invalid_wait_context(struct task_struct *curr,
|
print_lock_invalid_wait_context(struct task_struct *curr,
|
||||||
struct held_lock *hlock)
|
struct held_lock *hlock)
|
||||||
{
|
{
|
||||||
|
short curr_inner;
|
||||||
|
|
||||||
if (!debug_locks_off())
|
if (!debug_locks_off())
|
||||||
return 0;
|
return 0;
|
||||||
if (debug_locks_silent)
|
if (debug_locks_silent)
|
||||||
|
@ -3971,6 +3997,10 @@ print_lock_invalid_wait_context(struct task_struct *curr,
|
||||||
print_lock(hlock);
|
print_lock(hlock);
|
||||||
|
|
||||||
pr_warn("other info that might help us debug this:\n");
|
pr_warn("other info that might help us debug this:\n");
|
||||||
|
|
||||||
|
curr_inner = task_wait_context(curr);
|
||||||
|
pr_warn("context-{%d:%d}\n", curr_inner, curr_inner);
|
||||||
|
|
||||||
lockdep_print_held_locks(curr);
|
lockdep_print_held_locks(curr);
|
||||||
|
|
||||||
pr_warn("stack backtrace:\n");
|
pr_warn("stack backtrace:\n");
|
||||||
|
@ -4017,26 +4047,7 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
|
||||||
}
|
}
|
||||||
depth++;
|
depth++;
|
||||||
|
|
||||||
/*
|
curr_inner = task_wait_context(curr);
|
||||||
* Set appropriate wait type for the context; for IRQs we have to take
|
|
||||||
* into account force_irqthread as that is implied by PREEMPT_RT.
|
|
||||||
*/
|
|
||||||
if (curr->hardirq_context) {
|
|
||||||
/*
|
|
||||||
* Check if force_irqthreads will run us threaded.
|
|
||||||
*/
|
|
||||||
if (curr->hardirq_threaded || curr->irq_config)
|
|
||||||
curr_inner = LD_WAIT_CONFIG;
|
|
||||||
else
|
|
||||||
curr_inner = LD_WAIT_SPIN;
|
|
||||||
} else if (curr->softirq_context) {
|
|
||||||
/*
|
|
||||||
* Softirqs are always threaded.
|
|
||||||
*/
|
|
||||||
curr_inner = LD_WAIT_CONFIG;
|
|
||||||
} else {
|
|
||||||
curr_inner = LD_WAIT_MAX;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (; depth < curr->lockdep_depth; depth++) {
|
for (; depth < curr->lockdep_depth; depth++) {
|
||||||
struct held_lock *prev = curr->held_locks + depth;
|
struct held_lock *prev = curr->held_locks + depth;
|
||||||
|
|
Loading…
Add table
Reference in a new issue