Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull lockdep fix from Thomas Gleixner:
 "A single fix for the stack trace caching logic in lockdep, where the
  duplicate avoidance managed to store no back trace at all"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/lockdep: Fix stack trace caching logic
This commit is contained in:
Linus Torvalds 2016-02-14 12:02:05 -08:00
commit cb490d632b

View file

@@ -1822,7 +1822,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next, int distance, int trylock_loop)
+	       struct held_lock *next, int distance, int *stack_saved)
 {
 	struct lock_list *entry;
 	int ret;
@@ -1883,8 +1883,11 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		}
 	}
-	if (!trylock_loop && !save_trace(&trace))
-		return 0;
+	if (!*stack_saved) {
+		if (!save_trace(&trace))
+			return 0;
+		*stack_saved = 1;
+	}
 
 	/*
 	 * Ok, all validations passed, add the new lock
@@ -1907,6 +1910,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * Debugging printouts:
 	 */
 	if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
+		/* We drop graph lock, so another thread can overwrite trace. */
+		*stack_saved = 0;
 		graph_unlock();
 		printk("\n new dependency: ");
 		print_lock_name(hlock_class(prev));
@@ -1929,7 +1934,7 @@ static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
 	int depth = curr->lockdep_depth;
-	int trylock_loop = 0;
+	int stack_saved = 0;
 	struct held_lock *hlock;
 
 	/*
@@ -1956,7 +1961,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		 */
 		if (hlock->read != 2 && hlock->check) {
 			if (!check_prev_add(curr, hlock, next,
-						distance, trylock_loop))
+						distance, &stack_saved))
 				return 0;
 			/*
 			 * Stop after the first non-trylock entry,
@@ -1979,7 +1984,6 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		if (curr->held_locks[depth].irq_context !=
 		    curr->held_locks[depth-1].irq_context)
 			break;
-		trylock_loop = 1;
 	}
 	return 1;
 out_bug: