Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking fixes from Ingo Molnar:
 "Misc fixes:

   - Fix a S390 boot hang that was caused by the lock-break logic.
     Remove lock-break to begin with, as review suggested it was
     unreasonably fragile and our confidence in its continued good
     health is lower than our confidence in its removal.

   - Remove the lockdep cross-release checking code for now, because of
     unresolved false positive warnings. This should make lockdep work
     well everywhere again.

   - Get rid of the final (and single) ACCESS_ONCE() straggler and
     remove the API from v4.15.

   - Fix a liblockdep build warning"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tools/lib/lockdep: Add missing declaration of 'pr_cont()'
  checkpatch: Remove ACCESS_ONCE() warning
  compiler.h: Remove ACCESS_ONCE()
  tools/include: Remove ACCESS_ONCE()
  tools/perf: Convert ACCESS_ONCE() to READ_ONCE()
  locking/lockdep: Remove the cross-release locking checks
  locking/core: Remove break_lock field when CONFIG_GENERIC_LOCKBREAK=y
  locking/core: Fix deadlock during boot on systems with GENERIC_LOCKBREAK
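The ACCESS_ONCE() removal mentioned above is a mechanical conversion to READ_ONCE()/WRITE_ONCE(). A minimal sketch of what such a conversion looks like; the helper and variable names here are hypothetical and not taken from this series:

#include <linux/compiler.h>

/* Hypothetical example: force the compiler to emit exactly one load. */
static int peek_flag(int *flag)
{
	/* Old style, removed in v4.15: */
	/* return ACCESS_ONCE(*flag); */

	/* Replacement with the same single-access guarantee: */
	return READ_ONCE(*flag);
}

READ_ONCE()/WRITE_ONCE() cover everything ACCESS_ONCE() did (and also handle non-scalar types), which is why the old macro could be dropped outright.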
Commit: 1f76a75561
15 changed files with 63 additions and 1800 deletions
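Most of the deletions below come from the cross-release code in the lockdep core. As background, the removed comment block further down documents the pattern that code tried to track; a minimal, hypothetical kernel-C sketch of that Task-A/Task-B pattern (the lock and completion names a, b, c are illustrative only, mirroring the removed comment's diagram):

#include <linux/completion.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(a);
static DEFINE_MUTEX(b);
static DECLARE_COMPLETION(c);

static void task_a(void)		/* the waiter */
{
	mutex_lock(&a);
	mutex_unlock(&a);
	wait_for_completion(&c);	/* crossrelease started recording lock history here */
}

static void task_b(void)		/* the completer */
{
	mutex_lock(&b);
	mutex_unlock(&b);
	complete(&c);			/* commit: a B -> C dependency was recorded */
}

Dependencies such as B -> C connected ordinary locks with completions across contexts; the unresolved false positives produced by this tracking are the reason the whole facility is removed here.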
@@ -57,10 +57,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
#include <linux/slab.h>
#endif

#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
@@ -75,19 +71,6 @@ module_param(lock_stat, int, 0644);
#define lock_stat 0
#endif

#ifdef CONFIG_BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK
static int crossrelease_fullstack = 1;
#else
static int crossrelease_fullstack;
#endif
static int __init allow_crossrelease_fullstack(char *str)
{
	crossrelease_fullstack = 1;
	return 0;
}

early_param("crossrelease_fullstack", allow_crossrelease_fullstack);

/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 * class/list/hash allocators.
@@ -740,18 +723,6 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
	return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
}

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
static void cross_init(struct lockdep_map *lock, int cross);
static int cross_lock(struct lockdep_map *lock);
static int lock_acquire_crosslock(struct held_lock *hlock);
static int lock_release_crosslock(struct lockdep_map *lock);
#else
static inline void cross_init(struct lockdep_map *lock, int cross) {}
static inline int cross_lock(struct lockdep_map *lock) { return 0; }
static inline int lock_acquire_crosslock(struct held_lock *hlock) { return 2; }
static inline int lock_release_crosslock(struct lockdep_map *lock) { return 2; }
#endif

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
@@ -1151,41 +1122,22 @@ print_circular_lock_scenario(struct held_lock *src,
		printk(KERN_CONT "\n\n");
	}

	if (cross_lock(tgt->instance)) {
		printk(" Possible unsafe locking scenario by crosslock:\n\n");
		printk(" CPU0 CPU1\n");
		printk(" ---- ----\n");
		printk(" lock(");
		__print_lock_name(parent);
		printk(KERN_CONT ");\n");
		printk(" lock(");
		__print_lock_name(target);
		printk(KERN_CONT ");\n");
		printk(" lock(");
		__print_lock_name(source);
		printk(KERN_CONT ");\n");
		printk(" unlock(");
		__print_lock_name(target);
		printk(KERN_CONT ");\n");
		printk("\n *** DEADLOCK ***\n\n");
	} else {
		printk(" Possible unsafe locking scenario:\n\n");
		printk(" CPU0 CPU1\n");
		printk(" ---- ----\n");
		printk(" lock(");
		__print_lock_name(target);
		printk(KERN_CONT ");\n");
		printk(" lock(");
		__print_lock_name(parent);
		printk(KERN_CONT ");\n");
		printk(" lock(");
		__print_lock_name(target);
		printk(KERN_CONT ");\n");
		printk(" lock(");
		__print_lock_name(source);
		printk(KERN_CONT ");\n");
		printk("\n *** DEADLOCK ***\n\n");
	}
	printk(" Possible unsafe locking scenario:\n\n");
	printk(" CPU0 CPU1\n");
	printk(" ---- ----\n");
	printk(" lock(");
	__print_lock_name(target);
	printk(KERN_CONT ");\n");
	printk(" lock(");
	__print_lock_name(parent);
	printk(KERN_CONT ");\n");
	printk(" lock(");
	__print_lock_name(target);
	printk(KERN_CONT ");\n");
	printk(" lock(");
	__print_lock_name(source);
	printk(KERN_CONT ");\n");
	printk("\n *** DEADLOCK ***\n\n");
}

/*
@@ -1211,10 +1163,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
		curr->comm, task_pid_nr(curr));
	print_lock(check_src);

	if (cross_lock(check_tgt->instance))
		pr_warn("\nbut now in release context of a crosslock acquired at the following:\n");
	else
		pr_warn("\nbut task is already holding lock:\n");
	pr_warn("\nbut task is already holding lock:\n");

	print_lock(check_tgt);
	pr_warn("\nwhich lock already depends on the new lock.\n\n");
@@ -1244,9 +1193,7 @@ static noinline int print_circular_bug(struct lock_list *this,
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	if (cross_lock(check_tgt->instance))
		this->trace = *trace;
	else if (!save_trace(&this->trace))
	if (!save_trace(&this->trace))
		return 0;

	depth = get_lock_depth(target);
@@ -1850,9 +1797,6 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
		if (nest)
			return 2;

		if (cross_lock(prev->instance))
			continue;

		return print_deadlock_bug(curr, prev, next);
	}
	return 1;
@@ -2018,31 +1962,26 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
	for (;;) {
		int distance = curr->lockdep_depth - depth + 1;
		hlock = curr->held_locks + depth - 1;
		/*
		 * Only non-crosslock entries get new dependencies added.
		 * Crosslock entries will be added by commit later:
		 */
		if (!cross_lock(hlock->instance)) {
			/*
			 * Only non-recursive-read entries get new dependencies
			 * added:
			 */
			if (hlock->read != 2 && hlock->check) {
				int ret = check_prev_add(curr, hlock, next,
						distance, &trace, save_trace);
				if (!ret)
					return 0;

				/*
				 * Stop after the first non-trylock entry,
				 * as non-trylock entries have added their
				 * own direct dependencies already, so this
				 * lock is connected to them indirectly:
				 */
				if (!hlock->trylock)
					break;
			}
		/*
		 * Only non-recursive-read entries get new dependencies
		 * added:
		 */
		if (hlock->read != 2 && hlock->check) {
			int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
			if (!ret)
				return 0;

			/*
			 * Stop after the first non-trylock entry,
			 * as non-trylock entries have added their
			 * own direct dependencies already, so this
			 * lock is connected to them indirectly:
			 */
			if (!hlock->trylock)
				break;
		}

		depth--;
		/*
		 * End of lock-stack?
@@ -3292,21 +3231,10 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
void lockdep_init_map(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass)
{
	cross_init(lock, 0);
	__lockdep_init_map(lock, name, key, subclass);
}
EXPORT_SYMBOL_GPL(lockdep_init_map);

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
void lockdep_init_map_crosslock(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass)
{
	cross_init(lock, 1);
	__lockdep_init_map(lock, name, key, subclass);
}
EXPORT_SYMBOL_GPL(lockdep_init_map_crosslock);
#endif

struct lock_class_key __lockdep_no_validate__;
EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
@@ -3362,7 +3290,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
	int chain_head = 0;
	int class_idx;
	u64 chain_key;
	int ret;

	if (unlikely(!debug_locks))
		return 0;
@@ -3411,8 +3338,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,

	class_idx = class - lock_classes + 1;

	/* TODO: nest_lock is not implemented for crosslock yet. */
	if (depth && !cross_lock(lock)) {
	if (depth) {
		hlock = curr->held_locks + depth - 1;
		if (hlock->class_idx == class_idx && nest_lock) {
			if (hlock->references) {
@@ -3500,14 +3426,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
		return 0;

	ret = lock_acquire_crosslock(hlock);
	/*
	 * 2 means normal acquire operations are needed. Otherwise, it's
	 * ok just to return with '0:fail, 1:success'.
	 */
	if (ret != 2)
		return ret;

	curr->curr_chain_key = chain_key;
	curr->lockdep_depth++;
	check_chain_key(curr);
@@ -3745,19 +3663,11 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
	struct task_struct *curr = current;
	struct held_lock *hlock;
	unsigned int depth;
	int ret, i;
	int i;

	if (unlikely(!debug_locks))
		return 0;

	ret = lock_release_crosslock(lock);
	/*
	 * 2 means normal release operations are needed. Otherwise, it's
	 * ok just to return with '0:fail, 1:success'.
	 */
	if (ret != 2)
		return ret;

	depth = curr->lockdep_depth;
	/*
	 * So we're all set to release this lock.. wait what lock? We don't
@@ -4675,495 +4585,3 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
	dump_stack();
}
EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);

#ifdef CONFIG_LOCKDEP_CROSSRELEASE

/*
 * Crossrelease works by recording a lock history for each thread and
 * connecting those historic locks that were taken after the
 * wait_for_completion() in the complete() context.
 *
 *	Task-A				Task-B
 *
 *	mutex_lock(&A);
 *	mutex_unlock(&A);
 *
 *	wait_for_completion(&C);
 *	  lock_acquire_crosslock();
 *	    atomic_inc_return(&cross_gen_id);
 *				|
 *				|	mutex_lock(&B);
 *				|	mutex_unlock(&B);
 *				|
 *				|	complete(&C);
 *				`--	lock_commit_crosslock();
 *
 * Which will then add a dependency between B and C.
 */

#define xhlock(i) (current->xhlocks[(i) % MAX_XHLOCKS_NR])

/*
 * Whenever a crosslock is held, cross_gen_id will be increased.
 */
static atomic_t cross_gen_id; /* Can be wrapped */

/*
 * Make an entry of the ring buffer invalid.
 */
static inline void invalidate_xhlock(struct hist_lock *xhlock)
{
	/*
	 * Normally, xhlock->hlock.instance must be !NULL.
	 */
	xhlock->hlock.instance = NULL;
}

/*
 * Lock history stacks; we have 2 nested lock history stacks:
 *
 * HARD(IRQ)
 * SOFT(IRQ)
 *
 * The thing is that once we complete a HARD/SOFT IRQ the future task locks
 * should not depend on any of the locks observed while running the IRQ. So
 * what we do is rewind the history buffer and erase all our knowledge of that
 * temporal event.
 */

void crossrelease_hist_start(enum xhlock_context_t c)
{
	struct task_struct *cur = current;

	if (!cur->xhlocks)
		return;

	cur->xhlock_idx_hist[c] = cur->xhlock_idx;
	cur->hist_id_save[c] = cur->hist_id;
}

void crossrelease_hist_end(enum xhlock_context_t c)
{
	struct task_struct *cur = current;

	if (cur->xhlocks) {
		unsigned int idx = cur->xhlock_idx_hist[c];
		struct hist_lock *h = &xhlock(idx);

		cur->xhlock_idx = idx;

		/* Check if the ring was overwritten. */
		if (h->hist_id != cur->hist_id_save[c])
			invalidate_xhlock(h);
	}
}

/*
 * lockdep_invariant_state() is used to annotate independence inside a task, to
 * make one task look like multiple independent 'tasks'.
 *
 * Take for instance workqueues; each work is independent of the last. The
 * completion of a future work does not depend on the completion of a past work
 * (in general). Therefore we must not carry that (lock) dependency across
 * works.
 *
 * This is true for many things; pretty much all kthreads fall into this
 * pattern, where they have an invariant state and future completions do not
 * depend on past completions. Its just that since they all have the 'same'
 * form -- the kthread does the same over and over -- it doesn't typically
 * matter.
 *
 * The same is true for system-calls, once a system call is completed (we've
 * returned to userspace) the next system call does not depend on the lock
 * history of the previous system call.
 *
 * They key property for independence, this invariant state, is that it must be
 * a point where we hold no locks and have no history. Because if we were to
 * hold locks, the restore at _end() would not necessarily recover it's history
 * entry. Similarly, independence per-definition means it does not depend on
 * prior state.
 */
void lockdep_invariant_state(bool force)
{
	/*
	 * We call this at an invariant point, no current state, no history.
	 * Verify the former, enforce the latter.
	 */
	WARN_ON_ONCE(!force && current->lockdep_depth);
	if (current->xhlocks)
		invalidate_xhlock(&xhlock(current->xhlock_idx));
}

static int cross_lock(struct lockdep_map *lock)
{
	return lock ? lock->cross : 0;
}

/*
 * This is needed to decide the relationship between wrapable variables.
 */
static inline int before(unsigned int a, unsigned int b)
{
	return (int)(a - b) < 0;
}

static inline struct lock_class *xhlock_class(struct hist_lock *xhlock)
{
	return hlock_class(&xhlock->hlock);
}

static inline struct lock_class *xlock_class(struct cross_lock *xlock)
{
	return hlock_class(&xlock->hlock);
}

/*
 * Should we check a dependency with previous one?
 */
static inline int depend_before(struct held_lock *hlock)
{
	return hlock->read != 2 && hlock->check && !hlock->trylock;
}

/*
 * Should we check a dependency with next one?
 */
static inline int depend_after(struct held_lock *hlock)
{
	return hlock->read != 2 && hlock->check;
}

/*
 * Check if the xhlock is valid, which would be false if,
 *
 * 1. Has not used after initializaion yet.
 * 2. Got invalidated.
 *
 * Remind hist_lock is implemented as a ring buffer.
 */
static inline int xhlock_valid(struct hist_lock *xhlock)
{
	/*
	 * xhlock->hlock.instance must be !NULL.
	 */
	return !!xhlock->hlock.instance;
}

/*
 * Record a hist_lock entry.
 *
 * Irq disable is only required.
 */
static void add_xhlock(struct held_lock *hlock)
{
	unsigned int idx = ++current->xhlock_idx;
	struct hist_lock *xhlock = &xhlock(idx);

#ifdef CONFIG_DEBUG_LOCKDEP
	/*
	 * This can be done locklessly because they are all task-local
	 * state, we must however ensure IRQs are disabled.
	 */
	WARN_ON_ONCE(!irqs_disabled());
#endif

	/* Initialize hist_lock's members */
	xhlock->hlock = *hlock;
	xhlock->hist_id = ++current->hist_id;

	xhlock->trace.nr_entries = 0;
	xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;
	xhlock->trace.entries = xhlock->trace_entries;

	if (crossrelease_fullstack) {
		xhlock->trace.skip = 3;
		save_stack_trace(&xhlock->trace);
	} else {
		xhlock->trace.nr_entries = 1;
		xhlock->trace.entries[0] = hlock->acquire_ip;
	}
}

static inline int same_context_xhlock(struct hist_lock *xhlock)
{
	return xhlock->hlock.irq_context == task_irq_context(current);
}

/*
 * This should be lockless as far as possible because this would be
 * called very frequently.
 */
static void check_add_xhlock(struct held_lock *hlock)
{
	/*
	 * Record a hist_lock, only in case that acquisitions ahead
	 * could depend on the held_lock. For example, if the held_lock
	 * is trylock then acquisitions ahead never depends on that.
	 * In that case, we don't need to record it. Just return.
	 */
	if (!current->xhlocks || !depend_before(hlock))
		return;

	add_xhlock(hlock);
}

/*
 * For crosslock.
 */
static int add_xlock(struct held_lock *hlock)
{
	struct cross_lock *xlock;
	unsigned int gen_id;

	if (!graph_lock())
		return 0;

	xlock = &((struct lockdep_map_cross *)hlock->instance)->xlock;

	/*
	 * When acquisitions for a crosslock are overlapped, we use
	 * nr_acquire to perform commit for them, based on cross_gen_id
	 * of the first acquisition, which allows to add additional
	 * dependencies.
	 *
	 * Moreover, when no acquisition of a crosslock is in progress,
	 * we should not perform commit because the lock might not exist
	 * any more, which might cause incorrect memory access. So we
	 * have to track the number of acquisitions of a crosslock.
	 *
	 * depend_after() is necessary to initialize only the first
	 * valid xlock so that the xlock can be used on its commit.
	 */
	if (xlock->nr_acquire++ && depend_after(&xlock->hlock))
		goto unlock;

	gen_id = (unsigned int)atomic_inc_return(&cross_gen_id);
	xlock->hlock = *hlock;
	xlock->hlock.gen_id = gen_id;
unlock:
	graph_unlock();
	return 1;
}

/*
 * Called for both normal and crosslock acquires. Normal locks will be
 * pushed on the hist_lock queue. Cross locks will record state and
 * stop regular lock_acquire() to avoid being placed on the held_lock
 * stack.
 *
 * Return: 0 - failure;
 *         1 - crosslock, done;
 *         2 - normal lock, continue to held_lock[] ops.
 */
static int lock_acquire_crosslock(struct held_lock *hlock)
{
	/*
	 *	CONTEXT 1		CONTEXT 2
	 *	---------		---------
	 *	lock A (cross)
	 *	X = atomic_inc_return(&cross_gen_id)
	 *	~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
	 *				Y = atomic_read_acquire(&cross_gen_id)
	 *				lock B
	 *
	 * atomic_read_acquire() is for ordering between A and B,
	 * IOW, A happens before B, when CONTEXT 2 see Y >= X.
	 *
	 * Pairs with atomic_inc_return() in add_xlock().
	 */
	hlock->gen_id = (unsigned int)atomic_read_acquire(&cross_gen_id);

	if (cross_lock(hlock->instance))
		return add_xlock(hlock);

	check_add_xhlock(hlock);
	return 2;
}

static int copy_trace(struct stack_trace *trace)
{
	unsigned long *buf = stack_trace + nr_stack_trace_entries;
	unsigned int max_nr = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
	unsigned int nr = min(max_nr, trace->nr_entries);

	trace->nr_entries = nr;
	memcpy(buf, trace->entries, nr * sizeof(trace->entries[0]));
	trace->entries = buf;
	nr_stack_trace_entries += nr;

	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
		dump_stack();

		return 0;
	}

	return 1;
}

static int commit_xhlock(struct cross_lock *xlock, struct hist_lock *xhlock)
{
	unsigned int xid, pid;
	u64 chain_key;

	xid = xlock_class(xlock) - lock_classes;
	chain_key = iterate_chain_key((u64)0, xid);
	pid = xhlock_class(xhlock) - lock_classes;
	chain_key = iterate_chain_key(chain_key, pid);

	if (lookup_chain_cache(chain_key))
		return 1;

	if (!add_chain_cache_classes(xid, pid, xhlock->hlock.irq_context,
				chain_key))
		return 0;

	if (!check_prev_add(current, &xlock->hlock, &xhlock->hlock, 1,
			    &xhlock->trace, copy_trace))
		return 0;

	return 1;
}

static void commit_xhlocks(struct cross_lock *xlock)
{
	unsigned int cur = current->xhlock_idx;
	unsigned int prev_hist_id = xhlock(cur).hist_id;
	unsigned int i;

	if (!graph_lock())
		return;

	if (xlock->nr_acquire) {
		for (i = 0; i < MAX_XHLOCKS_NR; i++) {
			struct hist_lock *xhlock = &xhlock(cur - i);

			if (!xhlock_valid(xhlock))
				break;

			if (before(xhlock->hlock.gen_id, xlock->hlock.gen_id))
				break;

			if (!same_context_xhlock(xhlock))
				break;

			/*
			 * Filter out the cases where the ring buffer was
			 * overwritten and the current entry has a bigger
			 * hist_id than the previous one, which is impossible
			 * otherwise:
			 */
			if (unlikely(before(prev_hist_id, xhlock->hist_id)))
				break;

			prev_hist_id = xhlock->hist_id;

			/*
			 * commit_xhlock() returns 0 with graph_lock already
			 * released if fail.
			 */
			if (!commit_xhlock(xlock, xhlock))
				return;
		}
	}

	graph_unlock();
}

void lock_commit_crosslock(struct lockdep_map *lock)
{
	struct cross_lock *xlock;
	unsigned long flags;

	if (unlikely(!debug_locks || current->lockdep_recursion))
		return;

	if (!current->xhlocks)
		return;

	/*
	 * Do commit hist_locks with the cross_lock, only in case that
	 * the cross_lock could depend on acquisitions after that.
	 *
	 * For example, if the cross_lock does not have the 'check' flag
	 * then we don't need to check dependencies and commit for that.
	 * Just skip it. In that case, of course, the cross_lock does
	 * not depend on acquisitions ahead, either.
	 *
	 * WARNING: Don't do that in add_xlock() in advance. When an
	 * acquisition context is different from the commit context,
	 * invalid(skipped) cross_lock might be accessed.
	 */
	if (!depend_after(&((struct lockdep_map_cross *)lock)->xlock.hlock))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);
	current->lockdep_recursion = 1;
	xlock = &((struct lockdep_map_cross *)lock)->xlock;
	commit_xhlocks(xlock);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_commit_crosslock);

/*
 * Return: 0 - failure;
 *         1 - crosslock, done;
 *         2 - normal lock, continue to held_lock[] ops.
 */
static int lock_release_crosslock(struct lockdep_map *lock)
{
	if (cross_lock(lock)) {
		if (!graph_lock())
			return 0;
		((struct lockdep_map_cross *)lock)->xlock.nr_acquire--;
		graph_unlock();
		return 1;
	}
	return 2;
}

static void cross_init(struct lockdep_map *lock, int cross)
{
	if (cross)
		((struct lockdep_map_cross *)lock)->xlock.nr_acquire = 0;

	lock->cross = cross;

	/*
	 * Crossrelease assumes that the ring buffer size of xhlocks
	 * is aligned with power of 2. So force it on build.
	 */
	BUILD_BUG_ON(MAX_XHLOCKS_NR & (MAX_XHLOCKS_NR - 1));
}

void lockdep_init_task(struct task_struct *task)
{
	int i;

	task->xhlock_idx = UINT_MAX;
	task->hist_id = 0;

	for (i = 0; i < XHLOCK_CTX_NR; i++) {
		task->xhlock_idx_hist[i] = UINT_MAX;
		task->hist_id_save[i] = 0;
	}

	task->xhlocks = kzalloc(sizeof(struct hist_lock) * MAX_XHLOCKS_NR,
				GFP_KERNEL);
}

void lockdep_free_task(struct task_struct *task)
{
	if (task->xhlocks) {
		void *tmp = task->xhlocks;
		/* Diable crossrelease for current */
		task->xhlocks = NULL;
		kfree(tmp);
	}
}
#endif
@@ -66,12 +66,8 @@ void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
			break; \
		preempt_enable(); \
\
		if (!(lock)->break_lock) \
			(lock)->break_lock = 1; \
		while ((lock)->break_lock) \
			arch_##op##_relax(&lock->raw_lock); \
		arch_##op##_relax(&lock->raw_lock); \
	} \
	(lock)->break_lock = 0; \
} \
\
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
@@ -86,12 +82,9 @@ unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
		local_irq_restore(flags); \
		preempt_enable(); \
\
		if (!(lock)->break_lock) \
			(lock)->break_lock = 1; \
		while ((lock)->break_lock) \
			arch_##op##_relax(&lock->raw_lock); \
		arch_##op##_relax(&lock->raw_lock); \
	} \
	(lock)->break_lock = 0; \
\
	return flags; \
} \
\