Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core/locking changes from Ingo Molnar:
 "Main changes:

   - another mutex optimization, from Davidlohr Bueso

   - improved lglock lockdep tracking, from Michel Lespinasse

   - [ assorted smaller updates, improvements, cleanups. ]"

* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  generic-ipi/locking: Fix misleading smp_call_function_any() description
  hung_task debugging: Print more info when reporting the problem
  mutex: Avoid label warning when !CONFIG_MUTEX_SPIN_ON_OWNER
  mutex: Do not unnecessarily deal with waiters
  mutex: Fix/document access-once assumption in mutex_can_spin_on_owner()
  lglock: Update lockdep annotations to report recursive local locks
  lockdep: Introduce lock_acquire_exclusive()/shared() helper macros
commit 4689550bb2

5 changed files with 59 additions and 105 deletions
include/linux/lockdep.h

@@ -365,7 +365,7 @@ extern void lockdep_trace_alloc(gfp_t mask);
 
 #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
 
-#else /* !LOCKDEP */
+#else /* !CONFIG_LOCKDEP */
 
 static inline void lockdep_off(void)
 {
@@ -479,82 +479,36 @@ static inline void print_irqtrace_events(struct task_struct *curr)
  * on the per lock-class debug mode:
  */
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
-# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
-# else
-# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
-# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i)
-# endif
-# define spin_release(l, n, i) lock_release(l, n, i)
+#ifdef CONFIG_PROVE_LOCKING
+#define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
+#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 2, n, i)
+#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 2, n, i)
 #else
-# define spin_acquire(l, s, t, i) do { } while (0)
-# define spin_release(l, n, i) do { } while (0)
+#define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
+#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i)
+#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i)
 #endif
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
-# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i)
-# else
-# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
-# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i)
-# endif
-# define rwlock_release(l, n, i) lock_release(l, n, i)
-#else
-# define rwlock_acquire(l, s, t, i) do { } while (0)
-# define rwlock_acquire_read(l, s, t, i) do { } while (0)
-# define rwlock_release(l, n, i) do { } while (0)
-#endif
+#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
+#define spin_release(l, n, i) lock_release(l, n, i)
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
-# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
-# else
-# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
-# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
-# endif
-# define mutex_release(l, n, i) lock_release(l, n, i)
-#else
-# define mutex_acquire(l, s, t, i) do { } while (0)
-# define mutex_acquire_nest(l, s, t, n, i) do { } while (0)
-# define mutex_release(l, n, i) do { } while (0)
-#endif
+#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
+#define rwlock_release(l, n, i) lock_release(l, n, i)
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
-# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
-# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i)
-# else
-# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
-# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
-# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i)
-# endif
+#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
+#define mutex_release(l, n, i) lock_release(l, n, i)
+
+#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
+#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
 # define rwsem_release(l, n, i) lock_release(l, n, i)
-#else
-# define rwsem_acquire(l, s, t, i) do { } while (0)
-# define rwsem_acquire_nest(l, s, t, n, i) do { } while (0)
-# define rwsem_acquire_read(l, s, t, i) do { } while (0)
-# define rwsem_release(l, n, i) do { } while (0)
-#endif
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
-# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
-# else
-# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
-# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
-# endif
+#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
 # define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
-#else
-# define lock_map_acquire(l) do { } while (0)
-# define lock_map_acquire_read(l) do { } while (0)
-# define lock_map_release(l) do { } while (0)
-#endif
 
 #ifdef CONFIG_PROVE_LOCKING
 # define might_lock(lock) \
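The hunk above replaces five per-lock-type #ifdef ladders with three generic helpers, so each wrapper only has to choose exclusive, shared, or recursive-shared semantics. The standalone sketch below mimics that funneling with simplified stand-in macros (a printf in place of the real lock_acquire(), made-up lock names); it is an illustration of the idea, not the kernel API.

#include <stdio.h>

/* Stand-in for the kernel's lock_acquire(): just show what would be recorded. */
static void lock_acquire(const char *map, int read, int check)
{
	printf("lock_acquire(%s, read=%d, check=%d)\n", map, read, check);
}

/* Same shape as the CONFIG_PROVE_LOCKING variants in the patch (check=2). */
#define lock_acquire_exclusive(l)        lock_acquire(l, 0, 2)
#define lock_acquire_shared(l)           lock_acquire(l, 1, 2)
#define lock_acquire_shared_recursive(l) lock_acquire(l, 2, 2)

/* The per-type wrappers now differ only in which helper they pick. */
#define spin_acquire(l)        lock_acquire_exclusive(l)
#define rwsem_acquire_read(l)  lock_acquire_shared(l)
#define rwlock_acquire_read(l) lock_acquire_shared_recursive(l)

int main(void)
{
	spin_acquire("my_spinlock");      /* exclusive */
	rwsem_acquire_read("my_rwsem");   /* shared, non-recursive */
	rwlock_acquire_read("my_rwlock"); /* shared, recursive */
	return 0;
}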
kernel/hung_task.c

@@ -15,6 +15,7 @@
 #include <linux/lockdep.h>
 #include <linux/export.h>
 #include <linux/sysctl.h>
+#include <linux/utsname.h>
 
 /*
  * The number of tasks checked:
@@ -99,9 +100,13 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 	 * Ok, the task did not get scheduled for more than 2 minutes,
 	 * complain:
 	 */
-	printk(KERN_ERR "INFO: task %s:%d blocked for more than "
-		"%ld seconds.\n", t->comm, t->pid, timeout);
-	printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
+	pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
+		t->comm, t->pid, timeout);
+	pr_err(" %s %s %.*s\n",
+		print_tainted(), init_utsname()->release,
+		(int)strcspn(init_utsname()->version, " "),
+		init_utsname()->version);
+	pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
 		" disables this message.\n");
 	sched_show_task(t);
 	debug_show_held_locks(t);
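The second hunk switches the report to pr_err() and adds the taint state, kernel release, and the first word of the version string, trimmed with strcspn() and printed through the "%.*s" precision specifier. A small standalone sketch of that printf idiom, with made-up stand-ins for print_tainted() and init_utsname(), looks like this:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Illustrative values only; the kernel reads these from utsname(). */
	const char *tainted = "Not tainted";
	const char *release = "3.11.0";
	const char *version = "#1 SMP Mon Sep 2 12:00:00 UTC 2013";

	/* "%.*s" limits the output to the first space-delimited token ("#1"). */
	printf("      %s %s %.*s\n",
	       tainted, release,
	       (int)strcspn(version, " "), version);
	return 0;
}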
kernel/lglock.c

@@ -21,7 +21,7 @@ void lg_local_lock(struct lglock *lg)
 	arch_spinlock_t *lock;
 
 	preempt_disable();
-	rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	lock = this_cpu_ptr(lg->lock);
 	arch_spin_lock(lock);
 }
@@ -31,7 +31,7 @@ void lg_local_unlock(struct lglock *lg)
 {
 	arch_spinlock_t *lock;
 
-	rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	lock = this_cpu_ptr(lg->lock);
 	arch_spin_unlock(lock);
 	preempt_enable();
@@ -43,7 +43,7 @@ void lg_local_lock_cpu(struct lglock *lg, int cpu)
 	arch_spinlock_t *lock;
 
 	preempt_disable();
-	rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	lock = per_cpu_ptr(lg->lock, cpu);
 	arch_spin_lock(lock);
 }
@@ -53,7 +53,7 @@ void lg_local_unlock_cpu(struct lglock *lg, int cpu)
 {
 	arch_spinlock_t *lock;
 
-	rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	lock = per_cpu_ptr(lg->lock, cpu);
 	arch_spin_unlock(lock);
 	preempt_enable();
@@ -65,7 +65,7 @@ void lg_global_lock(struct lglock *lg)
 	int i;
 
 	preempt_disable();
-	rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_);
+	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	for_each_possible_cpu(i) {
 		arch_spinlock_t *lock;
 		lock = per_cpu_ptr(lg->lock, i);
@@ -78,7 +78,7 @@ void lg_global_unlock(struct lglock *lg)
 {
 	int i;
 
-	rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	for_each_possible_cpu(i) {
 		arch_spinlock_t *lock;
 		lock = per_cpu_ptr(lg->lock, i);
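These hunks annotate lg_local_lock()/lg_local_lock_cpu() as shared acquisitions and lg_global_lock() as an exclusive one: any number of CPUs may hold their own per-CPU lock at the same time, but a context must not take the same local lock twice, which lockdep can now report instead of treating it as a harmless recursive read. A kernel-style usage sketch follows; the lock, counter, and functions are hypothetical, assuming DEFINE_STATIC_LGLOCK() and friends from <linux/lglock.h>.

#include <linux/lglock.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Hypothetical example: per-CPU counters guarded by one lglock. */
DEFINE_STATIC_LGLOCK(example_lg);
static DEFINE_PER_CPU(int, example_counter);

/* Fast path: only this CPU's lock is taken (shared in lockdep terms). */
static void example_fast_path(void)
{
	lg_local_lock(&example_lg);
	this_cpu_inc(example_counter);
	lg_local_unlock(&example_lg);
}

/* Slow path: every CPU's lock is taken (exclusive in lockdep terms). */
static int example_slow_path(void)
{
	int cpu, total = 0;

	lg_global_lock(&example_lg);
	for_each_possible_cpu(cpu)
		total += per_cpu(example_counter, cpu);
	lg_global_unlock(&example_lg);

	return total;
}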
kernel/mutex.c

@@ -209,11 +209,13 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
  */
 static inline int mutex_can_spin_on_owner(struct mutex *lock)
 {
+	struct task_struct *owner;
 	int retval = 1;
 
 	rcu_read_lock();
-	if (lock->owner)
-		retval = lock->owner->on_cpu;
+	owner = ACCESS_ONCE(lock->owner);
+	if (owner)
+		retval = owner->on_cpu;
 	rcu_read_unlock();
 	/*
 	 * if lock->owner is not set, the mutex owner may have just acquired
@@ -461,7 +463,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			 * performed the optimistic spinning cannot be done.
 			 */
 			if (ACCESS_ONCE(ww->ctx))
-				break;
+				goto slowpath;
 		}
 
 		/*
@@ -472,7 +474,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		owner = ACCESS_ONCE(lock->owner);
 		if (owner && !mutex_spin_on_owner(lock, owner)) {
 			mspin_unlock(MLOCK(lock), &node);
-			break;
+			goto slowpath;
 		}
 
 		if ((atomic_read(&lock->count) == 1) &&
@@ -499,7 +501,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * the owner complete.
 		 */
 		if (!owner && (need_resched() || rt_task(task)))
-			break;
+			goto slowpath;
 
 		/*
 		 * The cpu_relax() call is a compiler barrier which forces
@@ -513,6 +515,10 @@ slowpath:
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);
 
+	/* once more, can we acquire the lock? */
+	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
+		goto skip_wait;
+
 	debug_mutex_lock_common(lock, &waiter);
 	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
@@ -520,9 +526,6 @@ slowpath:
 	list_add_tail(&waiter.list, &lock->wait_list);
 	waiter.task = task;
 
-	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
-		goto done;
-
 	lock_contended(&lock->dep_map, ip);
 
 	for (;;) {
@@ -561,24 +564,25 @@ slowpath:
 		schedule_preempt_disabled();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
 
-done:
-	lock_acquired(&lock->dep_map, ip);
-	/* got the lock - rejoice! */
 	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	/* set it to 0 if there are no waiters left: */
+	if (likely(list_empty(&lock->wait_list)))
+		atomic_set(&lock->count, 0);
+	debug_mutex_free_waiter(&waiter);
 
+skip_wait:
+	/* got the lock - cleanup and rejoice! */
+	lock_acquired(&lock->dep_map, ip);
 	mutex_set_owner(lock);
 
 	if (!__builtin_constant_p(ww_ctx == NULL)) {
-		struct ww_mutex *ww = container_of(lock,
-						   struct ww_mutex,
-						   base);
+		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 		struct mutex_waiter *cur;
 
 		/*
 		 * This branch gets optimized out for the common case,
 		 * and is only important for ww_mutex_lock.
 		 */
-
 		ww_mutex_lock_acquired(ww, ww_ctx);
 		ww->ctx = ww_ctx;
@@ -592,15 +596,8 @@ done:
 		}
 	}
 
-	/* set it to 0 if there are no waiters left: */
-	if (likely(list_empty(&lock->wait_list)))
-		atomic_set(&lock->count, 0);
-
 	spin_unlock_mutex(&lock->wait_lock, flags);
-
-	debug_mutex_free_waiter(&waiter);
 	preempt_enable();
-
 	return 0;
 
 err:
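The first mutex hunk makes mutex_can_spin_on_owner() read lock->owner once: the old code NULL-checked lock->owner and then dereferenced it again, and the owner could have changed (or become NULL) between the two loads. The standalone C11 sketch below shows the same single-snapshot pattern with illustrative types; in the kernel, the surrounding rcu_read_lock() is what keeps the owner's memory valid while it is inspected.

#include <stdatomic.h>
#include <stdbool.h>

struct task { atomic_bool on_cpu; };
struct lock { struct task *_Atomic owner; };

static bool can_spin_on_owner(struct lock *l)
{
	/* Single load, like ACCESS_ONCE(lock->owner) in the patch. */
	struct task *owner = atomic_load_explicit(&l->owner, memory_order_relaxed);

	/* NULL test and dereference both use the same snapshot. */
	return owner ? atomic_load_explicit(&owner->on_cpu, memory_order_relaxed)
		     : true;	/* no owner: worth trying to take the lock */
}

int main(void)
{
	static struct task t;
	static struct lock l;

	atomic_store(&t.on_cpu, true);
	atomic_store(&l.owner, &t);

	return can_spin_on_owner(&l) ? 0 : 1;
}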
kernel/smp.c

@@ -278,8 +278,6 @@ EXPORT_SYMBOL(smp_call_function_single);
  * @wait: If true, wait until function has completed.
  *
  * Returns 0 on success, else a negative status code (if no cpus were online).
- * Note that @wait will be implicitly turned on in case of allocation failures,
- * since we fall back to on-stack allocation.
  *
  * Selection preference:
  *	1) current cpu if in @mask
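This smp.c hunk only drops the misleading sentence about @wait being implicitly enabled; the rest of the kernel-doc still describes the contract (run the function on one CPU from the mask, preferring the current CPU). A kernel-style usage sketch, with a hypothetical callback, might look like:

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

/* Hypothetical callback, run on whichever CPU gets picked from the mask. */
static void example_ipi_func(void *info)
{
	pr_info("example: ran on CPU %d\n", smp_processor_id());
}

static int example_call(void)
{
	/* Returns 0 on success, or a negative status code if no CPU in the
	 * mask is online; with wait=1 it returns after the callback has run. */
	return smp_call_function_any(cpu_online_mask, example_ipi_func, NULL, 1);
}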