commit 0614621d89
Merge branch 'linus' into locking/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>

768 changed files with 7468 additions and 3957 deletions
kernel/futex.c
@@ -2217,11 +2217,11 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 	 * decrement the counter at queue_unlock() when some error has
 	 * occurred and we don't end up adding the task to the list.
 	 */
-	hb_waiters_inc(hb);
+	hb_waiters_inc(hb); /* implies smp_mb(); (A) */
 
 	q->lock_ptr = &hb->lock;
 
-	spin_lock(&hb->lock); /* implies smp_mb(); (A) */
+	spin_lock(&hb->lock);
 	return hb;
 }
 
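The hunk above moves the smp_mb() annotation from spin_lock() to hb_waiters_inc(): the waiter-count increment itself must act as full barrier (A), pairing with barrier (B) on the wake side so a wakeup cannot be lost. Below is a minimal userspace C11 model of that pairing; the names (waiters, futex_val, and both functions) are hypothetical stand-ins, not the kernel API.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int waiters;   /* models hb->waiters         */
static atomic_int futex_val; /* models the user futex word */

/* Waiter side: advertise ourselves, then re-read the futex word. */
static bool waiter_should_sleep(int expected)
{
	atomic_fetch_add(&waiters, 1);              /* seq_cst RMW: full barrier (A) */
	return atomic_load(&futex_val) == expected; /* re-check, ordered after (A)   */
}

/* Waker side: publish the new value, then look for waiters. */
static bool waker_must_wake(int new_val)
{
	atomic_store(&futex_val, new_val);  /* seq_cst store: barrier (B) */
	return atomic_load(&waiters) != 0;  /* ordered after (B)          */
}

With both fences in place, either the waker observes waiters != 0 and issues a wakeup, or the waiter observes new_val and never sleeps; without barrier (A) both loads can read stale values, which is exactly the missed-wakeup window the annotation documents.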
@@ -2857,35 +2857,39 @@ retry_private:
 	 * and BUG when futex_unlock_pi() interleaves with this.
 	 *
 	 * Therefore acquire wait_lock while holding hb->lock, but drop the
-	 * latter before calling rt_mutex_start_proxy_lock(). This still fully
-	 * serializes against futex_unlock_pi() as that does the exact same
-	 * lock handoff sequence.
+	 * latter before calling __rt_mutex_start_proxy_lock(). This
+	 * interleaves with futex_unlock_pi() -- which does a similar lock
+	 * handoff -- such that the latter can observe the futex_q::pi_state
+	 * before __rt_mutex_start_proxy_lock() is done.
 	 */
 	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
 	spin_unlock(q.lock_ptr);
+	/*
+	 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
+	 * such that futex_unlock_pi() is guaranteed to observe the waiter when
+	 * it sees the futex_q::pi_state.
+	 */
 	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
 	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
 
 	if (ret) {
 		if (ret == 1)
 			ret = 0;
-
-		spin_lock(q.lock_ptr);
-		goto no_block;
+		goto cleanup;
 	}
 
-
 	if (unlikely(to))
 		hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
 
 	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
 
+cleanup:
 	spin_lock(q.lock_ptr);
 	/*
-	 * If we failed to acquire the lock (signal/timeout), we must
+	 * If we failed to acquire the lock (deadlock/signal/timeout), we must
 	 * first acquire the hb->lock before removing the lock from the
-	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
-	 * wait lists consistent.
+	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
+	 * lists consistent.
 	 *
 	 * In particular; it is important that futex_unlock_pi() can not
 	 * observe this inconsistency.
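This hunk is the core of the early-deadlock fix: a failure returned by __rt_mutex_start_proxy_lock() now funnels through the same cleanup: label as the wait path, so hb->lock is re-taken before the rt_waiter is torn down and the hb and rt_mutex wait lists never go visibly out of sync. A self-contained sketch of that control-flow shape follows, with hypothetical stubs (start_proxy_lock(), wait_proxy_lock(), remove_waiter()) standing in for the kernel API.

#include <pthread.h>

struct waiter { int queued; };

static pthread_mutex_t hb_lock = PTHREAD_MUTEX_INITIALIZER; /* models hb->lock */

/* Hypothetical stubs, not the rt_mutex proxy-lock API. */
static int start_proxy_lock(struct waiter *w) { w->queued = 1; return 0; } /* <0 error, 1 acquired, 0 must wait */
static int wait_proxy_lock(struct waiter *w)  { return 0; }                /* <0 on signal/timeout */
static void remove_waiter(struct waiter *w)   { w->queued = 0; }           /* caller holds hb_lock */

static int lock_pi_sketch(struct waiter *w)
{
	int ret;

	pthread_mutex_lock(&hb_lock);
	ret = start_proxy_lock(w);          /* may fail immediately, e.g. deadlock */
	pthread_mutex_unlock(&hb_lock);

	if (ret) {
		if (ret == 1)               /* got the lock outright */
			ret = 0;
		goto cleanup;               /* error path joins the common exit */
	}

	ret = wait_proxy_lock(w);           /* block until acquired or interrupted */

cleanup:
	/* Re-take the outer lock before undoing the waiter, so a concurrent
	 * unlocker can never see the waiter on one list but not the other. */
	pthread_mutex_lock(&hb_lock);
	remove_waiter(w);
	pthread_mutex_unlock(&hb_lock);
	return ret;
}

Before this change the error path jumped to no_block without the waiter teardown that the deadlock case needs, which is what the new (deadlock/signal/timeout) wording in the comment refers to; routing both paths through cleanup: keeps the unwind uniform.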
@@ -3009,6 +3013,10 @@ retry:
 	 * there is no point where we hold neither; and therefore
 	 * wake_futex_pi() must observe a state consistent with what we
 	 * observed.
+	 *
+	 * In particular; this forces __rt_mutex_start_proxy() to
+	 * complete such that we're guaranteed to observe the
+	 * rt_waiter. Also see the WARN in wake_futex_pi().
 	 */
 	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
 	spin_unlock(&hb->lock);
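The unlock side relies on hand-over-hand locking: pi_mutex.wait_lock is taken before hb->lock is dropped, so there is no instant at which the unlocker holds neither lock, and any lock attempt it races with has either fully completed or not yet published its state. A minimal pthreads sketch of that handoff, with illustrative lock names rather than the kernel's:

#include <pthread.h>

static pthread_mutex_t hb_lock   = PTHREAD_MUTEX_INITIALIZER; /* models hb->lock            */
static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER; /* models pi_mutex.wait_lock  */

static void unlock_pi_handoff(void)
{
	pthread_mutex_lock(&hb_lock);
	/* ... observe under hb_lock that a waiter (pi_state) exists ... */

	/* Hand-over-hand: take the inner lock before dropping the outer one,
	 * so the state observed under hb_lock is still the state seen under
	 * wait_lock -- a half-enqueued waiter cannot slip in between. */
	pthread_mutex_lock(&wait_lock);
	pthread_mutex_unlock(&hb_lock);

	/* ... wake the top waiter under wait_lock ... */
	pthread_mutex_unlock(&wait_lock);
}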