Mirror of https://github.com/Fishwaldo/Star64_linux.git
locking/rtmutex: Consolidate the fast/slowpath invocation
The indirection via a function pointer (which is at least optimized into a
tail call by the compiler) is making the code hard to read. Clean it up and
move the futex related trylock functions down to the futex section.

Move the wake_q wakeup into rt_mutex_slowunlock(). No point in handing it
to the caller. The futex code uses a different function.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210326153944.247927548@linutronix.de
parent d7a2edb890
commit 70c80103aa
1 changed file with 60 additions and 86 deletions
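The pattern being consolidated is easier to see in isolation than in the diff below. The following is a minimal, self-contained userspace sketch, using C11 atomics and invented demo_* names rather than the kernel's types: it contrasts the old shape, in which a generic fast-path helper receives the slow path as a function pointer (as rt_mutex_fastlock() did), with the new shape, in which each function open-codes the cmpxchg fast path and calls its slow path directly (as __rt_mutex_lock() now does).

/*
 * Illustrative userspace sketch only (invented demo_* names, C11 atomics);
 * it mimics the fast path/slow path structure, not the rt_mutex internals.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct demo_mutex {
        _Atomic(void *) owner;          /* NULL means unlocked */
};

static int demo_task;                   /* stand-in for "current" */

/* Placeholder slow path; the real one enqueues the waiter and blocks. */
static int demo_slowlock(struct demo_mutex *lock)
{
        void *expected = NULL;

        while (!atomic_compare_exchange_weak(&lock->owner, &expected, &demo_task))
                expected = NULL;        /* retry until the owner releases it */
        return 0;
}

/*
 * Old shape: a generic fast-path helper that receives the slow path as a
 * function pointer, the way rt_mutex_fastlock() did before this commit.
 */
static inline int demo_fastlock(struct demo_mutex *lock,
                                int (*slowfn)(struct demo_mutex *lock))
{
        void *expected = NULL;

        if (atomic_compare_exchange_strong(&lock->owner, &expected, &demo_task))
                return 0;
        return slowfn(lock);
}

/*
 * New shape: the cmpxchg fast path is open-coded and the slow path is
 * called directly, which is what __rt_mutex_lock() does after this commit.
 */
static inline int demo_lock(struct demo_mutex *lock)
{
        void *expected = NULL;

        if (atomic_compare_exchange_strong(&lock->owner, &expected, &demo_task))
                return 0;
        return demo_slowlock(lock);
}

static void demo_unlock(struct demo_mutex *lock)
{
        atomic_store(&lock->owner, NULL);
}

int main(void)
{
        struct demo_mutex m;

        atomic_init(&m.owner, NULL);
        demo_fastlock(&m, demo_slowlock);       /* old style: indirect slow path */
        demo_unlock(&m);
        demo_lock(&m);                          /* new style: direct slow path */
        printf("owner after demo_lock(): %p\n", atomic_load(&m.owner));
        return 0;
}

The kernel functions additionally handle lockdep annotations and, on the unlock side, the wake_q handling that this commit moves into rt_mutex_slowunlock(); the sketch deliberately leaves all of that out.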
@@ -1298,14 +1298,25 @@ static int __sched rt_mutex_slowtrylock(struct rt_mutex *lock)
         return ret;
 }

+/*
+ * Performs the wakeup of the top-waiter and re-enables preemption.
+ */
+void __sched rt_mutex_postunlock(struct wake_q_head *wake_q)
+{
+        wake_up_q(wake_q);
+
+        /* Pairs with preempt_disable() in rt_mutex_slowunlock() */
+        preempt_enable();
+}
+
 /*
  * Slow path to release a rt-mutex.
  *
  * Return whether the current task needs to call rt_mutex_postunlock().
  */
-static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
-                                        struct wake_q_head *wake_q)
+static void __sched rt_mutex_slowunlock(struct rt_mutex *lock)
 {
+        DEFINE_WAKE_Q(wake_q);
         unsigned long flags;

         /* irqsave required to support early boot calls */
@@ -1347,7 +1358,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
         while (!rt_mutex_has_waiters(lock)) {
                 /* Drops lock->wait_lock ! */
                 if (unlock_rt_mutex_safe(lock, flags) == true)
-                        return false;
+                        return;
                 /* Relock the rtmutex and try again */
                 raw_spin_lock_irqsave(&lock->wait_lock, flags);
         }
@@ -1358,10 +1369,10 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
          *
          * Queue the next waiter for wakeup once we release the wait_lock.
          */
-        mark_wakeup_next_waiter(wake_q, lock);
+        mark_wakeup_next_waiter(&wake_q, lock);
         raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

-        return true; /* call rt_mutex_postunlock() */
+        rt_mutex_postunlock(&wake_q);
 }

 /*
@@ -1370,60 +1381,21 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
  * The atomic acquire/release ops are compiled away, when either the
  * architecture does not support cmpxchg or when debugging is enabled.
  */
-static __always_inline int
-rt_mutex_fastlock(struct rt_mutex *lock, int state,
-                  int (*slowfn)(struct rt_mutex *lock, int state,
-                                struct hrtimer_sleeper *timeout,
-                                enum rtmutex_chainwalk chwalk))
+static __always_inline int __rt_mutex_lock(struct rt_mutex *lock, long state,
+                                           unsigned int subclass)
 {
+        int ret;
+
+        might_sleep();
+        mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+
         if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
                 return 0;

-        return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
-}
-
-static __always_inline int
-rt_mutex_fasttrylock(struct rt_mutex *lock,
-                     int (*slowfn)(struct rt_mutex *lock))
-{
-        if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
-                return 1;
-
-        return slowfn(lock);
-}
-
-/*
- * Performs the wakeup of the top-waiter and re-enables preemption.
- */
-void __sched rt_mutex_postunlock(struct wake_q_head *wake_q)
-{
-        wake_up_q(wake_q);
-
-        /* Pairs with preempt_disable() in rt_mutex_slowunlock() */
-        preempt_enable();
-}
-
-static __always_inline void
-rt_mutex_fastunlock(struct rt_mutex *lock,
-                    bool (*slowfn)(struct rt_mutex *lock,
-                                   struct wake_q_head *wqh))
-{
-        DEFINE_WAKE_Q(wake_q);
-
-        if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
-                return;
-
-        if (slowfn(lock, &wake_q))
-                rt_mutex_postunlock(&wake_q);
-}
-
-static __always_inline void __rt_mutex_lock(struct rt_mutex *lock,
-                                            unsigned int subclass)
-{
-        might_sleep();
-
-        mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-        rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+        ret = rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+        if (ret)
+                mutex_release(&lock->dep_map, _RET_IP_);
+        return ret;
 }

 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -1435,7 +1407,7 @@ static __always_inline void __rt_mutex_lock(struct rt_mutex *lock,
  */
 void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
 {
-        __rt_mutex_lock(lock, subclass);
+        __rt_mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);

@@ -1448,7 +1420,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
  */
 void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
-        __rt_mutex_lock(lock, 0);
+        __rt_mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
 #endif
@@ -1464,42 +1436,21 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
  */
 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
-        int ret;
-
-        might_sleep();
-
-        mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-        ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
-        if (ret)
-                mutex_release(&lock->dep_map, _RET_IP_);
-
-        return ret;
+        return __rt_mutex_lock(lock, TASK_INTERRUPTIBLE, 0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

-/*
- * Futex variant, must not use fastpath.
- */
-int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
-{
-        return rt_mutex_slowtrylock(lock);
-}
-
-int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
-{
-        return __rt_mutex_slowtrylock(lock);
-}
-
 /**
  * rt_mutex_trylock - try to lock a rt_mutex
  *
  * @lock: the rt_mutex to be locked
  *
- * This function can only be called in thread context. It's safe to
- * call it from atomic regions, but not from hard interrupt or soft
- * interrupt context.
+ * This function can only be called in thread context. It's safe to call it
+ * from atomic regions, but not from hard or soft interrupt context.
  *
- * Returns 1 on success and 0 on contention
+ * Returns:
+ *  1 on success
+ *  0 on contention
  */
 int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
@@ -1508,7 +1459,14 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock)
         if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
                 return 0;

-        ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+        /*
+         * No lockdep annotation required because lockdep disables the fast
+         * path.
+         */
+        if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+                return 1;
+
+        ret = rt_mutex_slowtrylock(lock);
         if (ret)
                 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

@@ -1524,10 +1482,26 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock);
 void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
         mutex_release(&lock->dep_map, _RET_IP_);
-        rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
+        if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
+                return;
+
+        rt_mutex_slowunlock(lock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);

+/*
+ * Futex variants, must not use fastpath.
+ */
+int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
+{
+        return rt_mutex_slowtrylock(lock);
+}
+
+int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
+{
+        return __rt_mutex_slowtrylock(lock);
+}
+
 /**
  * __rt_mutex_futex_unlock - Futex variant, that since futex variants
  * do not use the fast-path, can be simple and will not need to retry.