locking/rtmutex: Provide the spin/rwlock core lock function

A simplified version of the rtmutex slowlock function, which neither
handles signals nor timeouts, and is careful about preserving the state
of the blocked task across the lock operation.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211303.770228446@linutronix.de
parent 342a93247e
commit 1c143c4b65

2 changed files with 61 additions and 1 deletion
kernel/locking/rtmutex.c
@@ -1416,3 +1416,63 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
 	return rt_mutex_slowlock(lock, state);
 }
 #endif /* RT_MUTEX_BUILD_MUTEX */
+
+#ifdef RT_MUTEX_BUILD_SPINLOCKS
+/*
+ * Functions required for spin/rw_lock substitution on RT kernels
+ */
+
+/**
+ * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
+ * @lock:	The underlying RT mutex
+ */
+static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
+{
+	struct rt_mutex_waiter waiter;
+
+	lockdep_assert_held(&lock->wait_lock);
+
+	if (try_to_take_rt_mutex(lock, current, NULL))
+		return;
+
+	rt_mutex_init_rtlock_waiter(&waiter);
+
+	/* Save current state and set state to TASK_RTLOCK_WAIT */
+	current_save_and_set_rtlock_wait_state();
+
+	task_blocks_on_rt_mutex(lock, &waiter, current, RT_MUTEX_MIN_CHAINWALK);
+
+	for (;;) {
+		/* Try to acquire the lock again */
+		if (try_to_take_rt_mutex(lock, current, &waiter))
+			break;
+
+		raw_spin_unlock_irq(&lock->wait_lock);
+
+		schedule_rtlock();
+
+		raw_spin_lock_irq(&lock->wait_lock);
+		set_current_state(TASK_RTLOCK_WAIT);
+	}
+
+	/* Restore the task state */
+	current_restore_rtlock_saved_state();
+
+	/*
+	 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
+	 * We might have to fix that up:
+	 */
+	fixup_rt_mutex_waiters(lock);
+	debug_rt_mutex_free_waiter(&waiter);
+}
+
+static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
+	rtlock_slowlock_locked(lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+}
+
+#endif /* RT_MUTEX_BUILD_SPINLOCKS */
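The core of the patch is the save/block/restore pattern: before blocking, the slowpath saves whatever state the task was already in (for example TASK_UNINTERRUPTIBLE set by an outer wait loop), parks in TASK_RTLOCK_WAIT while contending, and puts the original state back afterwards, via the current_save_and_set_rtlock_wait_state() / current_restore_rtlock_saved_state() helpers provided earlier in this series. As a rough userspace analogue of that shape (not part of the commit; demo_lock, demo_slowlock, demo_unlock, cur_state and the DEMO_* states are invented for illustration):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the task state the kernel is careful to preserve. */
enum demo_state { DEMO_RUNNING, DEMO_UNINTERRUPTIBLE, DEMO_RTLOCK_WAIT };
static _Thread_local enum demo_state cur_state = DEMO_RUNNING;

struct demo_lock {
	atomic_int owned;		/* 0 = free, 1 = held */
	pthread_mutex_t wait_lock;	/* protects the waiter queue */
	pthread_cond_t waiters;		/* stand-in for the rtmutex waiter list */
};

static void demo_slowlock(struct demo_lock *l)
{
	/* Save the caller's state, switch to "waiting for an rtlock". */
	enum demo_state saved = cur_state;
	cur_state = DEMO_RTLOCK_WAIT;

	pthread_mutex_lock(&l->wait_lock);
	/* Try-acquire/block/retry, like the for (;;) loop above. */
	while (atomic_exchange(&l->owned, 1))
		pthread_cond_wait(&l->waiters, &l->wait_lock);
	pthread_mutex_unlock(&l->wait_lock);

	/* Restore whatever state the caller was in before blocking. */
	cur_state = saved;
}

static void demo_unlock(struct demo_lock *l)
{
	pthread_mutex_lock(&l->wait_lock);
	atomic_store(&l->owned, 0);
	pthread_cond_signal(&l->waiters);
	pthread_mutex_unlock(&l->wait_lock);
}

int main(void)
{
	struct demo_lock l = {
		.owned = 0,
		.wait_lock = PTHREAD_MUTEX_INITIALIZER,
		.waiters = PTHREAD_COND_INITIALIZER,
	};

	cur_state = DEMO_UNINTERRUPTIBLE;	/* caller already set a state */
	demo_slowlock(&l);
	/* The pre-existing state survived the blocking lock operation. */
	printf("state preserved: %d\n", cur_state == DEMO_UNINTERRUPTIBLE);
	demo_unlock(&l);
	return 0;
}

The only point of the demo is that the pre-existing state survives the blocking lock operation, which is exactly what the kernel slowpath guarantees for the blocked task.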
kernel/locking/rtmutex_common.h
@@ -181,7 +181,7 @@ static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
 	waiter->task = NULL;
 }
 
-static inline void rtlock_init_rtmutex_waiter(struct rt_mutex_waiter *waiter)
+static inline void rt_mutex_init_rtlock_waiter(struct rt_mutex_waiter *waiter)
 {
 	rt_mutex_init_waiter(waiter);
 	waiter->wake_state = TASK_RTLOCK_WAIT;
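For context, this commit provides only the contended half. The RT spin/rwlock substitution added later in the series takes a lockless fast path first and falls into rtlock_slowlock() only when the owner cmpxchg fails; roughly like the following (a sketch, not code from this commit, though rt_mutex_cmpxchg_acquire() is an existing rtmutex helper):

/* Sketch of the expected caller: try to install current as owner with
 * a cmpxchg; only on contention enter the slowpath added above. */
static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
{
	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		rtlock_slowlock(rtm);
}

The rt_mutex_init_rtlock_waiter() change above supports this split: rtlock waiters record wake_state = TASK_RTLOCK_WAIT, so the unlock path can wake them with the matching state instead of a normal sleep state.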