locking/rtmutex: Remove rt_mutex_timed_lock()
rt_mutex_timed_lock() has no callers since commit:

  c051b21f71 ("rtmutex: Confine deadlock logic to futex")

Remove it.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210326153943.061103415@linutronix.de
parent feecb81732
commit c15380b72d

 2 files changed, 49 deletions(-)
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -115,9 +115,6 @@ extern void rt_mutex_lock(struct rt_mutex *lock);
 #endif
 
 extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
-extern int rt_mutex_timed_lock(struct rt_mutex *lock,
-			       struct hrtimer_sleeper *timeout);
-
 extern int rt_mutex_trylock(struct rt_mutex *lock);
 
 extern void rt_mutex_unlock(struct rt_mutex *lock);
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1394,21 +1394,6 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state,
 	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
 }
 
-static inline int
-rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
-			struct hrtimer_sleeper *timeout,
-			enum rtmutex_chainwalk chwalk,
-			int (*slowfn)(struct rt_mutex *lock, int state,
-				      struct hrtimer_sleeper *timeout,
-				      enum rtmutex_chainwalk chwalk))
-{
-	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
-	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
-		return 0;
-
-	return slowfn(lock, state, timeout, chwalk);
-}
-
 static inline int
 rt_mutex_fasttrylock(struct rt_mutex *lock,
 		     int (*slowfn)(struct rt_mutex *lock))
@@ -1516,37 +1501,6 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
 	return __rt_mutex_slowtrylock(lock);
 }
 
-/**
- * rt_mutex_timed_lock - lock a rt_mutex interruptible
- *			the timeout structure is provided
- *			by the caller
- *
- * @lock:		the rt_mutex to be locked
- * @timeout:		timeout structure or NULL (no timeout)
- *
- * Returns:
- *  0		on success
- * -EINTR	when interrupted by a signal
- * -ETIMEDOUT	when the timeout expired
- */
-int
-rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
-{
-	int ret;
-
-	might_sleep();
-
-	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
-				      RT_MUTEX_MIN_CHAINWALK,
-				      rt_mutex_slowlock);
-	if (ret)
-		mutex_release(&lock->dep_map, _RET_IP_);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-
 /**
  * rt_mutex_trylock - try to lock a rt_mutex
  *