mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-05 22:28:00 +00:00
mutex: add atomic_dec_and_mutex_lock(), fix
include/linux/mutex.h:136: warning: 'mutex_lock' declared inline after being called
include/linux/mutex.h:136: warning: previous declaration of 'mutex_lock' was here

Uninline it.

[ Impact: clean up and uninline, address compiler warning ]

Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Eric Paris <eparis@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <200904292318.n3TNIsi6028340@imap1.linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
23b94b967f
commit
a511e3f968
2 changed files with 25 additions and 24 deletions
|
@ -150,28 +150,6 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
|
||||||
*/
|
*/
|
||||||
extern int mutex_trylock(struct mutex *lock);
|
extern int mutex_trylock(struct mutex *lock);
|
||||||
extern void mutex_unlock(struct mutex *lock);
|
extern void mutex_unlock(struct mutex *lock);
|
||||||
|
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
|
||||||
/**
|
|
||||||
* atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
|
|
||||||
* @cnt: the atomic which we are to dec
|
|
||||||
* @lock: the mutex to return holding if we dec to 0
|
|
||||||
*
|
|
||||||
* return true and hold lock if we dec to 0, return false otherwise
|
|
||||||
*/
|
|
||||||
static inline int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
|
|
||||||
{
|
|
||||||
/* dec if we can't possibly hit 0 */
|
|
||||||
if (atomic_add_unless(cnt, -1, 1))
|
|
||||||
return 0;
|
|
||||||
/* we might hit 0, so take the lock */
|
|
||||||
mutex_lock(lock);
|
|
||||||
if (!atomic_dec_and_test(cnt)) {
|
|
||||||
/* when we actually did the dec, we didn't hit 0 */
|
|
||||||
mutex_unlock(lock);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
/* we hit 0, and we hold the lock */
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -471,5 +471,28 @@ int __sched mutex_trylock(struct mutex *lock)
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
EXPORT_SYMBOL(mutex_trylock);
|
EXPORT_SYMBOL(mutex_trylock);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
|
||||||
|
* @cnt: the atomic which we are to dec
|
||||||
|
* @lock: the mutex to return holding if we dec to 0
|
||||||
|
*
|
||||||
|
* return true and hold lock if we dec to 0, return false otherwise
|
||||||
|
*/
|
||||||
|
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
|
||||||
|
{
|
||||||
|
/* dec if we can't possibly hit 0 */
|
||||||
|
if (atomic_add_unless(cnt, -1, 1))
|
||||||
|
return 0;
|
||||||
|
/* we might hit 0, so take the lock */
|
||||||
|
mutex_lock(lock);
|
||||||
|
if (!atomic_dec_and_test(cnt)) {
|
||||||
|
/* when we actually did the dec, we didn't hit 0 */
|
||||||
|
mutex_unlock(lock);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
/* we hit 0, and we hold the lock */
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
|
||||||
|
|
Loading…
Add table
Reference in a new issue