locking: Move mcs_spinlock.h into kernel/locking/
The mcs_spinlock code is not meant (or suitable) as a generic locking primitive; therefore, take it away from the normal includes and place it in kernel/locking/. This way the locking primitives implemented there can use it as part of their implementation but we do not risk it getting used inappropriately.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-byirmpamgr7h25m5kyavwpzx@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
e571c58f31
commit
c9122da1e2
2 changed files with 1 addition and 1 deletion
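The message above describes the intended use: locking primitives inside kernel/locking/ embed the lock/unlock pair directly. As a minimal, hypothetical caller sketch (not part of this commit; demo_lock and demo_critical_section are illustrative names only), each contender declares its own queue node, typically on the stack, and must pass the same node to mcs_spin_unlock() that it gave to mcs_spin_lock():

#include "mcs_spinlock.h"	/* now private to kernel/locking/ */

static struct mcs_spinlock *demo_lock;	/* NULL when nobody holds the lock */

static void demo_critical_section(void)
{
	struct mcs_spinlock node;	/* per-caller queue node */

	/* Enqueue and spin on our own node.locked, not the shared lock word. */
	mcs_spin_lock(&demo_lock, &node);
	/* ... critical section ... */
	mcs_spin_unlock(&demo_lock, &node);	/* hand off to the next waiter */
}

Because every waiter spins on its own node, a release touches only the next waiter's cache line, which is what gives the MCS lock its fairness and freedom from cache-line bouncing under contention.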
kernel/locking/mcs_spinlock.h | 114 (new file)
@@ -0,0 +1,114 @@
/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair, and with each cpu trying
 * to acquire the lock spinning on a local variable.
 * It avoids the expensive cache bouncing that common test-and-set spin-lock
 * implementations incur.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H

#include <asm/mcs_spinlock.h>

struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked; /* 1 if lock acquired */
};

#ifndef arch_mcs_spin_lock_contended
/*
 * Using smp_load_acquire() provides a memory barrier that ensures
 * subsequent operations happen after the lock is acquired.
 */
#define arch_mcs_spin_lock_contended(l)					\
do {									\
	while (!(smp_load_acquire(l)))					\
		arch_mutex_cpu_relax();					\
} while (0)
#endif

#ifndef arch_mcs_spin_unlock_contended
/*
 * smp_store_release() provides a memory barrier to ensure all
 * operations in the critical section have been completed before
 * unlocking.
 */
#define arch_mcs_spin_unlock_contended(l)				\
	smp_store_release((l), 1)
#endif

/*
 * Note: the smp_load_acquire/smp_store_release pair is not
 * sufficient to form a full memory barrier across
 * cpus for many architectures (except x86) for mcs_unlock and mcs_lock.
 * For applications that need a full barrier across multiple cpus
 * with mcs_unlock and mcs_lock pair, smp_mb__after_unlock_lock() should be
 * used after mcs_lock.
 */

/*
 * In order to acquire the lock, the caller should declare a local node and
 * pass a reference of the node to this function in addition to the lock.
 * If the lock has already been acquired, then this will proceed to spin
 * on this node->locked until the previous lock holder sets the node->locked
 * in mcs_spin_unlock().
 *
 * We don't inline mcs_spin_lock() so that perf can correctly account for the
 * time spent in this lock function.
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/*
		 * Lock acquired, don't need to set node->locked to 1. Threads
		 * spin only on their own node->locked value for lock acquisition.
		 * However, since this thread can immediately acquire the lock
		 * and does not proceed to spin on its own node->locked, this
		 * value won't be used. If a debug mode is needed to
		 * audit lock status, then set the node->locked value here.
		 */
		return;
	}
	ACCESS_ONCE(prev->next) = node;

	/* Wait until the lock holder passes the lock down. */
	arch_mcs_spin_lock_contended(&node->locked);
}

/*
 * Releases the lock. The caller should pass in the corresponding node that
 * was used to acquire the lock.
 */
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (likely(cmpxchg(lock, node, NULL) == node))
			return;
		/* Wait until the next pointer is set */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	}

	/* Pass lock to next waiter. */
	arch_mcs_spin_unlock_contended(&next->locked);
}

#endif /* __LINUX_MCS_SPINLOCK_H */
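One caveat worth restating outside the diff: as the note in the header says, the smp_load_acquire()/smp_store_release() pair does not add up to a full memory barrier between one CPU's mcs_spin_unlock() and another CPU's subsequent mcs_spin_lock() on most architectures (x86 excepted). A sketch of the remedy the header itself names, reusing the hypothetical demo_lock from the sketch above:

	struct mcs_spinlock node;

	mcs_spin_lock(&demo_lock, &node);
	smp_mb__after_unlock_lock();	/* promote the unlock+lock pair to a
					 * full barrier, as the note advises */
	/* ... code that needs full ordering against the previous holder ... */
	mcs_spin_unlock(&demo_lock, &node);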