mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-07-23 23:32:14 +00:00
locking: Convert raw_spinlock to arch_spinlock
The raw_spin* namespace was taken by lockdep for the architecture specific implementations. raw_spin_* would be the ideal name space for the spinlocks which are not converted to sleeping locks in preempt-rt. Linus suggested to convert the raw_ to arch_ locks and cleanup the name space instead of using an artificial name like core_spin, atomic_spin or whatever. No functional change. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Acked-by: Peter Zijlstra <peterz@infradead.org> Acked-by: David S. Miller <davem@davemloft.net> Acked-by: Ingo Molnar <mingo@elte.hu> Cc: linux-arch@vger.kernel.org
This commit is contained in:
parent
6b6b4792f8
commit
445c89514b
51 changed files with 164 additions and 164 deletions
|
@@ -34,7 +34,7 @@
|
|||
* becomes equal to the initial value of the tail.
|
||||
*/
|
||||
|
||||
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
|
||||
static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
|
||||
{
|
||||
unsigned int counters = ACCESS_ONCE(lock->lock);
|
||||
|
||||
|
@@ -45,7 +45,7 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
|
|||
#define __raw_spin_unlock_wait(x) \
|
||||
while (__raw_spin_is_locked(x)) { cpu_relax(); }
|
||||
|
||||
static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
|
||||
static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
|
||||
{
|
||||
unsigned int counters = ACCESS_ONCE(lock->lock);
|
||||
|
||||
|
@@ -53,7 +53,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
|
|||
}
|
||||
#define __raw_spin_is_contended __raw_spin_is_contended
|
||||
|
||||
static inline void __raw_spin_lock(raw_spinlock_t *lock)
|
||||
static inline void __raw_spin_lock(arch_spinlock_t *lock)
|
||||
{
|
||||
int my_ticket;
|
||||
int tmp;
|
||||
|
@@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
|
|||
smp_llsc_mb();
|
||||
}
|
||||
|
||||
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
|
||||
static inline void __raw_spin_unlock(arch_spinlock_t *lock)
|
||||
{
|
||||
int tmp;
|
||||
|
||||
|
@@ -174,7 +174,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
|
|||
}
|
||||
}
|
||||
|
||||
static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
|
||||
static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock)
|
||||
{
|
||||
int tmp, tmp2, tmp3;
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue