mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-24 23:52:40 +00:00
locking: Implement new raw_spinlock
Now that the raw_spin name space is freed up, we can implement raw_spinlock and the related functions which are used to annotate the locks which are not converted to sleeping spinlocks in preempt-rt. A side effect is that only such locks can be used with the low level lock functions which circumvent lockdep. For !rt spin_* functions are mapped to the raw_spin* implementations. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Acked-by: Peter Zijlstra <peterz@infradead.org> Acked-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
e5931943d0
commit
c2f21ce2e3
8 changed files with 297 additions and 135 deletions
|
@ -13,8 +13,8 @@
|
|||
#include <linux/delay.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
void __spin_lock_init(spinlock_t *lock, const char *name,
|
||||
struct lock_class_key *key)
|
||||
void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
|
||||
struct lock_class_key *key)
|
||||
{
|
||||
#ifdef CONFIG_DEBUG_LOCK_ALLOC
|
||||
/*
|
||||
|
@ -29,7 +29,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
|
|||
lock->owner_cpu = -1;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(__spin_lock_init);
|
||||
EXPORT_SYMBOL(__raw_spin_lock_init);
|
||||
|
||||
void __rwlock_init(rwlock_t *lock, const char *name,
|
||||
struct lock_class_key *key)
|
||||
|
@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
|
|||
|
||||
EXPORT_SYMBOL(__rwlock_init);
|
||||
|
||||
static void spin_bug(spinlock_t *lock, const char *msg)
|
||||
static void spin_bug(raw_spinlock_t *lock, const char *msg)
|
||||
{
|
||||
struct task_struct *owner = NULL;
|
||||
|
||||
|
@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, const char *msg)
|
|||
#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
|
||||
|
||||
static inline void
|
||||
debug_spin_lock_before(spinlock_t *lock)
|
||||
debug_spin_lock_before(raw_spinlock_t *lock)
|
||||
{
|
||||
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
|
||||
SPIN_BUG_ON(lock->owner == current, lock, "recursion");
|
||||
|
@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock)
|
|||
lock, "cpu recursion");
|
||||
}
|
||||
|
||||
static inline void debug_spin_lock_after(spinlock_t *lock)
|
||||
static inline void debug_spin_lock_after(raw_spinlock_t *lock)
|
||||
{
|
||||
lock->owner_cpu = raw_smp_processor_id();
|
||||
lock->owner = current;
|
||||
}
|
||||
|
||||
static inline void debug_spin_unlock(spinlock_t *lock)
|
||||
static inline void debug_spin_unlock(raw_spinlock_t *lock)
|
||||
{
|
||||
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
|
||||
SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
|
||||
SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
|
||||
SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
|
||||
SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
|
||||
lock, "wrong CPU");
|
||||
|
@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spinlock_t *lock)
|
|||
lock->owner_cpu = -1;
|
||||
}
|
||||
|
||||
static void __spin_lock_debug(spinlock_t *lock)
|
||||
static void __spin_lock_debug(raw_spinlock_t *lock)
|
||||
{
|
||||
u64 i;
|
||||
u64 loops = loops_per_jiffy * HZ;
|
||||
|
@ -125,7 +125,7 @@ static void __spin_lock_debug(spinlock_t *lock)
|
|||
}
|
||||
}
|
||||
|
||||
void _raw_spin_lock(spinlock_t *lock)
|
||||
void _raw_spin_lock(raw_spinlock_t *lock)
|
||||
{
|
||||
debug_spin_lock_before(lock);
|
||||
if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
|
||||
|
@ -133,7 +133,7 @@ void _raw_spin_lock(spinlock_t *lock)
|
|||
debug_spin_lock_after(lock);
|
||||
}
|
||||
|
||||
int _raw_spin_trylock(spinlock_t *lock)
|
||||
int _raw_spin_trylock(raw_spinlock_t *lock)
|
||||
{
|
||||
int ret = arch_spin_trylock(&lock->raw_lock);
|
||||
|
||||
|
@ -148,7 +148,7 @@ int _raw_spin_trylock(spinlock_t *lock)
|
|||
return ret;
|
||||
}
|
||||
|
||||
void _raw_spin_unlock(spinlock_t *lock)
|
||||
void _raw_spin_unlock(raw_spinlock_t *lock)
|
||||
{
|
||||
debug_spin_unlock(lock);
|
||||
arch_spin_unlock(&lock->raw_lock);
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue