spinlock: lockbreak cleanup
The break_lock data structure and code for spinlocks is quite nasty.  Not only
does it double the size of a spinlock but it changes locking to a potentially
less optimal trylock.

Put all of that under CONFIG_GENERIC_LOCKBREAK, and introduce a
__raw_spin_is_contended that uses the lock data itself to determine whether
there are waiters on the lock, to be used if CONFIG_GENERIC_LOCKBREAK is
not set.

Rename need_lockbreak to spin_needbreak, make it use spin_is_contended to
decouple it from the spinlock implementation, and make it typesafe (rwlocks
do not have any need_lockbreak sites -- why do they even get bloated up
with that break_lock then?).

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 95c354fe9f (parent a95d67f87e)
19 changed files with 72 additions and 37 deletions
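
For orientation before the diff: every call site converted below ends up with the same shape -- a long-running loop that voluntarily drops its spinlock when another task is spinning on it or a reschedule is pending. A minimal sketch of that idiom, with invented names (my_ctx, my_item and process_one() are illustrations, not code from this patch):

static void process_all(struct my_ctx *ctx)
{
restart:
	spin_lock(&ctx->lock);
	while (!list_empty(&ctx->items)) {
		struct my_item *it;

		it = list_first_entry(&ctx->items, struct my_item, node);
		process_one(ctx, it);	/* unlinks 'it' from ctx->items */

		/* The new idiom: give the lock up if another task is
		 * spinning on it, or if we ourselves should reschedule. */
		if (need_resched() || spin_needbreak(&ctx->lock)) {
			spin_unlock(&ctx->lock);
			cond_resched();
			goto restart;
		}
	}
	spin_unlock(&ctx->lock);
}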
arch/arm/Kconfig
@@ -91,6 +91,11 @@ config GENERIC_IRQ_PROBE
 	bool
 	default y
 
+config GENERIC_LOCKBREAK
+	bool
+	default y
+	depends on SMP && PREEMPT
+
 config RWSEM_GENERIC_SPINLOCK
 	bool
 	default y
arch/ia64/Kconfig
@@ -42,6 +42,11 @@ config MMU
 config SWIOTLB
 	bool
 
+config GENERIC_LOCKBREAK
+	bool
+	default y
+	depends on SMP && PREEMPT
+
 config RWSEM_XCHGADD_ALGORITHM
 	bool
 	default y
arch/m32r/Kconfig
@@ -235,6 +235,11 @@ config IRAM_SIZE
 # Define implied options from the CPU selection here
 #
 
+config GENERIC_LOCKBREAK
+	bool
+	default y
+	depends on SMP && PREEMPT
+
 config RWSEM_GENERIC_SPINLOCK
 	bool
 	depends on M32R
arch/mips/Kconfig
@@ -694,6 +694,11 @@ source "arch/mips/vr41xx/Kconfig"
 
 endmenu
 
+config GENERIC_LOCKBREAK
+	bool
+	default y
+	depends on SMP && PREEMPT
+
 config RWSEM_GENERIC_SPINLOCK
 	bool
 	default y
arch/parisc/Kconfig
@@ -19,6 +19,11 @@ config MMU
 config STACK_GROWSUP
 	def_bool y
 
+config GENERIC_LOCKBREAK
+	bool
+	default y
+	depends on SMP && PREEMPT
+
 config RWSEM_GENERIC_SPINLOCK
 	def_bool y
 
arch/powerpc/Kconfig
@@ -53,6 +53,11 @@ config RWSEM_XCHGADD_ALGORITHM
 	bool
 	default y
 
+config GENERIC_LOCKBREAK
+	bool
+	default y
+	depends on SMP && PREEMPT
+
 config ARCH_HAS_ILOG2_U32
 	bool
 	default y
arch/sparc64/Kconfig
@@ -200,6 +200,11 @@ config US2E_FREQ
 	  If in doubt, say N.
 
 # Global things across all Sun machines.
+config GENERIC_LOCKBREAK
+	bool
+	default y
+	depends on SMP && PREEMPT
+
 config RWSEM_GENERIC_SPINLOCK
 	bool
 
arch/x86/Kconfig
@@ -19,6 +19,10 @@ config X86_64
 config X86
 	def_bool y
 
+config GENERIC_LOCKBREAK
+	def_bool y
+	depends on SMP && PREEMPT
+
 config GENERIC_TIME
 	def_bool y
 
fs/jbd/checkpoint.c
@@ -347,7 +347,8 @@ restart:
 			break;
 		}
 		retry = __process_buffer(journal, jh, bhs,&batch_count);
-		if (!retry && lock_need_resched(&journal->j_list_lock)){
+		if (!retry && (need_resched() ||
+			spin_needbreak(&journal->j_list_lock))) {
 			spin_unlock(&journal->j_list_lock);
 			retry = 1;
 			break;
fs/jbd/commit.c
@@ -265,7 +265,7 @@ write_out_data:
 				put_bh(bh);
 			}
 
-			if (lock_need_resched(&journal->j_list_lock)) {
+			if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
 				spin_unlock(&journal->j_list_lock);
 				goto write_out_data;
 			}
fs/jbd2/checkpoint.c
@@ -353,7 +353,8 @@ restart:
 		}
 		retry = __process_buffer(journal, jh, bhs, &batch_count,
					 transaction);
-		if (!retry && lock_need_resched(&journal->j_list_lock)){
+		if (!retry && (need_resched() ||
+			spin_needbreak(&journal->j_list_lock))) {
 			spin_unlock(&journal->j_list_lock);
 			retry = 1;
 			break;
fs/jbd2/commit.c
@@ -341,7 +341,7 @@ write_out_data:
 				put_bh(bh);
 			}
 
-			if (lock_need_resched(&journal->j_list_lock)) {
+			if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
 				spin_unlock(&journal->j_list_lock);
 				goto write_out_data;
 			}
include/linux/sched.h
@@ -1922,23 +1922,16 @@ extern int cond_resched_softirq(void);
 
 /*
  * Does a critical section need to be broken due to another
- * task waiting?:
+ * task waiting?: (technically does not depend on CONFIG_PREEMPT,
+ * but a general need for low latency)
  */
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
-# define need_lockbreak(lock) ((lock)->break_lock)
-#else
-# define need_lockbreak(lock) 0
-#endif
-
-/*
- * Does a critical section need to be broken due to another
- * task waiting or preemption being signalled:
- */
-static inline int lock_need_resched(spinlock_t *lock)
+static inline int spin_needbreak(spinlock_t *lock)
 {
-	if (need_lockbreak(lock) || need_resched())
-		return 1;
+#ifdef CONFIG_PREEMPT
+	return spin_is_contended(lock);
+#else
 	return 0;
+#endif
 }
 
 /*
include/linux/spinlock.h
@@ -120,6 +120,12 @@ do { \
 
 #define spin_is_locked(lock)	__raw_spin_is_locked(&(lock)->raw_lock)
 
+#ifdef CONFIG_GENERIC_LOCKBREAK
+#define spin_is_contended(lock) ((lock)->break_lock)
+#else
+#define spin_is_contended(lock)	__raw_spin_is_contended(&(lock)->raw_lock)
+#endif
+
 /**
  * spin_unlock_wait - wait until the spinlock gets unlocked
  * @lock: the spinlock in question.
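
Combined with the spin_needbreak() definition in the include/linux/sched.h hunk above, the new helper resolves to one of three forms after preprocessing. Spelled out for illustration (this is just the two hunks above merged, not code from the patch):

/* CONFIG_PREEMPT + CONFIG_GENERIC_LOCKBREAK: reuse the break_lock flag */
static inline int spin_needbreak(spinlock_t *lock)
{
	return lock->break_lock;
}

/* CONFIG_PREEMPT without GENERIC_LOCKBREAK: ask the arch lock word itself */
static inline int spin_needbreak(spinlock_t *lock)
{
	return __raw_spin_is_contended(&lock->raw_lock);
}

/* !CONFIG_PREEMPT: never voluntarily break the critical section */
static inline int spin_needbreak(spinlock_t *lock)
{
	return 0;
}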
include/linux/spinlock_types.h
@@ -19,7 +19,7 @@
 
 typedef struct {
 	raw_spinlock_t raw_lock;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+#ifdef CONFIG_GENERIC_LOCKBREAK
 	unsigned int break_lock;
 #endif
 #ifdef CONFIG_DEBUG_SPINLOCK
@@ -35,7 +35,7 @@ typedef struct {
 
 typedef struct {
 	raw_rwlock_t raw_lock;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+#ifdef CONFIG_GENERIC_LOCKBREAK
 	unsigned int break_lock;
 #endif
 #ifdef CONFIG_DEBUG_SPINLOCK
include/linux/spinlock_up.h
@@ -64,6 +64,8 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 # define __raw_spin_trylock(lock)	({ (void)(lock); 1; })
 #endif /* DEBUG_SPINLOCK */
 
+#define __raw_spin_is_contended(lock)	(((void)(lock), 0))
+
 #define __raw_read_can_lock(lock)	(((void)(lock), 1))
 #define __raw_write_can_lock(lock)	(((void)(lock), 1))
 
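
The UP stub above can never report contention, which is right: with a single CPU there is nobody left to spin on a held lock. On SMP, an architecture can answer __raw_spin_is_contended() from the lock word itself. A hypothetical ticket-style lock might do it as below (illustrative sketch only, not part of this patch; the slock layout is an assumption -- x86 later gained an implementation of this shape with its ticket spinlocks):

/*
 * Assumed layout: low byte = ticket currently served, high byte = next
 * ticket to hand out.  More than one ticket outstanding means at least
 * one CPU is queued behind the current holder.
 */
static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = *(volatile int *)&lock->slock;	/* racy snapshot is fine */

	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
}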
kernel/sched.c
@@ -4945,19 +4945,15 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int cond_resched_lock(spinlock_t *lock)
 {
+	int resched = need_resched() && system_state == SYSTEM_RUNNING;
 	int ret = 0;
 
-	if (need_lockbreak(lock)) {
+	if (spin_needbreak(lock) || resched) {
 		spin_unlock(lock);
-		cpu_relax();
-		ret = 1;
-		spin_lock(lock);
-	}
-	if (need_resched() && system_state == SYSTEM_RUNNING) {
-		spin_release(&lock->dep_map, 1, _THIS_IP_);
-		_raw_spin_unlock(lock);
-		preempt_enable_no_resched();
-		__cond_resched();
+		if (resched && need_resched())
+			__cond_resched();
+		else
+			cpu_relax();
 		ret = 1;
 		spin_lock(lock);
 	}
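
The rewritten helper still returns 1 whenever the lock was dropped, so a caller only has to make sure its own state survives the unlock/relock window; index-based walks do, for example (sketch with invented names, not from this patch):

/* Zero out n table slots under 'lock', letting waiters and
 * higher-priority work slip in between iterations. */
static void clear_slots(spinlock_t *lock, struct slot *tab, int n)
{
	int i;

	spin_lock(lock);
	for (i = 0; i < n; i++) {
		tab[i].val = 0;
		/* 'tab' and 'i' stay valid even if the lock was dropped */
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
}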
kernel/spinlock.c
@@ -65,8 +65,7 @@ EXPORT_SYMBOL(_write_trylock);
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
-#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \
-	defined(CONFIG_DEBUG_LOCK_ALLOC)
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
 
 void __lockfunc _read_lock(rwlock_t *lock)
 {
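
For context: on builds that fail the #if above (CONFIG_GENERIC_LOCKBREAK without lockdep), the lock functions are instead generated by the BUILD_LOCK_OPS macro further down in this same file. Expanded for spinlocks it looks roughly like the following -- paraphrased from the macro, not verbatim from the patch. This loop is what sets and clears break_lock, and it is the "potentially less optimal trylock" the changelog complains about:

void __lockfunc _spin_lock(spinlock_t *lock)
{
	for (;;) {
		preempt_disable();
		if (likely(_raw_spin_trylock(lock)))
			break;
		preempt_enable();	/* stay preemptible while waiting */

		if (!lock->break_lock)
			lock->break_lock = 1;	/* tell the holder someone waits */
		while (!spin_can_lock(lock) && lock->break_lock)
			_raw_spin_relax(&lock->raw_lock);
	}
	lock->break_lock = 0;
}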
mm/memory.c
@@ -513,8 +513,7 @@ again:
 		if (progress >= 32) {
 			progress = 0;
 			if (need_resched() ||
-			    need_lockbreak(src_ptl) ||
-			    need_lockbreak(dst_ptl))
+			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
 				break;
 		}
 		if (pte_none(*src_pte)) {
@@ -853,7 +852,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 			tlb_finish_mmu(*tlbp, tlb_start, start);
 
 			if (need_resched() ||
-				(i_mmap_lock && need_lockbreak(i_mmap_lock))) {
+				(i_mmap_lock && spin_needbreak(i_mmap_lock))) {
 				if (i_mmap_lock) {
 					*tlbp = NULL;
 					goto out;
@@ -1768,8 +1767,7 @@ again:
 
 		restart_addr = zap_page_range(vma, start_addr,
					end_addr - start_addr, details);
-		need_break = need_resched() ||
-				need_lockbreak(details->i_mmap_lock);
+		need_break = need_resched() || spin_needbreak(details->i_mmap_lock);
 
 		if (restart_addr >= end_addr) {
 			/* We have now completed this vma: mark it so */