mutex: preemption fixes
The problem is that dropping the spinlock right before schedule() is a voluntary preemption point and can cause a schedule, right after which we schedule again.

Fix this inefficiency by keeping preemption disabled until we schedule: explicitly disable preemption and provide a schedule() variant that assumes preemption is already disabled.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 41719b0309
parent 93d81d1aca

3 changed files with 12 additions and 4 deletions
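For context, the sleeping path before this change looked roughly like the snippet below (a sketch, with the surrounding code elided). On a preemptible kernel, releasing wait_lock re-enables preemption, so a pending reschedule can already run schedule() at the unlock, after which the explicit schedule() call runs again immediately:

	/* old sleep path in __mutex_lock_common(), sketch */
	spin_unlock_mutex(&lock->wait_lock, flags);	/* unlock ends with a preempt_enable():
							 * a voluntary preemption point, may schedule here */
	schedule();					/* ...and then we schedule again right away */
	spin_lock_mutex(&lock->wait_lock, flags);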
kernel/mutex.c
@@ -131,6 +131,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	struct mutex_waiter waiter;
 	unsigned long flags;
 
+	preempt_disable();
 	spin_lock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_lock_common(lock, &waiter);
@@ -170,13 +171,14 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
 			debug_mutex_free_waiter(&waiter);
+			preempt_enable();
 			return -EINTR;
 		}
 		__set_task_state(task, state);
 
 		/* didnt get the lock, go to sleep: */
 		spin_unlock_mutex(&lock->wait_lock, flags);
-		schedule();
+		__schedule();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
 
@@ -193,6 +195,7 @@ done:
 	spin_unlock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_free_waiter(&waiter);
+	preempt_enable();
 
 	return 0;
 }
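The hunks above cover only __mutex_lock_common(); the schedule() variant that the commit message refers to lives in the scheduler and its diff is not shown here. A minimal sketch of that split, with the core reschedule logic elided and the exact signatures assumed rather than taken from this page, could look like this:

	/* Sketch only -- not the actual kernel/sched.c hunk from this commit. */

	/* Core reschedule path: the caller must already have preemption disabled. */
	static void __sched __schedule(void)
	{
		/* ... pick the next runnable task and context-switch to it ... */
	}

	/* Regular entry point: wraps __schedule() with its own preempt_disable()
	 * and loops if the need-resched flag was set again during the switch. */
	asmlinkage void __sched schedule(void)
	{
	need_resched:
		preempt_disable();
		__schedule();
		preempt_enable_no_resched();
		if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
			goto need_resched;
	}

With such a split, __mutex_lock_common() can disable preemption itself, drop wait_lock, and call __schedule() directly, so releasing the lock no longer opens a window for an extra schedule before the intended one.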