mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-04-01 12:04:08 +00:00
locking/rwbase: Optimize rwbase_read_trylock
Instead of a full barrier around the Rmw insn, micro-optimize for weakly ordered archs such that we only provide the required ACQUIRE semantics when taking the read lock. Signed-off-by: Davidlohr Bueso <dbueso@suse.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Acked-by: Waiman Long <longman@redhat.com> Link: https://lkml.kernel.org/r/20210920052031.54220-2-dave@stgolabs.net
This commit is contained in:
parent
3f48565beb
commit
c78416d122
1 changed file with 2 additions and 3 deletions
|
@ -59,8 +59,7 @@ static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
|
||||||
* set.
|
* set.
|
||||||
*/
|
*/
|
||||||
for (r = atomic_read(&rwb->readers); r < 0;) {
|
for (r = atomic_read(&rwb->readers); r < 0;) {
|
||||||
/* Fully-ordered if cmpxchg() succeeds, provides ACQUIRE */
|
if (likely(atomic_try_cmpxchg_acquire(&rwb->readers, &r, r + 1)))
|
||||||
if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1)))
|
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -187,7 +186,7 @@ static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* _release() is needed in case that reader is in fast path, pairing
|
* _release() is needed in case that reader is in fast path, pairing
|
||||||
* with atomic_try_cmpxchg() in rwbase_read_trylock(), provides RELEASE
|
* with atomic_try_cmpxchg_acquire() in rwbase_read_trylock().
|
||||||
*/
|
*/
|
||||||
(void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
|
(void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
|
||||||
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
|
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
|
||||||
|
|
Loading…
Add table
Reference in a new issue