locking/barriers: Introduce smp_cond_load_relaxed() and atomic_cond_read_relaxed()
Whilst we currently provide smp_cond_load_acquire() and
atomic_cond_read_acquire(), there are cases where the ACQUIRE semantics are
not required because of a subsequent fence or release operation once the
conditional loop has exited.

This patch adds relaxed versions of the conditional spinning primitives to
avoid unnecessary barrier overhead on architectures such as arm64.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Waiman Long <longman@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: boqun.feng@gmail.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1524738868-31318-2-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0644f186fc
commit fcfdfe30e3

3 changed files with 25 additions and 6 deletions
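The hunks rendered below cover only include/linux/atomic.h. For context, the
generic fallbacks this commit introduces in include/asm-generic/barrier.h look
roughly like the following sketch (reconstructed, not copied from the rendered
diff): smp_cond_load_relaxed() spins with plain READ_ONCE() loads and
cpu_relax(), and smp_cond_load_acquire() is layered on top of it, appending
smp_acquire__after_ctrl_dep() so the ACQUIRE ordering is paid for only once,
after the loop has exited.

#ifndef smp_cond_load_relaxed
/* Spin with no ordering guarantees; VAL names the loaded value in cond_expr. */
#define smp_cond_load_relaxed(ptr, cond_expr) ({	\
	typeof(ptr) __PTR = (ptr);			\
	typeof(*ptr) VAL;				\
	for (;;) {					\
		VAL = READ_ONCE(*__PTR);		\
		if (cond_expr)				\
			break;				\
		cpu_relax();				\
	}						\
	VAL;						\
})
#endif

#ifndef smp_cond_load_acquire
/* Relaxed spin, then a single acquire barrier once the condition holds. */
#define smp_cond_load_acquire(ptr, cond_expr) ({	\
	typeof(*ptr) _val;				\
	_val = smp_cond_load_relaxed(ptr, cond_expr);	\
	smp_acquire__after_ctrl_dep();			\
	_val;						\
})
#endif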
include/linux/atomic.h

@@ -654,6 +654,7 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 }
 #endif
 
+#define atomic_cond_read_relaxed(v, c)	smp_cond_load_relaxed(&(v)->counter, (c))
 #define atomic_cond_read_acquire(v, c)	smp_cond_load_acquire(&(v)->counter, (c))
 
 #ifdef CONFIG_GENERIC_ATOMIC64

@@ -1075,6 +1076,7 @@ static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v)
 }
 #endif
 
+#define atomic64_cond_read_relaxed(v, c)	smp_cond_load_relaxed(&(v)->counter, (c))
 #define atomic64_cond_read_acquire(v, c)	smp_cond_load_acquire(&(v)->counter, (c))
 
 #include <asm-generic/atomic-long.h>
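A hedged usage sketch (hypothetical caller, not part of this commit): when the
spinning code is followed by a full fence or a release operation anyway, the
relaxed primitive avoids paying for ACQUIRE semantics on loop exit:

/* Hypothetical example: wait for a lock word to clear, then fence. */
static void wait_for_unlock(atomic_t *lock)
{
	/*
	 * Spin until the lock word reads zero. VAL is the value
	 * loaded on each iteration; no barrier is implied here.
	 */
	atomic_cond_read_relaxed(lock, !VAL);

	/*
	 * The subsequent full barrier (or a later release store)
	 * supplies the ordering that ACQUIRE would otherwise give,
	 * which is exactly the case the commit message describes.
	 */
	smp_mb();
}

This matters on weakly ordered architectures such as arm64, where the ACQUIRE
variant would add a barrier that the later fence makes redundant.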