locking: Remove ACCESS_ONCE() usage
With the new standardized functions, we can replace all ACCESS_ONCE()
calls across relevant locking code - this includes lockref and seqlock
while at it. ACCESS_ONCE() does not work reliably on non-scalar types.
For example, gcc 4.6 and 4.7 might remove the volatile tag for such
accesses during the SRA (scalar replacement of aggregates) step:

  https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145

Update to the new calls regardless of whether the type is scalar; this
is cleaner than having three alternatives.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1424662301.6539.18.camel@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
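To make the failure mode concrete, here is a minimal sketch of the two approaches. This is illustrative only: the real macros live in include/linux/compiler.h, and the *_STYLE names plus the read_once_size() helper below are hypothetical stand-ins, not the kernel's exact code. ACCESS_ONCE() is a bare volatile cast, which gcc 4.6/4.7 could strip of its volatile qualifier during SRA when the operand is an aggregate; the READ_ONCE() approach instead funnels every load through a size dispatch, so the volatile access is always scalar-sized:

#include <string.h>

/* ACCESS_ONCE-style: a bare volatile cast. For non-scalar operands,
 * gcc 4.6/4.7 could drop the volatile qualifier during SRA. */
#define ACCESS_ONCE_STYLE(x)	(*(volatile __typeof__(x) *)&(x))

/* READ_ONCE-style: dispatch on size so each load that must stay
 * volatile is a scalar access; odd sizes fall back to a plain copy. */
static inline void read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(char *)res      = *(const volatile char *)p; break;
	case 2: *(short *)res     = *(const volatile short *)p; break;
	case 4: *(int *)res       = *(const volatile int *)p; break;
	case 8: *(long long *)res = *(const volatile long long *)p; break;
	default:
		memcpy(res, (const void *)p, size);
	}
}

#define READ_ONCE_STYLE(x)						\
({									\
	union { __typeof__(x) val; char c[sizeof(x)]; } u;		\
	read_once_size(&(x), u.c, sizeof(x));				\
	u.val;								\
})

With this shape, a READ_ONCE_STYLE() load of an aggregate such as lockref's lock/count union still goes through a volatile scalar access inside the helper, whereas the same load through ACCESS_ONCE_STYLE() is exactly the pattern the gcc bug above miscompiles.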
parent 2ae7902681
commit 4d3199e4ca
6 changed files with 23 additions and 23 deletions
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -279,7 +279,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
  */
 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 {
-	long old, count = ACCESS_ONCE(sem->count);
+	long old, count = READ_ONCE(sem->count);
 
 	while (true) {
 		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
@@ -304,9 +304,9 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 		return false;
 
 	rcu_read_lock();
-	owner = ACCESS_ONCE(sem->owner);
+	owner = READ_ONCE(sem->owner);
 	if (!owner) {
-		long count = ACCESS_ONCE(sem->count);
+		long count = READ_ONCE(sem->count);
 		/*
 		 * If sem->owner is not set, yet we have just recently entered the
 		 * slowpath with the lock being active, then there is a possibility
@@ -385,7 +385,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 		goto done;
 
 	while (true) {
-		owner = ACCESS_ONCE(sem->owner);
+		owner = READ_ONCE(sem->owner);
 		if (owner && !rwsem_spin_on_owner(sem, owner))
 			break;
 
@@ -459,7 +459,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 
 	/* we're now waiting on the lock, but no longer actively locking */
 	if (waiting) {
-		count = ACCESS_ONCE(sem->count);
+		count = READ_ONCE(sem->count);
 
 		/*
 		 * If there were already threads queued before us and there are
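The first hunk above is the classic read-once-then-cmpxchg idiom: sample the lock word exactly once, then retry a compare-and-swap until it either succeeds or the state rules out taking the lock. A rough, self-contained sketch of that pattern, using C11 atomics instead of the kernel's cmpxchg() and with the type and bias constants stubbed to illustrative values:

#include <stdatomic.h>
#include <stdbool.h>

/* Stubs: the real layout and bias values are kernel/arch specific. */
#define RWSEM_WAITING_BIAS		(-1L)	/* illustrative value only */
#define RWSEM_ACTIVE_WRITE_BIAS		(1L)	/* illustrative value only */

struct rw_semaphore { _Atomic long count; };

static bool try_write_lock_unqueued(struct rw_semaphore *sem)
{
	/* One explicit load up front, playing the role of READ_ONCE(). */
	long count = atomic_load_explicit(&sem->count, memory_order_relaxed);

	while (true) {
		/* The lock can only be grabbed when no one holds it. */
		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
			return false;
		/* On failure the CAS refreshes 'count' with the current
		 * value, mirroring the kernel's 'count = old' retry. */
		if (atomic_compare_exchange_weak(&sem->count, &count,
						 count + RWSEM_ACTIVE_WRITE_BIAS))
			return true;
	}
}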