diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 87bb235c3448..78f04a232d3a 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -54,6 +54,11 @@ struct pv_node {
 	u8			state;
 };
 
+/*
+ * Include queued spinlock statistics code
+ */
+#include "qspinlock_stat.h"
+
 /*
  * By replacing the regular queued_spin_trylock() with the function below,
  * it will be called once when a lock waiter enter the PV slowpath before
@@ -65,9 +70,11 @@ struct pv_node {
 static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
 {
 	struct __qspinlock *l = (void *)lock;
+	int ret = !(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
+		   (cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0);
 
-	return !(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
-		(cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0);
+	qstat_inc(qstat_pv_lock_stealing, ret);
+	return ret;
 }
 
 /*
@@ -137,11 +144,6 @@ static __always_inline int trylock_clear_pending(struct qspinlock *lock)
 }
 #endif /* _Q_PENDING_BITS == 8 */
 
-/*
- * Include queued spinlock statistics code
- */
-#include "qspinlock_stat.h"
-
 /*
  * Lock and MCS node addresses hash table for fast lookup
  *
diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h
index 640dcecdd1df..869988d46124 100644
--- a/kernel/locking/qspinlock_stat.h
+++ b/kernel/locking/qspinlock_stat.h
@@ -279,19 +279,6 @@ static inline void __pv_wait(u8 *ptr, u8 val)
 #define pv_kick(c)	__pv_kick(c)
 #define pv_wait(p, v)	__pv_wait(p, v)
 
-/*
- * PV unfair trylock count tracking function
- */
-static inline int qstat_spin_steal_lock(struct qspinlock *lock)
-{
-	int ret = pv_queued_spin_steal_lock(lock);
-
-	qstat_inc(qstat_pv_lock_stealing, ret);
-	return ret;
-}
-#undef queued_spin_trylock
-#define queued_spin_trylock(l)	qstat_spin_steal_lock(l)
-
 #else /* CONFIG_QUEUED_LOCK_STAT */
 
 static inline void qstat_inc(enum qlock_stats stat, bool cond)	{ }
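
For reference, below is a minimal userspace sketch of the pattern the patch adopts: the lock-stealing trylock computes its result once, feeds that same result to the statistics counter, and returns it to the caller, instead of wrapping the trylock in a separate counting function as the removed qstat_spin_steal_lock() did. All names and types here (demo_lock, demo_stat_inc, demo_steal_lock) are simplified stand-ins for illustration only, not the kernel's qspinlock internals.

/* Build with: cc -std=c11 demo.c */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the locked byte of the qspinlock word. */
struct demo_lock { atomic_uchar locked; };

/* Stand-in for the qstat_pv_lock_stealing event counter. */
static atomic_ulong demo_lock_stealing;

/* Stand-in for qstat_inc(): bump the counter only when cond is true. */
static inline void demo_stat_inc(atomic_ulong *stat, bool cond)
{
	if (cond)
		atomic_fetch_add(stat, 1);
}

/*
 * Stand-in for pv_queued_spin_steal_lock(): try to grab a free lock,
 * record a successful steal inline, and hand the same result back.
 */
static inline bool demo_steal_lock(struct demo_lock *lock)
{
	unsigned char expected = 0;
	bool ret = atomic_load(&lock->locked) == 0 &&
		   atomic_compare_exchange_strong(&lock->locked, &expected, 1);

	demo_stat_inc(&demo_lock_stealing, ret);
	return ret;
}

int main(void)
{
	struct demo_lock lock = { 0 };

	printf("first steal:  %d\n", demo_steal_lock(&lock));	/* 1: lock was free */
	printf("second steal: %d\n", demo_steal_lock(&lock));	/* 0: already held */
	printf("steals counted: %lu\n",
	       (unsigned long)atomic_load(&demo_lock_stealing));
	return 0;
}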