mirror of
https://github.com/Fishwaldo/linux-bl808.git
synced 2025-03-30 10:55:03 +00:00
locking/refcount: Define constants for saturation and max refcount values
The REFCOUNT_FULL implementation uses a different saturation point than the x86 implementation, which means that the shared refcount code in lib/refcount.c (e.g. refcount_dec_not_one()) needs to be aware of the difference. Rather than duplicate the definitions from the lkdtm driver, instead move them into <linux/refcount.h> and update all references accordingly. Signed-off-by: Will Deacon <will@kernel.org> Reviewed-by: Ard Biesheuvel <ardb@kernel.org> Reviewed-by: Kees Cook <keescook@chromium.org> Tested-by: Hanjun Guo <guohanjun@huawei.com> Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org> Cc: Elena Reshetova <elena.reshetova@intel.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: https://lkml.kernel.org/r/20191121115902.2551-2-will@kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
3ef240eaff
commit
23e6b169c9
3 changed files with 29 additions and 26 deletions
|
@@ -6,14 +6,6 @@
|
||||||
#include "lkdtm.h"
|
#include "lkdtm.h"
|
||||||
#include <linux/refcount.h>
|
#include <linux/refcount.h>
|
||||||
|
|
||||||
#ifdef CONFIG_REFCOUNT_FULL
|
|
||||||
#define REFCOUNT_MAX (UINT_MAX - 1)
|
|
||||||
#define REFCOUNT_SATURATED UINT_MAX
|
|
||||||
#else
|
|
||||||
#define REFCOUNT_MAX INT_MAX
|
|
||||||
#define REFCOUNT_SATURATED (INT_MIN / 2)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
static void overflow_check(refcount_t *ref)
|
static void overflow_check(refcount_t *ref)
|
||||||
{
|
{
|
||||||
switch (refcount_read(ref)) {
|
switch (refcount_read(ref)) {
|
||||||
|
|
|
@@ -4,6 +4,7 @@
|
||||||
|
|
||||||
#include <linux/atomic.h>
|
#include <linux/atomic.h>
|
||||||
#include <linux/compiler.h>
|
#include <linux/compiler.h>
|
||||||
|
#include <linux/limits.h>
|
||||||
#include <linux/spinlock_types.h>
|
#include <linux/spinlock_types.h>
|
||||||
|
|
||||||
struct mutex;
|
struct mutex;
|
||||||
|
@@ -12,7 +13,7 @@ struct mutex;
|
||||||
* struct refcount_t - variant of atomic_t specialized for reference counts
|
* struct refcount_t - variant of atomic_t specialized for reference counts
|
||||||
* @refs: atomic_t counter field
|
* @refs: atomic_t counter field
|
||||||
*
|
*
|
||||||
* The counter saturates at UINT_MAX and will not move once
|
* The counter saturates at REFCOUNT_SATURATED and will not move once
|
||||||
* there. This avoids wrapping the counter and causing 'spurious'
|
* there. This avoids wrapping the counter and causing 'spurious'
|
||||||
* use-after-free bugs.
|
* use-after-free bugs.
|
||||||
*/
|
*/
|
||||||
|
@@ -56,6 +57,9 @@ extern void refcount_dec_checked(refcount_t *r);
|
||||||
|
|
||||||
#ifdef CONFIG_REFCOUNT_FULL
|
#ifdef CONFIG_REFCOUNT_FULL
|
||||||
|
|
||||||
|
#define REFCOUNT_MAX (UINT_MAX - 1)
|
||||||
|
#define REFCOUNT_SATURATED UINT_MAX
|
||||||
|
|
||||||
#define refcount_add_not_zero refcount_add_not_zero_checked
|
#define refcount_add_not_zero refcount_add_not_zero_checked
|
||||||
#define refcount_add refcount_add_checked
|
#define refcount_add refcount_add_checked
|
||||||
|
|
||||||
|
@@ -68,6 +72,10 @@ extern void refcount_dec_checked(refcount_t *r);
|
||||||
#define refcount_dec refcount_dec_checked
|
#define refcount_dec refcount_dec_checked
|
||||||
|
|
||||||
#else
|
#else
|
||||||
|
|
||||||
|
#define REFCOUNT_MAX INT_MAX
|
||||||
|
#define REFCOUNT_SATURATED (INT_MIN / 2)
|
||||||
|
|
||||||
# ifdef CONFIG_ARCH_HAS_REFCOUNT
|
# ifdef CONFIG_ARCH_HAS_REFCOUNT
|
||||||
# include <asm/refcount.h>
|
# include <asm/refcount.h>
|
||||||
# else
|
# else
|
||||||
|
|
|
@@ -5,8 +5,8 @@
|
||||||
* The interface matches the atomic_t interface (to aid in porting) but only
|
* The interface matches the atomic_t interface (to aid in porting) but only
|
||||||
* provides the few functions one should use for reference counting.
|
* provides the few functions one should use for reference counting.
|
||||||
*
|
*
|
||||||
* It differs in that the counter saturates at UINT_MAX and will not move once
|
* It differs in that the counter saturates at REFCOUNT_SATURATED and will not
|
||||||
* there. This avoids wrapping the counter and causing 'spurious'
|
* move once there. This avoids wrapping the counter and causing 'spurious'
|
||||||
* use-after-free issues.
|
* use-after-free issues.
|
||||||
*
|
*
|
||||||
* Memory ordering rules are slightly relaxed wrt regular atomic_t functions
|
* Memory ordering rules are slightly relaxed wrt regular atomic_t functions
|
||||||
|
@@ -48,7 +48,7 @@
|
||||||
* @i: the value to add to the refcount
|
* @i: the value to add to the refcount
|
||||||
* @r: the refcount
|
* @r: the refcount
|
||||||
*
|
*
|
||||||
* Will saturate at UINT_MAX and WARN.
|
* Will saturate at REFCOUNT_SATURATED and WARN.
|
||||||
*
|
*
|
||||||
* Provides no memory ordering, it is assumed the caller has guaranteed the
|
* Provides no memory ordering, it is assumed the caller has guaranteed the
|
||||||
* object memory to be stable (RCU, etc.). It does provide a control dependency
|
* object memory to be stable (RCU, etc.). It does provide a control dependency
|
||||||
|
@@ -69,16 +69,17 @@ bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
|
||||||
if (!val)
|
if (!val)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
if (unlikely(val == UINT_MAX))
|
if (unlikely(val == REFCOUNT_SATURATED))
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
new = val + i;
|
new = val + i;
|
||||||
if (new < val)
|
if (new < val)
|
||||||
new = UINT_MAX;
|
new = REFCOUNT_SATURATED;
|
||||||
|
|
||||||
} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
|
} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
|
||||||
|
|
||||||
WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
|
WARN_ONCE(new == REFCOUNT_SATURATED,
|
||||||
|
"refcount_t: saturated; leaking memory.\n");
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@@ -89,7 +90,7 @@ EXPORT_SYMBOL(refcount_add_not_zero_checked);
|
||||||
* @i: the value to add to the refcount
|
* @i: the value to add to the refcount
|
||||||
* @r: the refcount
|
* @r: the refcount
|
||||||
*
|
*
|
||||||
* Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
|
* Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
|
||||||
*
|
*
|
||||||
* Provides no memory ordering, it is assumed the caller has guaranteed the
|
* Provides no memory ordering, it is assumed the caller has guaranteed the
|
||||||
* object memory to be stable (RCU, etc.). It does provide a control dependency
|
* object memory to be stable (RCU, etc.). It does provide a control dependency
|
||||||
|
@@ -110,7 +111,8 @@ EXPORT_SYMBOL(refcount_add_checked);
|
||||||
* refcount_inc_not_zero_checked - increment a refcount unless it is 0
|
* refcount_inc_not_zero_checked - increment a refcount unless it is 0
|
||||||
* @r: the refcount to increment
|
* @r: the refcount to increment
|
||||||
*
|
*
|
||||||
* Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
|
* Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
|
||||||
|
* and WARN.
|
||||||
*
|
*
|
||||||
* Provides no memory ordering, it is assumed the caller has guaranteed the
|
* Provides no memory ordering, it is assumed the caller has guaranteed the
|
||||||
* object memory to be stable (RCU, etc.). It does provide a control dependency
|
* object memory to be stable (RCU, etc.). It does provide a control dependency
|
||||||
|
@@ -133,7 +135,8 @@ bool refcount_inc_not_zero_checked(refcount_t *r)
|
||||||
|
|
||||||
} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
|
} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
|
||||||
|
|
||||||
WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
|
WARN_ONCE(new == REFCOUNT_SATURATED,
|
||||||
|
"refcount_t: saturated; leaking memory.\n");
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@@ -143,7 +146,7 @@ EXPORT_SYMBOL(refcount_inc_not_zero_checked);
|
||||||
* refcount_inc_checked - increment a refcount
|
* refcount_inc_checked - increment a refcount
|
||||||
* @r: the refcount to increment
|
* @r: the refcount to increment
|
||||||
*
|
*
|
||||||
* Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
|
* Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
|
||||||
*
|
*
|
||||||
* Provides no memory ordering, it is assumed the caller already has a
|
* Provides no memory ordering, it is assumed the caller already has a
|
||||||
* reference on the object.
|
* reference on the object.
|
||||||
|
@@ -164,7 +167,7 @@ EXPORT_SYMBOL(refcount_inc_checked);
|
||||||
*
|
*
|
||||||
* Similar to atomic_dec_and_test(), but it will WARN, return false and
|
* Similar to atomic_dec_and_test(), but it will WARN, return false and
|
||||||
* ultimately leak on underflow and will fail to decrement when saturated
|
* ultimately leak on underflow and will fail to decrement when saturated
|
||||||
* at UINT_MAX.
|
* at REFCOUNT_SATURATED.
|
||||||
*
|
*
|
||||||
* Provides release memory ordering, such that prior loads and stores are done
|
* Provides release memory ordering, such that prior loads and stores are done
|
||||||
* before, and provides an acquire ordering on success such that free()
|
* before, and provides an acquire ordering on success such that free()
|
||||||
|
@@ -182,7 +185,7 @@ bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
|
||||||
unsigned int new, val = atomic_read(&r->refs);
|
unsigned int new, val = atomic_read(&r->refs);
|
||||||
|
|
||||||
do {
|
do {
|
||||||
if (unlikely(val == UINT_MAX))
|
if (unlikely(val == REFCOUNT_SATURATED))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
new = val - i;
|
new = val - i;
|
||||||
|
@@ -207,7 +210,7 @@ EXPORT_SYMBOL(refcount_sub_and_test_checked);
|
||||||
* @r: the refcount
|
* @r: the refcount
|
||||||
*
|
*
|
||||||
* Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
|
* Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
|
||||||
* decrement when saturated at UINT_MAX.
|
* decrement when saturated at REFCOUNT_SATURATED.
|
||||||
*
|
*
|
||||||
* Provides release memory ordering, such that prior loads and stores are done
|
* Provides release memory ordering, such that prior loads and stores are done
|
||||||
* before, and provides an acquire ordering on success such that free()
|
* before, and provides an acquire ordering on success such that free()
|
||||||
|
@@ -226,7 +229,7 @@ EXPORT_SYMBOL(refcount_dec_and_test_checked);
|
||||||
* @r: the refcount
|
* @r: the refcount
|
||||||
*
|
*
|
||||||
* Similar to atomic_dec(), it will WARN on underflow and fail to decrement
|
* Similar to atomic_dec(), it will WARN on underflow and fail to decrement
|
||||||
* when saturated at UINT_MAX.
|
* when saturated at REFCOUNT_SATURATED.
|
||||||
*
|
*
|
||||||
* Provides release memory ordering, such that prior loads and stores are done
|
* Provides release memory ordering, such that prior loads and stores are done
|
||||||
* before.
|
* before.
|
||||||
|
@@ -277,7 +280,7 @@ bool refcount_dec_not_one(refcount_t *r)
|
||||||
unsigned int new, val = atomic_read(&r->refs);
|
unsigned int new, val = atomic_read(&r->refs);
|
||||||
|
|
||||||
do {
|
do {
|
||||||
if (unlikely(val == UINT_MAX))
|
if (unlikely(val == REFCOUNT_SATURATED))
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
if (val == 1)
|
if (val == 1)
|
||||||
|
@@ -302,7 +305,7 @@ EXPORT_SYMBOL(refcount_dec_not_one);
|
||||||
* @lock: the mutex to be locked
|
* @lock: the mutex to be locked
|
||||||
*
|
*
|
||||||
* Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
|
* Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
|
||||||
* to decrement when saturated at UINT_MAX.
|
* to decrement when saturated at REFCOUNT_SATURATED.
|
||||||
*
|
*
|
||||||
* Provides release memory ordering, such that prior loads and stores are done
|
* Provides release memory ordering, such that prior loads and stores are done
|
||||||
* before, and provides a control dependency such that free() must come after.
|
* before, and provides a control dependency such that free() must come after.
|
||||||
|
@@ -333,7 +336,7 @@ EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
|
||||||
* @lock: the spinlock to be locked
|
* @lock: the spinlock to be locked
|
||||||
*
|
*
|
||||||
* Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
|
* Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
|
||||||
* decrement when saturated at UINT_MAX.
|
* decrement when saturated at REFCOUNT_SATURATED.
|
||||||
*
|
*
|
||||||
* Provides release memory ordering, such that prior loads and stores are done
|
* Provides release memory ordering, such that prior loads and stores are done
|
||||||
* before, and provides a control dependency such that free() must come after.
|
* before, and provides a control dependency such that free() must come after.
|
||||||
|
|
Loading…
Add table
Reference in a new issue