locking/atomics: Flip fallbacks and instrumentation

Currently instrumentation of atomic primitives is done at the architecture
level, while composites or fallbacks are provided at the generic level.

The result is that there are no uninstrumented variants of the
fallbacks. Since such variants are now needed in order to isolate text
poke from any form of instrumentation, invert this ordering.

Doing this means moving the instrumentation into the generic code as
well as having (for now) two variants of the fallbacks.
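
Concretely, the raw arch_atomic_*() operations (arch code plus the
generated fallbacks) now carry no instrumentation, and the generic
layer adds it on top. A rough sketch of an instrumented wrapper, with
illustrative hook names:

  static __always_inline void
  atomic_add(int i, atomic_t *v)
  {
          instrument_atomic_write(v, sizeof(*v)); /* KASAN/KCSAN-style hook; illustrative */
          arch_atomic_add(i, v);
  }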

Notes:

 - the various *cond_read* primitives are not proper fallbacks
   and got moved into linux/atomic.h. No arch_ variants are
   generated because the base primitives smp_cond_load*()
   are instrumented (see the reference block after these notes).

 - once all architectures are moved over to arch_atomic_, one of the
   fallback variants can be removed, reclaiming some 2300 lines.

 - atomic_{read,set}*() are no longer double-instrumented
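
For reference, after the move the *cond_read* wrappers live in
linux/atomic.h in essentially the form removed from the fallback
generator below:

  #define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
  #define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
  #define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
  #define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))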

Reported-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lkml.kernel.org/r/20200505134058.769149955@linutronix.de
commit 37f8173dd8 (parent 765dcd2099)
Author: Peter Zijlstra <peterz@infradead.org>
Date: 2020-01-24 22:13:03 +01:00
Committer: Thomas Gleixner <tglx@linutronix.de>

28 changed files with 2403 additions and 82 deletions

@@ -1,8 +1,8 @@
cat <<EOF
static __always_inline ${ret}
-${atomic}_${pfx}${name}${sfx}_acquire(${params})
+${arch}${atomic}_${pfx}${name}${sfx}_acquire(${params})
{
-${ret} ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+${ret} ret = ${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
__atomic_acquire_fence();
return ret;
}
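
Substituting, for example, ${arch}=arch_, ${atomic}=atomic,
${name}=fetch_add and empty ${pfx}/${sfx}, the template above now
generates (illustrative expansion):

  static __always_inline int
  arch_atomic_fetch_add_acquire(int i, atomic_t *v)
  {
          int ret = arch_atomic_fetch_add_relaxed(i, v);
          __atomic_acquire_fence();
          return ret;
  }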

@@ -1,6 +1,6 @@
cat <<EOF
/**
-* ${atomic}_add_negative - add and test if negative
+* ${arch}${atomic}_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer of type ${atomic}_t
*
@@ -9,8 +9,8 @@ cat <<EOF
* result is greater than or equal to zero.
*/
static __always_inline bool
-${atomic}_add_negative(${int} i, ${atomic}_t *v)
+${arch}${atomic}_add_negative(${int} i, ${atomic}_t *v)
{
-return ${atomic}_add_return(i, v) < 0;
+return ${arch}${atomic}_add_return(i, v) < 0;
}
EOF

@@ -1,6 +1,6 @@
cat << EOF
/**
-* ${atomic}_add_unless - add unless the number is already a given value
+* ${arch}${atomic}_add_unless - add unless the number is already a given value
* @v: pointer of type ${atomic}_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -9,8 +9,8 @@ cat << EOF
* Returns true if the addition was done.
*/
static __always_inline bool
-${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+${arch}${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
-return ${atomic}_fetch_add_unless(v, a, u) != u;
+return ${arch}${atomic}_fetch_add_unless(v, a, u) != u;
}
EOF

@@ -1,7 +1,7 @@
cat <<EOF
static __always_inline ${ret}
-${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
+${arch}${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
{
-${retstmt}${atomic}_${pfx}and${sfx}${order}(~i, v);
+${retstmt}${arch}${atomic}_${pfx}and${sfx}${order}(~i, v);
}
EOF

@@ -1,7 +1,7 @@
cat <<EOF
static __always_inline ${ret}
-${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
+${arch}${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
{
-${retstmt}${atomic}_${pfx}sub${sfx}${order}(1, v);
+${retstmt}${arch}${atomic}_${pfx}sub${sfx}${order}(1, v);
}
EOF

@@ -1,6 +1,6 @@
cat <<EOF
/**
-* ${atomic}_dec_and_test - decrement and test
+* ${arch}${atomic}_dec_and_test - decrement and test
* @v: pointer of type ${atomic}_t
*
* Atomically decrements @v by 1 and
@@ -8,8 +8,8 @@ cat <<EOF
* cases.
*/
static __always_inline bool
-${atomic}_dec_and_test(${atomic}_t *v)
+${arch}${atomic}_dec_and_test(${atomic}_t *v)
{
-return ${atomic}_dec_return(v) == 0;
+return ${arch}${atomic}_dec_return(v) == 0;
}
EOF

@@ -1,14 +1,14 @@
cat <<EOF
static __always_inline ${ret}
-${atomic}_dec_if_positive(${atomic}_t *v)
+${arch}${atomic}_dec_if_positive(${atomic}_t *v)
{
-${int} dec, c = ${atomic}_read(v);
+${int} dec, c = ${arch}${atomic}_read(v);
do {
dec = c - 1;
if (unlikely(dec < 0))
break;
-} while (!${atomic}_try_cmpxchg(v, &c, dec));
+} while (!${arch}${atomic}_try_cmpxchg(v, &c, dec));
return dec;
}

@@ -1,13 +1,13 @@
cat <<EOF
static __always_inline bool
-${atomic}_dec_unless_positive(${atomic}_t *v)
+${arch}${atomic}_dec_unless_positive(${atomic}_t *v)
{
-${int} c = ${atomic}_read(v);
+${int} c = ${arch}${atomic}_read(v);
do {
if (unlikely(c > 0))
return false;
-} while (!${atomic}_try_cmpxchg(v, &c, c - 1));
+} while (!${arch}${atomic}_try_cmpxchg(v, &c, c - 1));
return true;
}

@@ -1,10 +1,10 @@
cat <<EOF
static __always_inline ${ret}
-${atomic}_${pfx}${name}${sfx}(${params})
+${arch}${atomic}_${pfx}${name}${sfx}(${params})
{
${ret} ret;
__atomic_pre_full_fence();
-ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+ret = ${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
__atomic_post_full_fence();
return ret;
}
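
This is the fully-ordered variant, built by bracketing the relaxed op
with full fences; the same substitution as above would generate
(illustrative expansion):

  static __always_inline int
  arch_atomic_fetch_add(int i, atomic_t *v)
  {
          int ret;
          __atomic_pre_full_fence();
          ret = arch_atomic_fetch_add_relaxed(i, v);
          __atomic_post_full_fence();
          return ret;
  }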

@@ -1,6 +1,6 @@
cat << EOF
/**
-* ${atomic}_fetch_add_unless - add unless the number is already a given value
+* ${arch}${atomic}_fetch_add_unless - add unless the number is already a given value
* @v: pointer of type ${atomic}_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -9,14 +9,14 @@ cat << EOF
* Returns original value of @v
*/
static __always_inline ${int}
-${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+${arch}${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
-${int} c = ${atomic}_read(v);
+${int} c = ${arch}${atomic}_read(v);
do {
if (unlikely(c == u))
break;
-} while (!${atomic}_try_cmpxchg(v, &c, c + a));
+} while (!${arch}${atomic}_try_cmpxchg(v, &c, c + a));
return c;
}

@@ -1,7 +1,7 @@
cat <<EOF
static __always_inline ${ret}
-${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
+${arch}${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
{
-${retstmt}${atomic}_${pfx}add${sfx}${order}(1, v);
+${retstmt}${arch}${atomic}_${pfx}add${sfx}${order}(1, v);
}
EOF

@@ -1,6 +1,6 @@
cat <<EOF
/**
-* ${atomic}_inc_and_test - increment and test
+* ${arch}${atomic}_inc_and_test - increment and test
* @v: pointer of type ${atomic}_t
*
* Atomically increments @v by 1
@@ -8,8 +8,8 @@ cat <<EOF
* other cases.
*/
static __always_inline bool
-${atomic}_inc_and_test(${atomic}_t *v)
+${arch}${atomic}_inc_and_test(${atomic}_t *v)
{
-return ${atomic}_inc_return(v) == 0;
+return ${arch}${atomic}_inc_return(v) == 0;
}
EOF

@@ -1,14 +1,14 @@
cat <<EOF
/**
-* ${atomic}_inc_not_zero - increment unless the number is zero
+* ${arch}${atomic}_inc_not_zero - increment unless the number is zero
* @v: pointer of type ${atomic}_t
*
* Atomically increments @v by 1, if @v is non-zero.
* Returns true if the increment was done.
*/
static __always_inline bool
-${atomic}_inc_not_zero(${atomic}_t *v)
+${arch}${atomic}_inc_not_zero(${atomic}_t *v)
{
-return ${atomic}_add_unless(v, 1, 0);
+return ${arch}${atomic}_add_unless(v, 1, 0);
}
EOF

@@ -1,13 +1,13 @@
cat <<EOF
static __always_inline bool
-${atomic}_inc_unless_negative(${atomic}_t *v)
+${arch}${atomic}_inc_unless_negative(${atomic}_t *v)
{
-${int} c = ${atomic}_read(v);
+${int} c = ${arch}${atomic}_read(v);
do {
if (unlikely(c < 0))
return false;
-} while (!${atomic}_try_cmpxchg(v, &c, c + 1));
+} while (!${arch}${atomic}_try_cmpxchg(v, &c, c + 1));
return true;
}

@@ -1,6 +1,6 @@
cat <<EOF
static __always_inline ${ret}
-${atomic}_read_acquire(const ${atomic}_t *v)
+${arch}${atomic}_read_acquire(const ${atomic}_t *v)
{
return smp_load_acquire(&(v)->counter);
}

@@ -1,8 +1,8 @@
cat <<EOF
static __always_inline ${ret}
-${atomic}_${pfx}${name}${sfx}_release(${params})
+${arch}${atomic}_${pfx}${name}${sfx}_release(${params})
{
__atomic_release_fence();
-${retstmt}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+${retstmt}${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
}
EOF

@@ -1,6 +1,6 @@
cat <<EOF
static __always_inline void
-${atomic}_set_release(${atomic}_t *v, ${int} i)
+${arch}${atomic}_set_release(${atomic}_t *v, ${int} i)
{
smp_store_release(&(v)->counter, i);
}

@@ -1,6 +1,6 @@
cat <<EOF
/**
-* ${atomic}_sub_and_test - subtract value from variable and test result
+* ${arch}${atomic}_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type ${atomic}_t
*
@@ -9,8 +9,8 @@ cat <<EOF
* other cases.
*/
static __always_inline bool
-${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
+${arch}${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
{
-return ${atomic}_sub_return(i, v) == 0;
+return ${arch}${atomic}_sub_return(i, v) == 0;
}
EOF

View file

@@ -1,9 +1,9 @@
cat <<EOF
static __always_inline bool
-${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
+${arch}${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
{
${int} r, o = *old;
-r = ${atomic}_cmpxchg${order}(v, o, new);
+r = ${arch}${atomic}_cmpxchg${order}(v, o, new);
if (unlikely(r != o))
*old = r;
return likely(r == o);
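
For ${arch}=arch_, ${atomic}=atomic and an empty ${order}, this
template expands to roughly:

  static __always_inline bool
  arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
  {
          int r, o = *old;
          r = arch_atomic_cmpxchg(v, o, new);
          if (unlikely(r != o))
                  *old = r;
          return likely(r == o);
  }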

@@ -2,10 +2,11 @@
# SPDX-License-Identifier: GPL-2.0
ATOMICDIR=$(dirname $0)
+ARCH=$2
. ${ATOMICDIR}/atomic-tbl.sh
-#gen_template_fallback(template, meta, pfx, name, sfx, order, atomic, int, args...)
+#gen_template_fallback(template, meta, pfx, name, sfx, order, arch, atomic, int, args...)
gen_template_fallback()
{
local template="$1"; shift
@@ -14,10 +15,11 @@ gen_template_fallback
local name="$1"; shift
local sfx="$1"; shift
local order="$1"; shift
+local arch="$1"; shift
local atomic="$1"; shift
local int="$1"; shift
-local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
+local atomicname="${arch}${atomic}_${pfx}${name}${sfx}${order}"
local ret="$(gen_ret_type "${meta}" "${int}")"
local retstmt="$(gen_ret_stmt "${meta}")"
@@ -32,7 +34,7 @@ gen_template_fallback
fi
}
-#gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
+#gen_proto_fallback(meta, pfx, name, sfx, order, arch, atomic, int, args...)
gen_proto_fallback()
{
local meta="$1"; shift
@@ -56,16 +58,17 @@ cat << EOF
EOF
}
-#gen_proto_order_variants(meta, pfx, name, sfx, atomic, int, args...)
+#gen_proto_order_variants(meta, pfx, name, sfx, arch, atomic, int, args...)
gen_proto_order_variants()
{
local meta="$1"; shift
local pfx="$1"; shift
local name="$1"; shift
local sfx="$1"; shift
-local atomic="$1"
+local arch="$1"
+local atomic="$2"
-local basename="${atomic}_${pfx}${name}${sfx}"
+local basename="${arch}${atomic}_${pfx}${name}${sfx}"
local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
@@ -94,7 +97,7 @@ gen_proto_order_variants
gen_basic_fallbacks "${basename}"
if [ ! -z "${template}" ]; then
-printf "#endif /* ${atomic}_${pfx}${name}${sfx} */\n\n"
+printf "#endif /* ${arch}${atomic}_${pfx}${name}${sfx} */\n\n"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
@@ -153,18 +156,15 @@ cat << EOF
EOF
-for xchg in "xchg" "cmpxchg" "cmpxchg64"; do
+for xchg in "${ARCH}xchg" "${ARCH}cmpxchg" "${ARCH}cmpxchg64"; do
gen_xchg_fallbacks "${xchg}"
done
grep '^[a-z]' "$1" | while read name meta args; do
-gen_proto "${meta}" "${name}" "atomic" "int" ${args}
+gen_proto "${meta}" "${name}" "${ARCH}" "atomic" "int" ${args}
done
cat <<EOF
-#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
-#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
@@ -172,12 +172,9 @@ cat <<EOF
EOF
grep '^[a-z]' "$1" | while read name meta args; do
-gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
+gen_proto "${meta}" "${name}" "${ARCH}" "atomic64" "s64" ${args}
done
cat <<EOF
-#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
-#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
#endif /* _LINUX_ATOMIC_FALLBACK_H */
EOF
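
With ARCH threaded through, the same script is now run twice (see the
driver script below), emitting the two fallback variants; for a single
op the generated variants differ only in the prefix, e.g.
(illustrative):

  /* linux/atomic-arch-fallback.h, generated with ARCH=arch_ */
  static __always_inline void
  arch_atomic_inc(atomic_t *v)
  {
          arch_atomic_add(1, v);
  }

  /* linux/atomic-fallback.h, generated without an ARCH argument */
  static __always_inline void
  atomic_inc(atomic_t *v)
  {
          atomic_add(1, v);
  }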

@@ -10,10 +10,11 @@ LINUXDIR=${ATOMICDIR}/../..
cat <<EOF |
gen-atomic-instrumented.sh asm-generic/atomic-instrumented.h
gen-atomic-long.sh asm-generic/atomic-long.h
+gen-atomic-fallback.sh linux/atomic-arch-fallback.h arch_
gen-atomic-fallback.sh linux/atomic-fallback.h
EOF
-while read script header; do
-/bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} > ${LINUXDIR}/include/${header}
+while read script header args; do
+/bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} ${args} > ${LINUXDIR}/include/${header}
HASH="$(sha1sum ${LINUXDIR}/include/${header})"
HASH="${HASH%% *}"
printf "// %s\n" "${HASH}" >> ${LINUXDIR}/include/${header}