Commit f3e615b4 authored by Mark Rutland, committed by Peter Zijlstra

locking/atomic: remove ARCH_ATOMIC remnants

Now that gen-atomic-fallback.sh is only used to generate the arch_*
fallbacks, we don't need to also generate the non-arch_* forms, and can
remove the infrastructure this required.

There is no change to any of the generated headers as a result of this
patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210713105253.7615-3-mark.rutland@arm.com
parent 47401d94
scripts/atomic/fallbacks/acquire

cat <<EOF
static __always_inline ${ret}
-${arch}${atomic}_${pfx}${name}${sfx}_acquire(${params})
+arch_${atomic}_${pfx}${name}${sfx}_acquire(${params})
{
-	${ret} ret = ${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+	${ret} ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
	__atomic_acquire_fence();
	return ret;
}
...
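For illustration (not part of the patch): instantiated with ${atomic}=atomic and ${int}=int for the fetch_add op, this template yields the fallback emitted into include/linux/atomic-arch-fallback.h:

	static __always_inline int
	arch_atomic_fetch_add_acquire(int i, atomic_t *v)
	{
		int ret = arch_atomic_fetch_add_relaxed(i, v);
		__atomic_acquire_fence();
		return ret;
	}

The acquire fence upgrades the relaxed op, so an architecture providing only the _relaxed form gets the _acquire form for free.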
scripts/atomic/fallbacks/add_negative

cat <<EOF
/**
- * ${arch}${atomic}_add_negative - add and test if negative
+ * arch_${atomic}_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type ${atomic}_t
 *
@@ -9,8 +9,8 @@ cat <<EOF
 * result is greater than or equal to zero.
 */
static __always_inline bool
-${arch}${atomic}_add_negative(${int} i, ${atomic}_t *v)
+arch_${atomic}_add_negative(${int} i, ${atomic}_t *v)
{
-	return ${arch}${atomic}_add_return(i, v) < 0;
+	return arch_${atomic}_add_return(i, v) < 0;
}
EOF
scripts/atomic/fallbacks/add_unless

cat << EOF
/**
- * ${arch}${atomic}_add_unless - add unless the number is already a given value
+ * arch_${atomic}_add_unless - add unless the number is already a given value
 * @v: pointer of type ${atomic}_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
@@ -9,8 +9,8 @@ cat << EOF
 * Returns true if the addition was done.
 */
static __always_inline bool
-${arch}${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+arch_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
-	return ${arch}${atomic}_fetch_add_unless(v, a, u) != u;
+	return arch_${atomic}_fetch_add_unless(v, a, u) != u;
}
EOF
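As an illustration, the atomic_t/int instantiation of this template is simply:

	static __always_inline bool
	arch_atomic_add_unless(atomic_t *v, int a, int u)
	{
		return arch_atomic_fetch_add_unless(v, a, u) != u;
	}

It reuses arch_atomic_fetch_add_unless (templated further below) and derives the boolean result by comparing the returned old value against @u.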
scripts/atomic/fallbacks/andnot

cat <<EOF
static __always_inline ${ret}
-${arch}${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
+arch_${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
{
-	${retstmt}${arch}${atomic}_${pfx}and${sfx}${order}(~i, v);
+	${retstmt}arch_${atomic}_${pfx}and${sfx}${order}(~i, v);
}
EOF
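${retstmt} expands to "return " for value-returning variants and to nothing for void ones (gen_ret_stmt in atomic-tbl.sh), so one template covers the whole family. For illustration, the atomic_t instantiations look like:

	/* void variant: ${retstmt} is empty */
	static __always_inline void
	arch_atomic_andnot(int i, atomic_t *v)
	{
		arch_atomic_and(~i, v);
	}

	/* value-returning variant: ${retstmt} is "return " */
	static __always_inline int
	arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
	{
		return arch_atomic_fetch_and_relaxed(~i, v);
	}

The dec and inc templates below follow the same pattern, built on sub/add of 1.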
scripts/atomic/fallbacks/dec

cat <<EOF
static __always_inline ${ret}
-${arch}${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
+arch_${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
{
-	${retstmt}${arch}${atomic}_${pfx}sub${sfx}${order}(1, v);
+	${retstmt}arch_${atomic}_${pfx}sub${sfx}${order}(1, v);
}
EOF
scripts/atomic/fallbacks/dec_and_test

cat <<EOF
/**
- * ${arch}${atomic}_dec_and_test - decrement and test
+ * arch_${atomic}_dec_and_test - decrement and test
 * @v: pointer of type ${atomic}_t
 *
 * Atomically decrements @v by 1 and
@@ -8,8 +8,8 @@ cat <<EOF
 * cases.
 */
static __always_inline bool
-${arch}${atomic}_dec_and_test(${atomic}_t *v)
+arch_${atomic}_dec_and_test(${atomic}_t *v)
{
-	return ${arch}${atomic}_dec_return(v) == 0;
+	return arch_${atomic}_dec_return(v) == 0;
}
EOF
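For atomic_t this emits the boolean test layered on the value-returning op:

	static __always_inline bool
	arch_atomic_dec_and_test(atomic_t *v)
	{
		return arch_atomic_dec_return(v) == 0;
	}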
scripts/atomic/fallbacks/dec_if_positive

cat <<EOF
static __always_inline ${ret}
-${arch}${atomic}_dec_if_positive(${atomic}_t *v)
+arch_${atomic}_dec_if_positive(${atomic}_t *v)
{
-	${int} dec, c = ${arch}${atomic}_read(v);
+	${int} dec, c = arch_${atomic}_read(v);
	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
-	} while (!${arch}${atomic}_try_cmpxchg(v, &c, dec));
+	} while (!arch_${atomic}_try_cmpxchg(v, &c, dec));
	return dec;
}
...
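The atomic_t/int instantiation shows the usual try_cmpxchg retry loop:

	static __always_inline int
	arch_atomic_dec_if_positive(atomic_t *v)
	{
		int dec, c = arch_atomic_read(v);

		do {
			dec = c - 1;
			if (unlikely(dec < 0))
				break;
		} while (!arch_atomic_try_cmpxchg(v, &c, dec));

		return dec;
	}

On failure, arch_atomic_try_cmpxchg updates c with the value it observed, so the loop retries without re-reading the counter.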
scripts/atomic/fallbacks/dec_unless_positive

cat <<EOF
static __always_inline bool
-${arch}${atomic}_dec_unless_positive(${atomic}_t *v)
+arch_${atomic}_dec_unless_positive(${atomic}_t *v)
{
-	${int} c = ${arch}${atomic}_read(v);
+	${int} c = arch_${atomic}_read(v);
	do {
		if (unlikely(c > 0))
			return false;
-	} while (!${arch}${atomic}_try_cmpxchg(v, &c, c - 1));
+	} while (!arch_${atomic}_try_cmpxchg(v, &c, c - 1));
	return true;
}
...
scripts/atomic/fallbacks/fence

cat <<EOF
static __always_inline ${ret}
-${arch}${atomic}_${pfx}${name}${sfx}(${params})
+arch_${atomic}_${pfx}${name}${sfx}(${params})
{
	${ret} ret;
	__atomic_pre_full_fence();
-	ret = ${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+	ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
	__atomic_post_full_fence();
	return ret;
}
...
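This builds the fully-ordered form by bracketing the relaxed op with full fences; instantiated for fetch_add, for example, it becomes:

	static __always_inline int
	arch_atomic_fetch_add(int i, atomic_t *v)
	{
		int ret;
		__atomic_pre_full_fence();
		ret = arch_atomic_fetch_add_relaxed(i, v);
		__atomic_post_full_fence();
		return ret;
	}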
scripts/atomic/fallbacks/fetch_add_unless

cat << EOF
/**
- * ${arch}${atomic}_fetch_add_unless - add unless the number is already a given value
+ * arch_${atomic}_fetch_add_unless - add unless the number is already a given value
 * @v: pointer of type ${atomic}_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
@@ -9,14 +9,14 @@ cat << EOF
 * Returns original value of @v
 */
static __always_inline ${int}
-${arch}${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+arch_${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
-	${int} c = ${arch}${atomic}_read(v);
+	${int} c = arch_${atomic}_read(v);
	do {
		if (unlikely(c == u))
			break;
-	} while (!${arch}${atomic}_try_cmpxchg(v, &c, c + a));
+	} while (!arch_${atomic}_try_cmpxchg(v, &c, c + a));
	return c;
}
...
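Instantiated for atomic_t/int:

	static __always_inline int
	arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
	{
		int c = arch_atomic_read(v);

		do {
			if (unlikely(c == u))
				break;
		} while (!arch_atomic_try_cmpxchg(v, &c, c + a));

		return c;
	}

Returning the original value (rather than a bool) is what lets arch_${atomic}_add_unless above be a one-line wrapper.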
scripts/atomic/fallbacks/inc

cat <<EOF
static __always_inline ${ret}
-${arch}${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
+arch_${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
{
-	${retstmt}${arch}${atomic}_${pfx}add${sfx}${order}(1, v);
+	${retstmt}arch_${atomic}_${pfx}add${sfx}${order}(1, v);
}
EOF
scripts/atomic/fallbacks/inc_and_test

cat <<EOF
/**
- * ${arch}${atomic}_inc_and_test - increment and test
+ * arch_${atomic}_inc_and_test - increment and test
 * @v: pointer of type ${atomic}_t
 *
 * Atomically increments @v by 1
@@ -8,8 +8,8 @@ cat <<EOF
 * other cases.
 */
static __always_inline bool
-${arch}${atomic}_inc_and_test(${atomic}_t *v)
+arch_${atomic}_inc_and_test(${atomic}_t *v)
{
-	return ${arch}${atomic}_inc_return(v) == 0;
+	return arch_${atomic}_inc_return(v) == 0;
}
EOF
scripts/atomic/fallbacks/inc_not_zero

cat <<EOF
/**
- * ${arch}${atomic}_inc_not_zero - increment unless the number is zero
+ * arch_${atomic}_inc_not_zero - increment unless the number is zero
 * @v: pointer of type ${atomic}_t
 *
 * Atomically increments @v by 1, if @v is non-zero.
 * Returns true if the increment was done.
 */
static __always_inline bool
-${arch}${atomic}_inc_not_zero(${atomic}_t *v)
+arch_${atomic}_inc_not_zero(${atomic}_t *v)
{
-	return ${arch}${atomic}_add_unless(v, 1, 0);
+	return arch_${atomic}_add_unless(v, 1, 0);
}
EOF
scripts/atomic/fallbacks/inc_unless_negative

cat <<EOF
static __always_inline bool
-${arch}${atomic}_inc_unless_negative(${atomic}_t *v)
+arch_${atomic}_inc_unless_negative(${atomic}_t *v)
{
-	${int} c = ${arch}${atomic}_read(v);
+	${int} c = arch_${atomic}_read(v);
	do {
		if (unlikely(c < 0))
			return false;
-	} while (!${arch}${atomic}_try_cmpxchg(v, &c, c + 1));
+	} while (!arch_${atomic}_try_cmpxchg(v, &c, c + 1));
	return true;
}
...
scripts/atomic/fallbacks/read_acquire

cat <<EOF
static __always_inline ${ret}
-${arch}${atomic}_read_acquire(const ${atomic}_t *v)
+arch_${atomic}_read_acquire(const ${atomic}_t *v)
{
	return smp_load_acquire(&(v)->counter);
}
...
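Here the fallback is expressed directly as smp_load_acquire() on the counter rather than a _relaxed op plus fence (set_release below is the symmetric store). The atomic_t instantiation:

	static __always_inline int
	arch_atomic_read_acquire(const atomic_t *v)
	{
		return smp_load_acquire(&(v)->counter);
	}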
scripts/atomic/fallbacks/release

cat <<EOF
static __always_inline ${ret}
-${arch}${atomic}_${pfx}${name}${sfx}_release(${params})
+arch_${atomic}_${pfx}${name}${sfx}_release(${params})
{
	__atomic_release_fence();
-	${retstmt}${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+	${retstmt}arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
}
EOF
scripts/atomic/fallbacks/set_release

cat <<EOF
static __always_inline void
-${arch}${atomic}_set_release(${atomic}_t *v, ${int} i)
+arch_${atomic}_set_release(${atomic}_t *v, ${int} i)
{
	smp_store_release(&(v)->counter, i);
}
...
scripts/atomic/fallbacks/sub_and_test

cat <<EOF
/**
- * ${arch}${atomic}_sub_and_test - subtract value from variable and test result
+ * arch_${atomic}_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type ${atomic}_t
 *
@@ -9,8 +9,8 @@ cat <<EOF
 * other cases.
 */
static __always_inline bool
-${arch}${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
+arch_${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
{
-	return ${arch}${atomic}_sub_return(i, v) == 0;
+	return arch_${atomic}_sub_return(i, v) == 0;
}
EOF
scripts/atomic/fallbacks/try_cmpxchg

cat <<EOF
static __always_inline bool
-${arch}${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
+arch_${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
{
	${int} r, o = *old;
-	r = ${arch}${atomic}_cmpxchg${order}(v, o, new);
+	r = arch_${atomic}_cmpxchg${order}(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
...
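For atomic_t this generates:

	static __always_inline bool
	arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
	{
		int r, o = *old;
		r = arch_atomic_cmpxchg(v, o, new);
		if (unlikely(r != o))
			*old = r;
		return likely(r == o);
	}

Writing the observed value back through *old on failure is what the do/while loops in the conditional fallbacks above rely on.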
scripts/atomic/gen-atomic-fallback.sh

@@ -2,11 +2,10 @@
# SPDX-License-Identifier: GPL-2.0
ATOMICDIR=$(dirname $0)
-ARCH=$2
. ${ATOMICDIR}/atomic-tbl.sh
-#gen_template_fallback(template, meta, pfx, name, sfx, order, arch, atomic, int, args...)
+#gen_template_fallback(template, meta, pfx, name, sfx, order, atomic, int, args...)
gen_template_fallback()
{
	local template="$1"; shift
@@ -15,11 +14,10 @@ gen_template_fallback
	local name="$1"; shift
	local sfx="$1"; shift
	local order="$1"; shift
-	local arch="$1"; shift
	local atomic="$1"; shift
	local int="$1"; shift
-	local atomicname="${arch}${atomic}_${pfx}${name}${sfx}${order}"
+	local atomicname="arch_${atomic}_${pfx}${name}${sfx}${order}"
	local ret="$(gen_ret_type "${meta}" "${int}")"
	local retstmt="$(gen_ret_stmt "${meta}")"
@@ -34,7 +32,7 @@ gen_template_fallback
	fi
}
-#gen_proto_fallback(meta, pfx, name, sfx, order, arch, atomic, int, args...)
+#gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
gen_proto_fallback()
{
	local meta="$1"; shift
@@ -65,44 +63,26 @@ gen_proto_order_variant
	local name="$1"; shift
	local sfx="$1"; shift
	local order="$1"; shift
-	local arch="$1"
-	local atomic="$2"
+	local atomic="$1"
-	local basename="${arch}${atomic}_${pfx}${name}${sfx}"
+	local basename="arch_${atomic}_${pfx}${name}${sfx}"
-	printf "#define arch_${basename}${order} ${basename}${order}\n"
+	printf "#define ${basename}${order} ${basename}${order}\n"
}
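Previously this printf mapped the arch_* name onto the plain name (with arch empty, it emitted e.g. #define arch_atomic_add_unless atomic_add_unless for the non-arch_* header). Post-patch it emits self-referential defines, e.g.:

	#define arch_atomic_add_unless arch_atomic_add_unless

which, as an illustration of the idiom, let later #ifdef/#ifndef checks detect which ops have already been provided.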
-#gen_proto_order_variants(meta, pfx, name, sfx, arch, atomic, int, args...)
+#gen_proto_order_variants(meta, pfx, name, sfx, atomic, int, args...)
gen_proto_order_variants()
{
	local meta="$1"; shift
	local pfx="$1"; shift
	local name="$1"; shift
	local sfx="$1"; shift
-	local arch="$1"
-	local atomic="$2"
+	local atomic="$1"
-	local basename="${arch}${atomic}_${pfx}${name}${sfx}"
+	local basename="arch_${atomic}_${pfx}${name}${sfx}"
	local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
-	if [ -z "$arch" ]; then
-		gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
-		if meta_has_acquire "${meta}"; then
-			gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
-		fi
-		if meta_has_release "${meta}"; then
-			gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
-		fi
-		if meta_has_relaxed "${meta}"; then
-			gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
-		fi
-		echo ""
-	fi
	# If we don't have relaxed atomics, then we don't bother with ordering fallbacks
	# read_acquire and set_release need to be templated, though
	if ! meta_has_relaxed "${meta}"; then
@@ -187,38 +167,38 @@ gen_try_cmpxchg_fallback
	local order="$1"; shift;
cat <<EOF
-#ifndef ${ARCH}try_cmpxchg${order}
-#define ${ARCH}try_cmpxchg${order}(_ptr, _oldp, _new) \\
+#ifndef arch_try_cmpxchg${order}
+#define arch_try_cmpxchg${order}(_ptr, _oldp, _new) \\
({ \\
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \\
-	___r = ${ARCH}cmpxchg${order}((_ptr), ___o, (_new)); \\
+	___r = arch_cmpxchg${order}((_ptr), ___o, (_new)); \\
	if (unlikely(___r != ___o)) \\
		*___op = ___r; \\
	likely(___r == ___o); \\
})
-#endif /* ${ARCH}try_cmpxchg${order} */
+#endif /* arch_try_cmpxchg${order} */
EOF
}
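Instantiated with an empty ${order}, the emitted fallback is, for illustration:

	#ifndef arch_try_cmpxchg
	#define arch_try_cmpxchg(_ptr, _oldp, _new) \
	({ \
		typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
		___r = arch_cmpxchg((_ptr), ___o, (_new)); \
		if (unlikely(___r != ___o)) \
			*___op = ___r; \
		likely(___r == ___o); \
	})
	#endif /* arch_try_cmpxchg */

i.e. a statement-expression macro mirroring the per-type try_cmpxchg template, defined only when the architecture does not supply its own.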
gen_try_cmpxchg_fallbacks()
{
-	printf "#ifndef ${ARCH}try_cmpxchg_relaxed\n"
-	printf "#ifdef ${ARCH}try_cmpxchg\n"
+	printf "#ifndef arch_try_cmpxchg_relaxed\n"
+	printf "#ifdef arch_try_cmpxchg\n"
-	gen_basic_fallbacks "${ARCH}try_cmpxchg"
+	gen_basic_fallbacks "arch_try_cmpxchg"
-	printf "#endif /* ${ARCH}try_cmpxchg */\n\n"
+	printf "#endif /* arch_try_cmpxchg */\n\n"
	for order in "" "_acquire" "_release" "_relaxed"; do
		gen_try_cmpxchg_fallback "${order}"
	done
-	printf "#else /* ${ARCH}try_cmpxchg_relaxed */\n"
+	printf "#else /* arch_try_cmpxchg_relaxed */\n"
-	gen_order_fallbacks "${ARCH}try_cmpxchg"
+	gen_order_fallbacks "arch_try_cmpxchg"
-	printf "#endif /* ${ARCH}try_cmpxchg_relaxed */\n\n"
+	printf "#endif /* arch_try_cmpxchg_relaxed */\n\n"
}
cat << EOF
@@ -234,14 +214,14 @@ cat << EOF
EOF
-for xchg in "${ARCH}xchg" "${ARCH}cmpxchg" "${ARCH}cmpxchg64"; do
+for xchg in "arch_xchg" "arch_cmpxchg" "arch_cmpxchg64"; do
	gen_xchg_fallbacks "${xchg}"
done
gen_try_cmpxchg_fallbacks
grep '^[a-z]' "$1" | while read name meta args; do
-	gen_proto "${meta}" "${name}" "${ARCH}" "atomic" "int" ${args}
+	gen_proto "${meta}" "${name}" "atomic" "int" ${args}
done
cat <<EOF
@@ -252,7 +232,7 @@ cat <<EOF
EOF
grep '^[a-z]' "$1" | while read name meta args; do
-	gen_proto "${meta}" "${name}" "${ARCH}" "atomic64" "s64" ${args}
+	gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
done
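The same op table drives both loops; the second pass instantiates every template with ${atomic}=atomic64 and ${int}=s64, yielding, for example:

	static __always_inline bool
	arch_atomic64_dec_and_test(atomic64_t *v)
	{
		return arch_atomic64_dec_return(v) == 0;
	}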
cat <<EOF
...
scripts/atomic/gen-atomics.sh

@@ -10,7 +10,7 @@ LINUXDIR=${ATOMICDIR}/../..
cat <<EOF |
gen-atomic-instrumented.sh          asm-generic/atomic-instrumented.h
gen-atomic-long.sh                  asm-generic/atomic-long.h
-gen-atomic-fallback.sh              linux/atomic-arch-fallback.h        arch_
+gen-atomic-fallback.sh              linux/atomic-arch-fallback.h
EOF
while read script header args; do
	/bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} ${args} > ${LINUXDIR}/include/${header}
...