[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v5 10/54] accel/tcg: Use have_atomic16 in ldst_atomicity.c.inc
From: Richard Henderson
Subject: [PATCH v5 10/54] accel/tcg: Use have_atomic16 in ldst_atomicity.c.inc
Date: Mon, 15 May 2023 07:32:29 -0700
Hosts using Intel and AMD AVX cpus are quite common.
Add fast paths through ldst_atomicity using this.
Only enable with CONFIG_INT128; some older clang versions do not
support __int128_t, and the inline assembly won't work on structures.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/ldst_atomicity.c.inc | 76 +++++++++++++++++++++++++++-------
1 file changed, 60 insertions(+), 16 deletions(-)
diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
index dd387c9bdd..69c1c61997 100644
--- a/accel/tcg/ldst_atomicity.c.inc
+++ b/accel/tcg/ldst_atomicity.c.inc
@@ -35,6 +35,14 @@
#if defined(CONFIG_ATOMIC128)
# define HAVE_al16_fast true
+#elif defined(CONFIG_TCG_INTERPRETER)
+/*
+ * FIXME: host specific detection for this is in tcg/$host/,
+ * but we're using tcg/tci/ instead.
+ */
+# define HAVE_al16_fast false
+#elif defined(__x86_64__) && defined(CONFIG_INT128)
+# define HAVE_al16_fast likely(have_atomic16)
#else
# define HAVE_al16_fast false
#endif
@@ -178,6 +186,12 @@ load_atomic16(void *pv)
r.u = qatomic_read__nocheck(p);
return r.s;
+#elif defined(__x86_64__) && defined(CONFIG_INT128)
+ Int128Alias r;
+
+ /* Via HAVE_al16_fast, have_atomic16 is true. */
+ asm("vmovdqa %1, %0" : "=x" (r.u) : "m" (*(Int128 *)pv));
+ return r.s;
#else
qemu_build_not_reached();
#endif
@@ -399,6 +413,24 @@ load_atom_extract_al16_or_al8(void *pv, int s)
r = qatomic_read__nocheck(p16);
}
return r >> shr;
+#elif defined(__x86_64__) && defined(CONFIG_INT128)
+ uintptr_t pi = (uintptr_t)pv;
+ int shr = (pi & 7) * 8;
+ uint64_t a, b;
+
+ /* Via HAVE_al16_fast, have_atomic16 is true. */
+ pv = (void *)(pi & ~7);
+ if (pi & 8) {
+ uint64_t *p8 = __builtin_assume_aligned(pv, 16, 8);
+ a = qatomic_read__nocheck(p8);
+ b = qatomic_read__nocheck(p8 + 1);
+ } else {
+ asm("vmovdqa %2, %0\n\tvpextrq $1, %0, %1"
+ : "=x"(a), "=r"(b) : "m" (*(__uint128_t *)pv));
+ }
+ asm("shrd %b2, %1, %0" : "+r"(a) : "r"(b), "c"(shr));
+
+ return a;
#else
qemu_build_not_reached();
#endif
@@ -715,23 +747,35 @@ static inline void ATTRIBUTE_ATOMIC128_OPT
store_atomic16(void *pv, Int128Alias val)
{
#if defined(CONFIG_ATOMIC128)
- __uint128_t *pu = __builtin_assume_aligned(pv, 16);
- qatomic_set__nocheck(pu, val.u);
-#elif defined(CONFIG_CMPXCHG128)
- __uint128_t *pu = __builtin_assume_aligned(pv, 16);
- __uint128_t o;
-
- /*
- * Without CONFIG_ATOMIC128, __atomic_compare_exchange_n will always
- * defer to libatomic, so we must use __sync_*_compare_and_swap_16
- * and accept the sequential consistency that comes with it.
- */
- do {
- o = *pu;
- } while (!__sync_bool_compare_and_swap_16(pu, o, val.u));
-#else
- qemu_build_not_reached();
+ {
+ __uint128_t *pu = __builtin_assume_aligned(pv, 16);
+ qatomic_set__nocheck(pu, val.u);
+ return;
+ }
#endif
+#if defined(__x86_64__) && defined(CONFIG_INT128)
+ if (HAVE_al16_fast) {
+ asm("vmovdqa %1, %0" : "=m"(*(__uint128_t *)pv) : "x" (val.u));
+ return;
+ }
+#endif
+#if defined(CONFIG_CMPXCHG128)
+ {
+ __uint128_t *pu = __builtin_assume_aligned(pv, 16);
+ __uint128_t o;
+
+ /*
+ * Without CONFIG_ATOMIC128, __atomic_compare_exchange_n will always
+ * defer to libatomic, so we must use __sync_*_compare_and_swap_16
+ * and accept the sequential consistency that comes with it.
+ */
+ do {
+ o = *pu;
+ } while (!__sync_bool_compare_and_swap_16(pu, o, val.u));
+ return;
+ }
+#endif
+ qemu_build_not_reached();
}
/**
--
2.34.1
- [PATCH v5 09/54] tcg/i386: Add have_atomic16, (continued)
[PATCH v5 13/54] tcg/aarch64: Detect have_lse, have_lse2 for darwin, Richard Henderson, 2023/05/15
[PATCH v5 19/54] tcg/riscv: Use full load/store helpers in user-only mode, Richard Henderson, 2023/05/15
[PATCH v5 10/54] accel/tcg: Use have_atomic16 in ldst_atomicity.c.inc,
Richard Henderson <=
[PATCH v5 14/54] accel/tcg: Add have_lse2 support in ldst_atomicity, Richard Henderson, 2023/05/15
[PATCH v5 16/54] tcg/aarch64: Use full load/store helpers in user-only mode, Richard Henderson, 2023/05/15
[PATCH v5 18/54] tcg/loongarch64: Use full load/store helpers in user-only mode, Richard Henderson, 2023/05/15
[PATCH v5 24/54] tcg/sparc64: Allocate %g2 as a third temporary, Richard Henderson, 2023/05/15
[PATCH v5 15/54] tcg/i386: Use full load/store helpers in user-only mode, Richard Henderson, 2023/05/15
[PATCH v5 22/54] tcg/mips: Use full load/store helpers in user-only mode, Richard Henderson, 2023/05/15