Subject: [PATCH v5 39/54] tcg: Introduce atom_and_align_for_opc
From: Richard Henderson
Date: Mon, 15 May 2023 07:32:58 -0700

Examine MemOp for atomicity and alignment, adjusting alignment
as required to implement atomicity on the host.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
v5: Dropped r-b because of MO_ATOM_* reorg
Return a struct with the result pair (phil).
---
tcg/tcg.c | 95 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 95 insertions(+)
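
As context for the patch below: the adjustment the new function performs boils down to "if the host cannot provide the requested atomicity at the guest-specified alignment, demand more alignment on the inline fast path and let the slow-path helper handle the rest". A minimal standalone sketch of that rule, not part of the patch, using hypothetical stand-ins for the MemOp lg2 values:

    #include <stdio.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    /* Hypothetical stand-ins for the lg2 size/alignment values of MemOp. */
    enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3 };

    /*
     * If the host is only atomic for naturally aligned accesses, an
     * operation that must be atomic as a whole needs at least natural
     * alignment on the fast path; misaligned accesses then fall through
     * to the slow-path helper, which provides atomicity by other means.
     */
    static int align_for_atomicity(int guest_align, int size,
                                   int host_ifalign_only)
    {
        return host_ifalign_only ? MAX(guest_align, size) : guest_align;
    }

    int main(void)
    {
        /* 4-byte access (MO_32) with no guest alignment requirement. */
        printf("fast-path align = MO_%d\n",
               8 << align_for_atomicity(MO_8, MO_32, 1));
        return 0;
    }
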
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 10429be039..279ba2ca6a 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -220,6 +220,15 @@ static void * const qemu_st_helpers[MO_SIZE + 1] __attribute__((unused)) = {
 #endif
 };
+typedef struct {
+    MemOp atom;   /* lg2 bits of atomicity required */
+    MemOp align;  /* lg2 bits of alignment to use */
+} TCGAtomAlign;
+
+static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
+                                           MemOp host_atom, bool allow_two_ops)
+    __attribute__((unused));
+
 TCGContext tcg_init_ctx;
 __thread TCGContext *tcg_ctx;
@@ -5226,6 +5235,92 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
     }
 }
+/**
+ * atom_and_align_for_opc:
+ * @s: tcg context
+ * @opc: memory operation code
+ * @host_atom: MO_ATOM_{IFALIGN,WITHIN16,SUBALIGN} for host operations
+ * @allow_two_ops: true if we are prepared to issue two operations
+ *
+ * Return the alignment and atomicity to use for the inline fast path
+ * for the given memory operation. The alignment may be larger than
+ * that specified in @opc, and the correct alignment will be diagnosed
+ * by the slow path helper.
+ *
+ * If @allow_two_ops, the host is prepared to test for 2x alignment,
+ * and issue two loads or stores for subalignment.
+ */
+static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
+                                           MemOp host_atom, bool allow_two_ops)
+{
+    MemOp align = get_alignment_bits(opc);
+    MemOp size = opc & MO_SIZE;
+    MemOp half = size ? size - 1 : 0;
+    MemOp atmax;
+    MemOp atom;
+
+    /* When serialized, no further atomicity required. */
+    if (s->gen_tb->cflags & CF_PARALLEL) {
+        atom = opc & MO_ATOM_MASK;
+    } else {
+        atom = MO_ATOM_NONE;
+    }
+
+    switch (atom) {
+    case MO_ATOM_NONE:
+        /* The operation requires no specific atomicity. */
+        atmax = MO_8;
+        break;
+
+    case MO_ATOM_IFALIGN:
+        atmax = size;
+        break;
+
+    case MO_ATOM_IFALIGN_PAIR:
+        atmax = half;
+        break;
+
+    case MO_ATOM_WITHIN16:
+        atmax = size;
+        if (size == MO_128) {
+            /* Misalignment implies !within16, and therefore no atomicity. */
+        } else if (host_atom != MO_ATOM_WITHIN16) {
+            /* The host does not implement within16, so require alignment. */
+            align = MAX(align, size);
+        }
+        break;
+
+    case MO_ATOM_WITHIN16_PAIR:
+        atmax = size;
+        /*
+         * Misalignment implies !within16, and therefore half atomicity.
+         * Any host prepared for two operations can implement this with
+         * half alignment.
+         */
+        if (host_atom != MO_ATOM_WITHIN16 && allow_two_ops) {
+            align = MAX(align, half);
+        }
+        break;
+
+    case MO_ATOM_SUBALIGN:
+        atmax = size;
+        if (host_atom != MO_ATOM_SUBALIGN) {
+            /* If unaligned but not odd, there are subobjects up to half. */
+            if (allow_two_ops) {
+                align = MAX(align, half);
+            } else {
+                align = MAX(align, size);
+            }
+        }
+        break;
+
+    default:
+        g_assert_not_reached();
+    }
+
+    return (TCGAtomAlign){ .atom = atmax, .align = align };
+}
+
 /*
  * Similarly for qemu_ld/st slow path helpers.
  * We must re-implement tcg_gen_callN and tcg_reg_alloc_call simultaneously,
--
2.34.1
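
For readers following the series: the helper is marked unused here because its callers arrive in later patches. A sketch of how a backend's qemu_ld/st fast path might consume the result; the surrounding variables such as a_mask and addr are illustrative, not taken from this patch:

    /* Fragment, assuming a host that is atomic only for naturally
     * aligned accesses and is not prepared to issue two operations. */
    TCGAtomAlign aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    unsigned a_mask = (1u << aa.align) - 1;

    /* The fast path branches to the slow-path helper whenever
     * (addr & a_mask) != 0; aa.atom is the atomicity the backend must
     * still honor when emitting the host access itself. */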