[PATCH v5 41/54] tcg/aarch64: Use atom_and_align_for_opc
From: Richard Henderson
Subject: [PATCH v5 41/54] tcg/aarch64: Use atom_and_align_for_opc
Date: Mon, 15 May 2023 07:33:00 -0700
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/aarch64/tcg-target.c.inc | 36 ++++++++++++++++++------------------
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index 0cc719d799..ea4108d59c 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -1593,6 +1593,7 @@ typedef struct {
TCGReg base;
TCGReg index;
TCGType index_ext;
+ TCGAtomAlign aa;
} HostAddress;
bool tcg_target_has_memory_bswap(MemOp memop)
@@ -1646,8 +1647,13 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
TCGLabelQemuLdst *ldst = NULL;
MemOp opc = get_memop(oi);
- unsigned a_bits = get_alignment_bits(opc);
- unsigned a_mask = (1u << a_bits) - 1;
+ unsigned a_mask;
+
+ h->aa = atom_and_align_for_opc(s, opc,
+ have_lse2 ? MO_ATOM_WITHIN16
+ : MO_ATOM_IFALIGN,
+ false);
+ a_mask = (1 << h->aa.align) - 1;
#ifdef CONFIG_SOFTMMU
unsigned s_bits = opc & MO_SIZE;
@@ -1693,7 +1699,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
* bits within the address. For unaligned access, we check that we don't
* cross pages using the address of the last byte of the access.
*/
- if (a_bits >= s_bits) {
+ if (a_mask >= s_mask) {
x3 = addr_reg;
} else {
tcg_out_insn(s, 3401, ADDI, TARGET_LONG_BITS == 64,
@@ -1713,11 +1719,9 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst->label_ptr[0] = s->code_ptr;
tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
- *h = (HostAddress){
- .base = TCG_REG_X1,
- .index = addr_reg,
- .index_ext = addr_type
- };
+ h->base = TCG_REG_X1;
+ h->index = addr_reg;
+ h->index_ext = addr_type;
#else
if (a_mask) {
ldst = new_ldst_label(s);
@@ -1735,17 +1739,13 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
}
if (USE_GUEST_BASE) {
- *h = (HostAddress){
- .base = TCG_REG_GUEST_BASE,
- .index = addr_reg,
- .index_ext = addr_type
- };
+ h->base = TCG_REG_GUEST_BASE;
+ h->index = addr_reg;
+ h->index_ext = addr_type;
} else {
- *h = (HostAddress){
- .base = addr_reg,
- .index = TCG_REG_XZR,
- .index_ext = TCG_TYPE_I64
- };
+ h->base = addr_reg;
+ h->index = TCG_REG_XZR;
+ h->index_ext = TCG_TYPE_I64;
}
#endif
--
2.34.1
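
The functional change above replaces the "a_bits >= s_bits" test with "a_mask >= s_mask". Since a_mask is (1 << a_bits) - 1 and, in the surrounding TCG code, s_mask is built the same way from s_bits, the two comparisons order identically, so only the mask needs to be kept once the alignment comes from atom_and_align_for_opc(). A minimal standalone sketch checking that equivalence (not part of the patch; all names are local to the example):

#include <assert.h>
#include <stdio.h>

int main(void)
{
    /* For masks of the form (1 << n) - 1, mask order matches
     * bit-count order, so "a_mask >= s_mask" behaves exactly
     * like "a_bits >= s_bits". */
    for (unsigned a_bits = 0; a_bits <= 4; a_bits++) {
        for (unsigned s_bits = 0; s_bits <= 4; s_bits++) {
            unsigned a_mask = (1u << a_bits) - 1;
            unsigned s_mask = (1u << s_bits) - 1;
            assert((a_bits >= s_bits) == (a_mask >= s_mask));
        }
    }
    printf("a_mask >= s_mask matches a_bits >= s_bits\n");
    return 0;
}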