[PATCH v4 44/57] tcg/aarch64: Use atom_and_align_for_opc
From: Richard Henderson
Subject: [PATCH v4 44/57] tcg/aarch64: Use atom_and_align_for_opc
Date: Wed, 3 May 2023 08:06:43 +0100
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/aarch64/tcg-target.c.inc | 38 +++++++++++++++++++-----------------
1 file changed, 20 insertions(+), 18 deletions(-)
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index 8e5f3d3688..1d6d382edd 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -1593,6 +1593,8 @@ typedef struct {
     TCGReg base;
     TCGReg index;
     TCGType index_ext;
+    MemOp align;
+    MemOp atom;
 } HostAddress;
 
 bool tcg_target_has_memory_bswap(MemOp memop)
@@ -1646,8 +1648,14 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
     TCGLabelQemuLdst *ldst = NULL;
     MemOp opc = get_memop(oi);
-    unsigned a_bits = get_alignment_bits(opc);
-    unsigned a_mask = (1u << a_bits) - 1;
+    MemOp atom_u;
+    unsigned a_mask;
+
+    h->align = atom_and_align_for_opc(s, &h->atom, &atom_u, opc,
+                                      have_lse2 ? MO_ATOM_WITHIN16
+                                                : MO_ATOM_IFALIGN,
+                                      false);
+    a_mask = (1 << h->align) - 1;
 
 #ifdef CONFIG_SOFTMMU
     unsigned s_bits = opc & MO_SIZE;
@@ -1693,7 +1701,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
      * bits within the address. For unaligned access, we check that we don't
      * cross pages using the address of the last byte of the access.
      */
-    if (a_bits >= s_bits) {
+    if (a_mask >= s_mask) {
         x3 = addr_reg;
     } else {
         tcg_out_insn(s, 3401, ADDI, TARGET_LONG_BITS == 64,
@@ -1713,11 +1721,9 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     ldst->label_ptr[0] = s->code_ptr;
     tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
 
-    *h = (HostAddress){
-        .base = TCG_REG_X1,
-        .index = addr_reg,
-        .index_ext = addr_type
-    };
+    h->base = TCG_REG_X1;
+    h->index = addr_reg;
+    h->index_ext = addr_type;
 #else
     if (a_mask) {
         ldst = new_ldst_label(s);
@@ -1735,17 +1741,13 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     }
 
     if (USE_GUEST_BASE) {
-        *h = (HostAddress){
-            .base = TCG_REG_GUEST_BASE,
-            .index = addr_reg,
-            .index_ext = addr_type
-        };
+        h->base = TCG_REG_GUEST_BASE;
+        h->index = addr_reg;
+        h->index_ext = addr_type;
     } else {
-        *h = (HostAddress){
-            .base = addr_reg,
-            .index = TCG_REG_XZR,
-            .index_ext = TCG_TYPE_I64
-        };
+        h->base = addr_reg;
+        h->index = TCG_REG_XZR;
+        h->index_ext = TCG_TYPE_I64;
     }
 #endif
--
2.34.1
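
The hunks above replace the bespoke get_alignment_bits() extraction with the alignment (and atomicity) that atom_and_align_for_opc() derives from the MemOp, and the comment in the third hunk describes the comparison that the resulting a_mask feeds. Below is a minimal, self-contained sketch of that comparison, assuming a simplified MemOp field layout, a 4KiB page, and a flat tlb_comparator value in place of QEMU's CPUTLBEntry; these are illustrative stand-ins, not QEMU's real definitions.

/*
 * Illustrative sketch, not QEMU source: the alignment and page-cross
 * comparison that prepare_host_addr() arranges using the a_mask derived
 * from the alignment returned by atom_and_align_for_opc().  The MemOp
 * layout, page size, and tlb_comparator are simplified assumptions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef unsigned MemOp;

enum {
    MO_SIZE   = 0x03,             /* assumed: log2(size) in the low bits */
    MO_ASHIFT = 4,                /* assumed: log2(alignment) field position */
    MO_AMASK  = 0x7 << MO_ASHIFT
};

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_MASK (~(uint64_t)0 << TARGET_PAGE_BITS)

/* True if the fast path may proceed; false means take the slow path. */
static bool tlb_check_passes(uint64_t addr, uint64_t tlb_comparator, MemOp opc)
{
    unsigned a_mask = (1u << ((opc & MO_AMASK) >> MO_ASHIFT)) - 1;
    unsigned s_mask = (1u << (opc & MO_SIZE)) - 1;

    /*
     * When the access is at least as aligned as it is large, folding
     * a_mask into the comparison catches misalignment for free.  When it
     * is less aligned, test the address of the last byte instead, so an
     * access that crosses a page changes the page number and fails the
     * compare: this is the ADDI of (s_mask - a_mask) in the hunk above.
     */
    uint64_t x = (a_mask >= s_mask) ? addr : addr + (s_mask - a_mask);
    return (x & (TARGET_PAGE_MASK | a_mask)) == tlb_comparator;
}

int main(void)
{
    uint64_t comparator = 0x4000;          /* TLB entry for page 0x4000 */
    MemOp opc8a = 3 | (3 << MO_ASHIFT);    /* 8-byte access, 8-byte aligned */
    MemOp opc8u = 3;                       /* 8-byte access, unaligned OK */

    printf("%d\n", tlb_check_passes(0x4008, comparator, opc8a)); /* 1 */
    printf("%d\n", tlb_check_passes(0x4004, comparator, opc8a)); /* 0: misaligned */
    printf("%d\n", tlb_check_passes(0x4ffc, comparator, opc8u)); /* 0: page cross */
    printf("%d\n", tlb_check_passes(0x4ff0, comparator, opc8u)); /* 1 */
    return 0;
}

Built with any C99 compiler, this prints 1, 0, 0, 1. Note also that with FEAT_LSE2 the patch requests MO_ATOM_WITHIN16 rather than MO_ATOM_IFALIGN, reflecting that such hosts make accesses single-copy atomic when they stay within an aligned 16-byte granule; that choice lands in h->atom and does not alter the mask logic sketched here.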