[PATCH v5 40/54] tcg/i386: Use atom_and_align_for_opc
From: Richard Henderson
Subject: [PATCH v5 40/54] tcg/i386: Use atom_and_align_for_opc
Date: Mon, 15 May 2023 07:32:59 -0700
No change to the ultimate load/store routines yet, so some atomicity
conditions are not yet honored, but this plumbs the change to alignment
through the relevant functions.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
v5: Removed r-b for changes to HostAddress.
---
tcg/i386/tcg-target.c.inc | 27 +++++++++++++++------------
1 file changed, 15 insertions(+), 12 deletions(-)
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 6d55ba5a1c..3b8528e332 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -1774,6 +1774,7 @@ typedef struct {
int index;
int ofs;
int seg;
+ TCGAtomAlign aa;
} HostAddress;
bool tcg_target_has_memory_bswap(MemOp memop)
@@ -1895,8 +1896,18 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
{
TCGLabelQemuLdst *ldst = NULL;
MemOp opc = get_memop(oi);
- unsigned a_bits = get_alignment_bits(opc);
- unsigned a_mask = (1 << a_bits) - 1;
+ unsigned a_mask;
+
+#ifdef CONFIG_SOFTMMU
+ h->index = TCG_REG_L0;
+ h->ofs = 0;
+ h->seg = 0;
+#else
+ *h = x86_guest_base;
+#endif
+ h->base = addrlo;
+ h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
+ a_mask = (1 << h->aa.align) - 1;
#ifdef CONFIG_SOFTMMU
int cmp_ofs = is_ld ? offsetof(CPUTLBEntry, addr_read)
@@ -1946,7 +1957,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
* copy the address and mask. For lesser alignments, check that we don't
* cross pages for the complete access.
*/
- if (a_bits >= s_bits) {
+ if (a_mask >= s_mask) {
tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
} else {
tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1,
@@ -1977,13 +1988,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
/* TLB Hit. */
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0,
offsetof(CPUTLBEntry, addend));
-
- *h = (HostAddress) {
- .base = addrlo,
- .index = TCG_REG_L0,
- };
#else
- if (a_bits) {
+ if (a_mask) {
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
@@ -1997,9 +2003,6 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst->label_ptr[0] = s->code_ptr;
s->code_ptr += 4;
}
-
- *h = x86_guest_base;
- h->base = addrlo;
#endif
return ldst;
--
2.34.1
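
As a companion to the diff above, here is a minimal standalone C sketch of the
mask arithmetic that prepare_host_addr() now performs on the .align field of
the TCGAtomAlign value returned by atom_and_align_for_opc(). The struct and
helpers below (AtomAlignSketch, align_mask, alignment_covers_size) are
simplified stand-ins invented for illustration, not the QEMU definitions; the
sketch only demonstrates how the alignment mask is derived and why the new
"a_mask >= s_mask" test agrees with the old "a_bits >= s_bits" test.

/*
 * Standalone sketch, not part of the patch: the type and helpers below are
 * simplified stand-ins, not the QEMU definitions.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    unsigned atom;   /* log2 of the bytes that must be accessed atomically */
    unsigned align;  /* log2 of the required guest address alignment */
} AtomAlignSketch;

/* Mirrors "a_mask = (1 << h->aa.align) - 1" from the hunk above. */
static unsigned align_mask(AtomAlignSketch aa)
{
    return (1u << aa.align) - 1;
}

/*
 * The patch also rewrites the softmmu fast-path test from
 * "a_bits >= s_bits" to "a_mask >= s_mask".  Because x -> (1 << x) - 1 is
 * strictly increasing, the two comparisons always agree, but the mask form
 * no longer needs the raw bit counts to be kept around.
 */
static bool alignment_covers_size(unsigned a_bits, unsigned s_bits)
{
    unsigned a_mask = (1u << a_bits) - 1;
    unsigned s_mask = (1u << s_bits) - 1;

    assert((a_mask >= s_mask) == (a_bits >= s_bits));
    return a_mask >= s_mask;
}

int main(void)
{
    /* E.g. a 4-byte access that must be 4-byte aligned: align = 2, mask = 3. */
    AtomAlignSketch aa = { .atom = 2, .align = 2 };

    printf("a_mask = %#x\n", align_mask(aa));
    printf("4-byte alignment covers a 4-byte access: %d\n",
           alignment_covers_size(aa.align, 2));
    printf("1-byte alignment covers a 4-byte access: %d\n",
           alignment_covers_size(0, 2));
    return 0;
}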