[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v4 43/57] tcg/i386: Use atom_and_align_for_opc
From: Richard Henderson
Subject: [PATCH v4 43/57] tcg/i386: Use atom_and_align_for_opc
Date: Wed, 3 May 2023 08:06:42 +0100
No change to the ultimate load/store routines yet, so some atomicity
conditions not yet honored, but plumbs the change to alignment through
the relevant functions.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/i386/tcg-target.c.inc | 34 ++++++++++++++++++++++------------
1 file changed, 22 insertions(+), 12 deletions(-)
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 7c72bf6684..3e21f067d6 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -1774,6 +1774,8 @@ typedef struct {
int index;
int ofs;
int seg;
+ MemOp align;
+ MemOp atom;
} HostAddress;
bool tcg_target_has_memory_bswap(MemOp memop)
@@ -1895,8 +1897,12 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
{
TCGLabelQemuLdst *ldst = NULL;
MemOp opc = get_memop(oi);
- unsigned a_bits = get_alignment_bits(opc);
- unsigned a_mask = (1 << a_bits) - 1;
+ MemOp atom_u;
+ unsigned a_mask;
+
+ h->align = atom_and_align_for_opc(s, &h->atom, &atom_u, opc,
+ MO_ATOM_IFALIGN, false);
+ a_mask = (1 << h->align) - 1;
#ifdef CONFIG_SOFTMMU
int cmp_ofs = is_ld ? offsetof(CPUTLBEntry, addr_read)
@@ -1941,10 +1947,12 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
TLB_MASK_TABLE_OFS(mem_index) +
offsetof(CPUTLBDescFast, table));
- /* If the required alignment is at least as large as the access, simply
- copy the address and mask. For lesser alignments, check that we don't
- cross pages for the complete access. */
- if (a_bits >= s_bits) {
+ /*
+ * If the required alignment is at least as large as the access, simply
+ * copy the address and mask. For lesser alignments, check that we don't
+ * cross pages for the complete access.
+ */
+ if (a_mask >= s_mask) {
tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
} else {
tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1,
@@ -1976,12 +1984,12 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0,
offsetof(CPUTLBEntry, addend));
- *h = (HostAddress) {
- .base = addrlo,
- .index = TCG_REG_L0,
- };
+ h->base = addrlo;
+ h->index = TCG_REG_L0;
+ h->ofs = 0;
+ h->seg = 0;
#else
- if (a_bits) {
+ if (a_mask) {
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
@@ -1996,8 +2004,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
s->code_ptr += 4;
}
- *h = x86_guest_base;
h->base = addrlo;
+ h->index = x86_guest_base.index;
+ h->ofs = x86_guest_base.ofs;
+ h->seg = x86_guest_base.seg;
#endif
return ldst;
--
2.34.1
- [PATCH v4 35/57] accel/tcg: Remove helper_unaligned_{ld,st}, (continued)
- [PATCH v4 35/57] accel/tcg: Remove helper_unaligned_{ld,st}, Richard Henderson, 2023/05/03
- [PATCH v4 37/57] tcg/loongarch64: Support softmmu unaligned accesses, Richard Henderson, 2023/05/03
- [PATCH v4 34/57] tcg/sparc64: Use standard slow path for softmmu, Richard Henderson, 2023/05/03
- [PATCH v4 38/57] tcg/riscv: Support softmmu unaligned accesses, Richard Henderson, 2023/05/03
- [PATCH v4 39/57] tcg: Introduce tcg_target_has_memory_bswap, Richard Henderson, 2023/05/03
- [PATCH v4 43/57] tcg/i386: Use atom_and_align_for_opc, Richard Henderson <=
- [PATCH v4 40/57] tcg: Add INDEX_op_qemu_{ld,st}_i128, Richard Henderson, 2023/05/03
- [PATCH v4 47/57] tcg/mips: Use atom_and_align_for_opc, Richard Henderson, 2023/05/03
- [PATCH v4 44/57] tcg/aarch64: Use atom_and_align_for_opc, Richard Henderson, 2023/05/03
- [PATCH v4 48/57] tcg/ppc: Use atom_and_align_for_opc, Richard Henderson, 2023/05/03