
Re: [PATCH 24/37] target/i386: reimplement 0x0f 0x70-0x77, add AVX


From: Richard Henderson
Subject: Re: [PATCH 24/37] target/i386: reimplement 0x0f 0x70-0x77, add AVX
Date: Mon, 12 Sep 2022 15:29:23 +0100
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 Thunderbird/91.11.0

On 9/12/22 00:04, Paolo Bonzini wrote:
This includes shifts by immediate, which use bits 3-5 of the ModRM byte
as an opcode extension.  With the exception of 128-bit shifts, they are
implemented using gvec.

This also covers VZEROALL and VZEROUPPER, which use the same opcode
as EMMS.  If we wanted to optimize out gen_clear_ymmh, this would be
one of the starting points.  The implementation of the VZEROALL
and VZEROUPPER helpers is by Paul Brook.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
  target/i386/helper.h             |   7 +
  target/i386/tcg/decode-new.c.inc |  76 ++++++++++
  target/i386/tcg/emit.c.inc       | 232 +++++++++++++++++++++++++++++++
  target/i386/tcg/fpu_helper.c     |  46 ++++++
  target/i386/tcg/translate.c      |   3 +-
  5 files changed, 362 insertions(+), 2 deletions(-)

diff --git a/target/i386/helper.h b/target/i386/helper.h
index 3da5df98b9..d7e6878263 100644
--- a/target/i386/helper.h
+++ b/target/i386/helper.h
@@ -221,6 +221,13 @@ DEF_HELPER_3(movq, void, env, ptr, ptr)
  #define SHIFT 2
  #include "ops_sse_header.h"
+DEF_HELPER_1(vzeroall, void, env)
+DEF_HELPER_1(vzeroupper, void, env)
+#ifdef TARGET_X86_64
+DEF_HELPER_1(vzeroall_hi8, void, env)
+DEF_HELPER_1(vzeroupper_hi8, void, env)
+#endif
+
  DEF_HELPER_3(rclb, tl, env, tl, tl)
  DEF_HELPER_3(rclw, tl, env, tl, tl)
  DEF_HELPER_3(rcll, tl, env, tl, tl)
diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc
index 6aa8bac74f..0e2da85934 100644
--- a/target/i386/tcg/decode-new.c.inc
+++ b/target/i386/tcg/decode-new.c.inc
@@ -133,6 +133,19 @@ static uint8_t get_modrm(DisasContext *s, CPUX86State *env)
      return s->modrm;
  }
+static inline const X86OpEntry *decode_by_prefix(DisasContext *s, const X86OpEntry entries[4])
+{
+    if (s->prefix & PREFIX_REPNZ) {
+        return &entries[3];
+    } else if (s->prefix & PREFIX_REPZ) {
+        return &entries[2];
+    } else if (s->prefix & PREFIX_DATA) {
+        return &entries[1];
+    } else {
+        return &entries[0];
+    }
+}

This is the sort of thing I would have expected for some of the other insns for which the distinction was delayed until generation, like SSE4a_{R,I}.
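
E.g., roughly -- the function name and table contents here are placeholders:

static void decode_sse4a(DisasContext *s, CPUX86State *env,
                         X86OpEntry *entry, uint8_t *b)
{
    /* one entry per mandatory prefix: none, 66, F3, F2 */
    static const X86OpEntry opts[4] = {
        ...
    };
    *entry = *decode_by_prefix(s, opts);
}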

+static void decode_group12_13_14(DisasContext *s, CPUX86State *env,
+                                 X86OpEntry *entry, uint8_t *b)
+{
+    static const X86OpEntry group[3][8] = {
+        {
+            /* grp12 */
+            {},
+            {},
+            X86_OP_ENTRY3(PSRLW_i,  H,x, U,x, I,b, vex7 mmx avx2_256 p_00_66),
+            {},
+            X86_OP_ENTRY3(PSRAW_i,  H,x, U,x, I,b, vex7 mmx avx2_256 p_00_66),
+            {},
+            X86_OP_ENTRY3(PSLLW_i,  H,x, U,x, I,b, vex7 mmx avx2_256 p_00_66),
+            {},
+        },

Why combine these 3 groups?

+    *entry = group[*b - 0x71][op];

Split them and you avoid this magic number.
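
E.g. one table per group, indexed only by the reg field -- the name is a placeholder and the rows are the ones quoted above:

static void decode_group12(DisasContext *s, CPUX86State *env,
                           X86OpEntry *entry, uint8_t *b)
{
    static const X86OpEntry group12[8] = {
        ...   /* the grp12 row from above */
    };
    int op = (get_modrm(s, env) >> 3) & 7;
    *entry = group12[op];
}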

+static inline void gen_unary_imm_sse(DisasContext *s, CPUX86State *env,
+                                     X86DecodedInsn *decode,
+                                     SSEFunc_0_ppi xmm, SSEFunc_0_ppi ymm)
+{
+    TCGv_i32 imm = tcg_const_i32(decode->immediate);

Use tcg_constant_i32, which need not be freed.
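
I.e.:

    TCGv_i32 imm = tcg_constant_i32(decode->immediate);

and then no tcg_temp_free_i32 at the end -- interned constants are never freed.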

+static void gen_EMMS_VZERO(DisasContext *s, CPUX86State *env,
+                           X86DecodedInsn *decode)
+{
+    if (!(s->prefix & PREFIX_VEX)) {
+        gen_helper_emms(cpu_env);
+        return;
+    }

Split in decode?  That would make vex8 simpler too.
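
Something like -- entries elided:

static void decode_0F77(DisasContext *s, CPUX86State *env,
                        X86OpEntry *entry, uint8_t *b)
{
    if (s->prefix & PREFIX_VEX) {
        *entry = ...;   /* VZEROALL/VZEROUPPER entry, can be vex8 */
    } else {
        *entry = ...;   /* plain EMMS entry */
    }
}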

+static inline TCGv_ptr make_imm_mmx_vec(uint32_t imm)

Unused? Please do drop all of the inline markers, and/or do build testing with clang, which will Werror on this.

+static inline TCGv_ptr make_imm_xmm_vec(uint32_t imm, int vec_len)
+{
+    MemOp ot = vec_len == 16 ? MO_128 : MO_256;
+    TCGv_i32 imm_v = tcg_const_i32(imm);

tcg_constant_i32; however, I think this use can go away too.

+static void gen_PSRLDQ_i(DisasContext *s, CPUX86State *env,
+                         X86DecodedInsn *decode)
+{
+    int vec_len = sse_vec_len(s, decode);
+    TCGv_ptr imm_vec = make_imm_xmm_vec(decode->immediate, vec_len);
+
+    if (s->vex_l) {
+        gen_helper_psrldq_ymm(cpu_env, s->ptr0, s->ptr1, imm_vec);
+    } else {
+        gen_helper_psrldq_xmm(cpu_env, s->ptr0, s->ptr1, imm_vec);
+    }
+    tcg_temp_free_ptr(imm_vec);

Let's just do this inline:

    int shift = decode->immediate * 8;

    if (shift >= 128) {
        zero;
        return;
    }

    for (lane = 0; lane <= s->vex_l; lane++) {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();

        tcg_gen_ld_i64(q0, cpu_env,
                       offset + lane * 16 + offsetof(XMMReg, MMX_Q(0)));
        tcg_gen_ld_i64(q1, ...);

        if (shift >= 64) {
            tcg_gen_shri_i64(q0, q1, shift - 64);
            tcg_gen_movi_i64(q1, 0);
        } else {
            tcg_gen_extract2_i64(q0, q0, q1, shift);
            tcg_gen_shri_i64(q1, q1, shift);
        }

        tcg_gen_st_i64(q0, cpu_env,
                       offset + lane * 16 + offsetof(XMMReg, MMX_Q(0)));
        tcg_gen_st_i64(q1, ...);
    }


+static void gen_PSLLDQ_i(DisasContext *s, CPUX86State *env,
+                         X86DecodedInsn *decode)
+{
+    int vec_len = sse_vec_len(s, decode);
+    TCGv_ptr imm_vec = make_imm_xmm_vec(decode->immediate, vec_len);
+
+    if (s->vex_l) {
+        gen_helper_pslldq_ymm(cpu_env, s->ptr0, s->ptr1, imm_vec);
+    } else {
+        gen_helper_pslldq_xmm(cpu_env, s->ptr0, s->ptr1, imm_vec);
+    }
+    tcg_temp_free_ptr(imm_vec);
+}

Similar, but the extract2 becomes

    tcg_gen_extract2_i64(q1, q0, q1, 64 - shift);
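
That is, the lane body becomes something like:

        if (shift >= 64) {
            tcg_gen_shli_i64(q1, q0, shift - 64);
            tcg_gen_movi_i64(q0, 0);
        } else {
            /* new high half: (q1 << shift) | (q0 >> (64 - shift)) */
            tcg_gen_extract2_i64(q1, q0, q1, 64 - shift);
            tcg_gen_shli_i64(q0, q0, shift);
        }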

+void helper_vzeroall(CPUX86State *env)
+{
+    int i;
+
+    for (i = 0; i < 8; i++) {
+        env->xmm_regs[i].ZMM_Q(0) = 0;
+        env->xmm_regs[i].ZMM_Q(1) = 0;
+        env->xmm_regs[i].ZMM_Q(2) = 0;
+        env->xmm_regs[i].ZMM_Q(3) = 0;
+    }
+}

Better with memset, I think, available as gen_helper_memset().
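
I.e. done at translation time, roughly -- the register count and full-width size here are only illustrative:

    TCGv_ptr ptr = tcg_temp_new_ptr();

    /* zero xmm_regs[0..7] in one call */
    tcg_gen_addi_ptr(ptr, cpu_env, offsetof(CPUX86State, xmm_regs));
    gen_helper_memset(ptr, ptr, tcg_constant_i32(0),
                      tcg_constant_ptr(8 * sizeof(ZMMReg)));
    tcg_temp_free_ptr(ptr);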

+#ifdef TARGET_X86_64
+void helper_vzeroall_hi8(CPUX86State *env)
+{
+    int i;
+
+    for (i = 8; i < 16; i++) {
+        env->xmm_regs[i].ZMM_Q(0) = 0;
+        env->xmm_regs[i].ZMM_Q(1) = 0;
+        env->xmm_regs[i].ZMM_Q(2) = 0;
+        env->xmm_regs[i].ZMM_Q(3) = 0;
+    }
+}

Likewise.


+
+void helper_vzeroupper_hi8(CPUX86State *ense_new &&
-            ((b >= 0x150 && b <= 0x16f) ||
-             (b >= 0x178 && b <= 0x17f) ||
+            ((b >= 0x150 && b <= 0x17f) ||
               (b >= 0x1d8 && b <= 0x1ff && (b & 8)))) {
              return disas_insn_new(s, cpu, b + 0x100);
          }

More mailer lossage?


r~


