From: Aurelien Jarno
Subject: Re: [Qemu-devel] [PATCH for-next 8/8] tcg-arm: Rearrange slow-path qemu_ld/st
Date: Fri, 16 Aug 2013 10:36:05 +0200
User-agent: Mutt/1.5.21 (2010-09-15)
On Mon, Aug 05, 2013 at 08:07:25AM -1000, Richard Henderson wrote:
> Instead of using a branch-call-branch sequence, arrange for a
> call-branch sequence, using the ARM's conditional call insn.
> This reduces the size of the slow-path within the TB, and makes
> the GETPC_EXT implementation identical for TCG and not-TCG.
>
> Signed-off-by: Richard Henderson <address@hidden>
> ---
> include/exec/exec-all.h | 23 +----
> tcg/arm/tcg-target.c | 269 +++++++++++++++++++++++-------------------------
> 2 files changed, 133 insertions(+), 159 deletions(-)
>
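
As a quick note for readers, here is a rough sketch of how the generated
slow path is rearranged, written in terms of the emitter functions that
appear in the patch below (not literal code from the patch; the comments and
labels are only illustrative):

    /* Old layout: branch-call-branch */
    label_ptr = s->code_ptr;
    tcg_out_b_noaddr(s, COND_NE);        /* TLB miss: branch to slow path */
    /* ... fast-path loads/stores emitted with COND_AL ... */
    /* slow path, out of line: */
    tcg_out_call(s, (tcg_target_long) qemu_ld_helpers[opc & 3]);
    tcg_out_nop(s);                      /* exactly two insns here, so that  */
    tcg_out_nop(s);                      /* GETPC_LDST() can find the branch */
    tcg_out_goto(s, COND_AL, (tcg_target_long) lb->raddr);

    /* New layout: call-branch */
    /* ... fast-path loads/stores emitted with COND_EQ ... */
    label_ptr = s->code_ptr;
    tcg_out_bl_noaddr(s, COND_NE);       /* TLB miss: conditional call */
    /* slow path, out of line: */
    tcg_out_goto(s, COND_AL, (tcg_target_long) qemu_ld_helpers[opc]);
    /* The helper returns directly to the insn after the BLNE, so GETPC()
       in the helper already points into the TB and no GETPC_LDST decoding
       is needed. */
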
> diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
> index b70028a..b3402a1 100644
> --- a/include/exec/exec-all.h
> +++ b/include/exec/exec-all.h
> @@ -333,23 +333,8 @@ extern uintptr_t tci_tb_ptr;
> # define GETRA() ((uintptr_t)__builtin_return_address(0))
> # define GETPC_LDST() ((uintptr_t) ((*(int32_t *)(GETRA() - 4)) - 1))
> # elif defined(__arm__)
> -/* We define two insns between the return address and the branch back to
> - straight-line. Find and decode that branch insn. */
> -# define GETRA() ((uintptr_t)__builtin_return_address(0))
> -# define GETPC_LDST() tcg_getpc_ldst(GETRA())
> -static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
> -{
> - int32_t b;
> - ra += 8; /* skip the two insns */
> - b = *(int32_t *)ra; /* load the branch insn */
> - b = (b << 8) >> (8 - 2); /* extract the displacement */
> - ra += 8; /* branches are relative to pc+8 */
> - ra += b; /* apply the displacement */
> - ra -= 4; /* return a pointer into the current opcode,
> - not the start of the next opcode */
> - return ra;
> -}
> -#elif defined(__aarch64__)
> +# define GETPC_EXT() GETPC()
> +# elif defined(__aarch64__)
> # define GETRA() ((uintptr_t)__builtin_return_address(0))
> # define GETPC_LDST() tcg_getpc_ldst(GETRA())
> static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
> @@ -367,7 +352,9 @@ static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
> # error "CONFIG_QEMU_LDST_OPTIMIZATION needs GETPC_LDST() implementation!"
> # endif
> bool is_tcg_gen_code(uintptr_t pc_ptr);
> -# define GETPC_EXT() (is_tcg_gen_code(GETRA()) ? GETPC_LDST() : GETPC())
> +# ifndef GETPC_EXT
> +# define GETPC_EXT() (is_tcg_gen_code(GETRA()) ? GETPC_LDST() : GETPC())
> +# endif
> #else
> # define GETPC_EXT() GETPC()
> #endif
> diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c
> index 9a14a20..89917d6 100644
> --- a/tcg/arm/tcg-target.c
> +++ b/tcg/arm/tcg-target.c
> @@ -166,10 +166,26 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
> break;
>
> case 'r':
> +#ifndef CONFIG_SOFTMMU
> + case 'a':
> + case 'b':
> +#endif
> ct->ct |= TCG_CT_REG;
> tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
> break;
>
> +#ifdef CONFIG_SOFTMMU
> + /* qemu_ld data registers -- softmmu uses the return registers only. */
> + case 'a':
> + ct->ct |= TCG_CT_REG;
> + tcg_regset_set32(ct->u.regs, 0, 1);
> + break;
> + case 'b':
> + ct->ct |= TCG_CT_REG;
> + tcg_regset_set32(ct->u.regs, 0, 2);
> + break;
> +#endif
> +
> /* qemu_ld address */
> case 'l':
> ct->ct |= TCG_CT_REG;
> @@ -182,15 +198,6 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
> tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
> #endif
> break;
> - case 'L':
> - ct->ct |= TCG_CT_REG;
> - tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
> -#ifdef CONFIG_SOFTMMU
> - /* r1 is still needed to load data_reg or data_reg2,
> - so don't use it. */
> - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
> -#endif
> - break;
>
> /* qemu_st address & data_reg */
> case 's':
> @@ -382,13 +389,17 @@ static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
> /* We pay attention here to not modify the branch target by skipping
> the corresponding bytes. This ensure that caches and memory are
> kept coherent during retranslation. */
> -#ifdef HOST_WORDS_BIGENDIAN
> - tcg_out8(s, (cond << 4) | 0x0a);
> - s->code_ptr += 3;
> -#else
> s->code_ptr += 3;
> tcg_out8(s, (cond << 4) | 0x0a);
> -#endif
> +}
> +
> +static inline void tcg_out_bl_noaddr(TCGContext *s, int cond)
> +{
> + /* We pay attention here to not modify the branch target by skipping
> + the corresponding bytes. This ensure that caches and memory are
> + kept coherent during retranslation. */
> + s->code_ptr += 3;
> + tcg_out8(s, (cond << 4) | 0x0b);
> }
>
> static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
> @@ -1002,34 +1013,27 @@ static inline void tcg_out_st8(TCGContext *s, int cond,
> tcg_out_st8_12(s, cond, rd, rn, offset);
> }
>
> -/* The _goto case is normally between TBs within the same code buffer,
> - * and with the code buffer limited to 16MB we shouldn't need the long
> - * case.
> - *
> - * .... except to the prologue that is in its own buffer.
> +/* The _goto case is normally between TBs within the same code buffer, and
> + * with the code buffer limited to 16MB we wouldn't need the long case.
> + * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
> */
> static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
> {
> - int32_t val;
> + int32_t disp = addr - (tcg_target_long) s->code_ptr;
>
> - if (addr & 1) {
> - /* goto to a Thumb destination isn't supported */
> - tcg_abort();
> + if ((addr & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
> + tcg_out_b(s, cond, disp);
> + return;
> }
>
> - val = addr - (tcg_target_long) s->code_ptr;
> - if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
> - tcg_out_b(s, cond, val);
> - else {
> - if (cond == COND_AL) {
> - tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
> - tcg_out32(s, addr);
> - } else {
> - tcg_out_movi32(s, cond, TCG_REG_TMP, val - 8);
> - tcg_out_dat_reg(s, cond, ARITH_ADD,
> - TCG_REG_PC, TCG_REG_PC,
> - TCG_REG_TMP, SHIFT_IMM_LSL(0));
> + tcg_out_movi32(s, cond, TCG_REG_TMP, addr);
> + if (use_armv5t_instructions) {
> + tcg_out_bx(s, cond, TCG_REG_TMP);
> + } else {
> + if (addr & 1) {
> + tcg_abort();
> }
> + tcg_out_mov_reg(s, cond, TCG_REG_PC, TCG_REG_TMP);
> }
> }
>
> @@ -1087,17 +1091,44 @@ static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
>
> #include "exec/softmmu_defs.h"
>
> +static uint32_t arm_ldbu_mmu(CPUState *env, target_ulong addr, int idx)
> +{
> + return (uint8_t)helper_ret_ldb_mmu(env, addr, idx, GETPC());
> +}
> +
> +static uint32_t arm_ldbs_mmu(CPUState *env, target_ulong addr, int idx)
> +{
> + return (int8_t)helper_ret_ldb_mmu(env, addr, idx, GETPC());
> +}
> +
> +static uint32_t arm_ldwu_mmu(CPUState *env, target_ulong addr, int idx)
> +{
> + return (uint16_t)helper_ret_ldw_mmu(env, addr, idx, GETPC());
> +}
> +
> +static uint32_t arm_ldws_mmu(CPUState *env, target_ulong addr, int idx)
> +{
> + return (int16_t)helper_ret_ldw_mmu(env, addr, idx, GETPC());
> +}
> +
> /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
> - int mmu_idx) */
> -static const void * const qemu_ld_helpers[4] = {
> - helper_ldb_mmu,
> - helper_ldw_mmu,
> + * int mmu_idx)
> + */
> +static const void * const qemu_ld_helpers[8] = {
> + arm_ldbu_mmu,
> + arm_ldwu_mmu,
> + helper_ldl_mmu,
> + helper_ldq_mmu,
> +
> + arm_ldbs_mmu,
> + arm_ldws_mmu,
> helper_ldl_mmu,
> helper_ldq_mmu,
> };
>
> /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
> - uintxx_t val, int mmu_idx) */
> + * uintxx_t val, int mmu_idx)
> + */
> static const void * const qemu_st_helpers[4] = {
> helper_stb_mmu,
> helper_stw_mmu,
> @@ -1260,8 +1291,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, int opc,
>
> static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
> {
> - TCGReg argreg, data_reg, data_reg2;
> - uint8_t *start;
> + TCGReg argreg;
>
> reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr);
>
> @@ -1272,59 +1302,18 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
> argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
> }
> argreg = tcg_out_arg_imm32(s, argreg, lb->mem_index);
> - tcg_out_call(s, (tcg_target_long) qemu_ld_helpers[lb->opc & 3]);
> -
> - data_reg = lb->datalo_reg;
> - data_reg2 = lb->datahi_reg;
> -
> - start = s->code_ptr;
> - switch (lb->opc) {
> - case 0 | 4:
> - tcg_out_ext8s(s, COND_AL, data_reg, TCG_REG_R0);
> - break;
> - case 1 | 4:
> - tcg_out_ext16s(s, COND_AL, data_reg, TCG_REG_R0);
> - break;
> - case 0:
> - case 1:
> - case 2:
> - default:
> - tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0);
> - break;
> - case 3:
> - tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0);
> - tcg_out_mov_reg(s, COND_AL, data_reg2, TCG_REG_R1);
> - break;
> - }
>
> - /* For GETPC_LDST in exec-all.h, we architect exactly 2 insns between
> - the call and the branch back to straight-line code. Note that the
> - moves above could be elided by register allocation, nor do we know
> - which code alternative we chose for extension. */
> - switch (s->code_ptr - start) {
> - case 0:
> - tcg_out_nop(s);
> - /* FALLTHRU */
> - case 4:
> - tcg_out_nop(s);
> - /* FALLTHRU */
> - case 8:
> - break;
> - default:
> - abort();
> - }
> -
> - tcg_out_goto(s, COND_AL, (tcg_target_long)lb->raddr);
> + /* Tail-call to the helper, which will return to the fast path. */
> + tcg_out_goto(s, COND_AL, (tcg_target_long) qemu_ld_helpers[lb->opc]);
> }
>
> static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
> {
> - TCGReg argreg, data_reg, data_reg2;
> + TCGReg argreg;
>
> reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr);
>
> - argreg = TCG_REG_R0;
> - argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
> + argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
> if (TARGET_LONG_BITS == 64) {
> argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg,
> lb->addrhi_reg);
> } else {
> @@ -1349,13 +1338,9 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
> }
>
> argreg = tcg_out_arg_imm32(s, argreg, lb->mem_index);
> - tcg_out_call(s, (tcg_target_long) qemu_st_helpers[lb->opc & 3]);
>
> - /* For GETPC_LDST in exec-all.h, we architect exactly 2 insns between
> - the call and the branch back to straight-line code. */
> - tcg_out_nop(s);
> - tcg_out_nop(s);
> - tcg_out_goto(s, COND_AL, (tcg_target_long)lb->raddr);
> + /* Tail-call to the helper, which will return to the fast path. */
> + tcg_out_goto(s, COND_AL, (tcg_target_long) qemu_st_helpers[lb->opc & 3]);
> }
> #endif /* SOFTMMU */
>
> @@ -1385,57 +1370,58 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
> tcg_out_tlb_read(s, addr_reg, addr_reg2, s_bits,
> offsetof(CPUArchState,
> tlb_table[mem_index][0].addr_read));
>
> - label_ptr = s->code_ptr;
> - tcg_out_b_noaddr(s, COND_NE);
> -
> tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R2,
> offsetof(CPUTLBEntry, addend)
> - offsetof(CPUTLBEntry, addr_read));
>
> switch (opc) {
> case 0:
> - tcg_out_ld8_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
> + tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
> break;
> case 0 | 4:
> - tcg_out_ld8s_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
> + tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
> break;
> case 1:
> - tcg_out_ld16u_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
> + tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
> if (bswap) {
> - tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
> + tcg_out_bswap16(s, COND_EQ, data_reg, data_reg);
> }
> break;
> case 1 | 4:
> if (bswap) {
> - tcg_out_ld16u_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
> - tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
> + tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
> + tcg_out_bswap16s(s, COND_EQ, data_reg, data_reg);
> } else {
> - tcg_out_ld16s_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
> + tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
> }
> break;
> case 2:
> default:
> - tcg_out_ld32_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
> + tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
> if (bswap) {
> - tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
> + tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
> }
> break;
> case 3:
> if (bswap) {
> - tcg_out_ld32_rwb(s, COND_AL, data_reg2, TCG_REG_R1, addr_reg);
> - tcg_out_ld32_12(s, COND_AL, data_reg, TCG_REG_R1, 4);
> - tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
> - tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
> + tcg_out_ld32_rwb(s, COND_EQ, data_reg2, TCG_REG_R1, addr_reg);
> + tcg_out_ld32_12(s, COND_EQ, data_reg, TCG_REG_R1, 4);
> + tcg_out_bswap32(s, COND_EQ, data_reg2, data_reg2);
> + tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
> } else if (use_armv6_instructions
> && (data_reg & 1) == 0 && data_reg2 == data_reg + 1) {
> - tcg_out_ldrd_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
> + tcg_out_ldrd_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
> } else {
> - tcg_out_ld32_rwb(s, COND_AL, data_reg, TCG_REG_R1, addr_reg);
> - tcg_out_ld32_12(s, COND_AL, data_reg2, TCG_REG_R1, 4);
> + tcg_out_ld32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
> + tcg_out_ld32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
> }
> break;
> }
>
> + /* The conditional call must come last, as we're going to return here.  */
> + label_ptr = s->code_ptr;
> + tcg_out_bl_noaddr(s, COND_NE);
> +
> add_qemu_ldst_label(s, 1, opc, data_reg, data_reg2, addr_reg, addr_reg2,
> mem_index, s->code_ptr, label_ptr);
> #else /* !CONFIG_SOFTMMU */
> @@ -1531,50 +1517,51 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
> offsetof(CPUArchState,
> tlb_table[mem_index][0].addr_write));
>
> - label_ptr = s->code_ptr;
> - tcg_out_b_noaddr(s, COND_NE);
> -
> tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R2,
> offsetof(CPUTLBEntry, addend)
> - offsetof(CPUTLBEntry, addr_write));
>
> switch (opc) {
> case 0:
> - tcg_out_st8_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
> + tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
> break;
> case 1:
> if (bswap) {
> - tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, data_reg);
> - tcg_out_st16_r(s, COND_AL, TCG_REG_R0, addr_reg, TCG_REG_R1);
> + tcg_out_bswap16st(s, COND_EQ, TCG_REG_R0, data_reg);
> + tcg_out_st16_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
> } else {
> - tcg_out_st16_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
> + tcg_out_st16_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
> }
> break;
> case 2:
> default:
> if (bswap) {
> - tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
> - tcg_out_st32_r(s, COND_AL, TCG_REG_R0, addr_reg, TCG_REG_R1);
> + tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
> + tcg_out_st32_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
> } else {
> - tcg_out_st32_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
> + tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
> }
> break;
> case 3:
> if (bswap) {
> - tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
> - tcg_out_st32_rwb(s, COND_AL, TCG_REG_R0, TCG_REG_R1, addr_reg);
> - tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
> - tcg_out_st32_12(s, COND_AL, TCG_REG_R0, TCG_REG_R1, 4);
> + tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg2);
> + tcg_out_st32_rwb(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, addr_reg);
> + tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
> + tcg_out_st32_12(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, 4);
> } else if (use_armv6_instructions
> && (data_reg & 1) == 0 && data_reg2 == data_reg + 1) {
> - tcg_out_strd_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
> + tcg_out_strd_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
> } else {
> - tcg_out_st32_rwb(s, COND_AL, data_reg, TCG_REG_R1, addr_reg);
> - tcg_out_st32_12(s, COND_AL, data_reg2, TCG_REG_R1, 4);
> + tcg_out_st32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
> + tcg_out_st32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
> }
> break;
> }
>
> + /* The conditional call must come last, as we're going to return here.  */
> + label_ptr = s->code_ptr;
> + tcg_out_bl_noaddr(s, COND_NE);
> +
> add_qemu_ldst_label(s, 0, opc, data_reg, data_reg2, addr_reg, addr_reg2,
> mem_index, s->code_ptr, label_ptr);
> #else /* !CONFIG_SOFTMMU */
> @@ -2026,24 +2013,24 @@ static const TCGTargetOpDef arm_op_defs[] = {
> { INDEX_op_setcond2_i32, { "r", "r", "r", "rIN", "rIN" } },
>
> #if TARGET_LONG_BITS == 32
> - { INDEX_op_qemu_ld8u, { "r", "l" } },
> - { INDEX_op_qemu_ld8s, { "r", "l" } },
> - { INDEX_op_qemu_ld16u, { "r", "l" } },
> - { INDEX_op_qemu_ld16s, { "r", "l" } },
> - { INDEX_op_qemu_ld32, { "r", "l" } },
> - { INDEX_op_qemu_ld64, { "L", "L", "l" } },
> + { INDEX_op_qemu_ld8u, { "a", "l" } },
> + { INDEX_op_qemu_ld8s, { "a", "l" } },
> + { INDEX_op_qemu_ld16u, { "a", "l" } },
> + { INDEX_op_qemu_ld16s, { "a", "l" } },
> + { INDEX_op_qemu_ld32, { "a", "l" } },
> + { INDEX_op_qemu_ld64, { "a", "b", "l" } },
>
> { INDEX_op_qemu_st8, { "s", "s" } },
> { INDEX_op_qemu_st16, { "s", "s" } },
> { INDEX_op_qemu_st32, { "s", "s" } },
> { INDEX_op_qemu_st64, { "s", "s", "s" } },
> #else
> - { INDEX_op_qemu_ld8u, { "r", "l", "l" } },
> - { INDEX_op_qemu_ld8s, { "r", "l", "l" } },
> - { INDEX_op_qemu_ld16u, { "r", "l", "l" } },
> - { INDEX_op_qemu_ld16s, { "r", "l", "l" } },
> - { INDEX_op_qemu_ld32, { "r", "l", "l" } },
> - { INDEX_op_qemu_ld64, { "L", "L", "l", "l" } },
> + { INDEX_op_qemu_ld8u, { "a", "l", "l" } },
> + { INDEX_op_qemu_ld8s, { "a", "l", "l" } },
> + { INDEX_op_qemu_ld16u, { "a", "l", "l" } },
> + { INDEX_op_qemu_ld16s, { "a", "l", "l" } },
> + { INDEX_op_qemu_ld32, { "a", "l", "l" } },
> + { INDEX_op_qemu_ld64, { "a", "b", "l", "l" } },
>
> { INDEX_op_qemu_st8, { "s", "s", "s" } },
> { INDEX_op_qemu_st16, { "s", "s", "s" } },
I am not able to build at least the mips-softmmu target with this patch
applied:
| /home/aurel32/qemu/tcg/arm/tcg-target.c: In function ‘arm_ldbu_mmu’:
| /home/aurel32/qemu/tcg/arm/tcg-target.c:1096:5: error: passing argument 1 of ‘helper_ret_ldb_mmu’ from incompatible pointer type [-Werror]
| /home/aurel32/qemu/include/exec/softmmu_defs.h:12:9: note: expected ‘struct CPUMIPSState *’ but argument is of type ‘struct CPUState *’
| /home/aurel32/qemu/tcg/arm/tcg-target.c: In function ‘arm_ldbs_mmu’:
| /home/aurel32/qemu/tcg/arm/tcg-target.c:1101:5: error: passing argument 1 of ‘helper_ret_ldb_mmu’ from incompatible pointer type [-Werror]
| /home/aurel32/qemu/include/exec/softmmu_defs.h:12:9: note: expected ‘struct CPUMIPSState *’ but argument is of type ‘struct CPUState *’
| /home/aurel32/qemu/tcg/arm/tcg-target.c: In function ‘arm_ldwu_mmu’:
| /home/aurel32/qemu/tcg/arm/tcg-target.c:1106:5: error: passing argument 1 of ‘helper_ret_ldw_mmu’ from incompatible pointer type [-Werror]
| /home/aurel32/qemu/include/exec/softmmu_defs.h:14:10: note: expected ‘struct CPUMIPSState *’ but argument is of type ‘struct CPUState *’
| /home/aurel32/qemu/tcg/arm/tcg-target.c: In function ‘arm_ldws_mmu’:
| /home/aurel32/qemu/tcg/arm/tcg-target.c:1111:5: error: passing argument 1 of ‘helper_ret_ldw_mmu’ from incompatible pointer type [-Werror]
| /home/aurel32/qemu/include/exec/softmmu_defs.h:14:10: note: expected ‘struct CPUMIPSState *’ but argument is of type ‘struct CPUState *’
| In file included from /home/aurel32/qemu/tcg/tcg.c:198:0:
| /home/aurel32/qemu/tcg/arm/tcg-target.c: In function ‘tcg_out_qemu_st_slow_path’:
| /home/aurel32/qemu/tcg/arm/tcg-target.c:1323:5: error: ‘data_reg’ undeclared (first use in this function)
| /home/aurel32/qemu/tcg/arm/tcg-target.c:1323:5: note: each undeclared identifier is reported only once for each function it appears in
| /home/aurel32/qemu/tcg/arm/tcg-target.c:1324:5: error: ‘data_reg2’ undeclared (first use in this function)
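
The first four errors come from the new arm_ld*_mmu wrappers taking a
CPUState * while helper_ret_ldb_mmu/helper_ret_ldw_mmu are declared with
CPUArchState * in softmmu_defs.h. Just a guess at the intended declaration
(untested, and only the two byte wrappers shown):

static uint32_t arm_ldbu_mmu(CPUArchState *env, target_ulong addr, int idx)
{
    /* Zero-extend the byte result here, since the slow path no longer
       does the extension after the call.  */
    return (uint8_t)helper_ret_ldb_mmu(env, addr, idx, GETPC());
}

static uint32_t arm_ldbs_mmu(CPUArchState *env, target_ulong addr, int idx)
{
    /* Sign-extend the byte result.  */
    return (int8_t)helper_ret_ldb_mmu(env, addr, idx, GETPC());
}

The data_reg/data_reg2 errors in tcg_out_qemu_st_slow_path look like
leftover uses of those locals after their declarations were removed.
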
--
Aurelien Jarno GPG: 1024D/F1BCDB73
address@hidden http://www.aurel32.net