From: Michael Tokarev
Subject: [Stable-7.2.9 10/20] target/i386: Fix 32-bit wrapping of pc/eip computation
Date: Tue, 23 Jan 2024 11:48:20 +0300
From: Richard Henderson <richard.henderson@linaro.org>
In 32-bit mode, pc = eip + cs_base is also 32-bit, and must wrap.
Failure to do so results in incorrect memory exceptions to the guest.
Before 732d548732ed, this was implicitly done via truncation to
target_ulong but only in qemu-system-i386, not qemu-system-x86_64.
To fix this, we must add conditional zero-extensions.
Since we have to test for 32 vs 64-bit anyway, note that cs_base
is always zero in 64-bit mode.
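As an editorial sketch (not part of the patch; the helper name and values
are made up), the intended derivation of the linear PC is:

    #include <stdint.h>

    /* cs64 mirrors the HF_CS64_MASK test introduced below. */
    static uint64_t linear_pc(uint64_t eip, uint64_t cs_base, int cs64)
    {
        if (cs64) {
            return eip;                   /* cs_base is always 0 in 64-bit mode */
        }
        return (uint32_t)(eip + cs_base); /* 16/32-bit: the sum wraps at 32 bits */
    }

For example, linear_pc(0xfffffff0, 0x10, 0) yields 0 rather than
0x100000000; losing that wrap is what led to the bogus memory exceptions.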
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2022
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20231212172510.103305-1-richard.henderson@linaro.org>
(cherry picked from commit b5e0d5d22fbffc3d8f7d3e86d7a2d05a1a974e27)
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
(Mjt: context fix in target/i386/tcg/tcg-cpu.c for v8.1.0-1190-gb77af26e97
"accel/tcg: Replace CPUState.env_ptr with cpu_env()")
(Mjt: fixup in target/i386/tcg/tcg-cpu.c for v7.2.0-1854-g34a39c2443
"target/i386: Replace `tb_pc()` with `tb->pc`")
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index d4bc19577a..f67cee477a 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -2217,10 +2217,15 @@ static inline int cpu_mmu_index_kernel(CPUX86State *env)
static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
target_ulong *cs_base, uint32_t *flags)
{
- *cs_base = env->segs[R_CS].base;
- *pc = *cs_base + env->eip;
*flags = env->hflags |
(env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
+ if (env->hflags & HF_CS64_MASK) {
+ *cs_base = 0;
+ *pc = env->eip;
+ } else {
+ *cs_base = env->segs[R_CS].base;
+ *pc = (uint32_t)(*cs_base + env->eip);
+ }
}
void do_cpu_init(X86CPU *cpu);
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index 79ac5908f7..563f42ee4e 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -52,7 +52,12 @@ static void x86_cpu_synchronize_from_tb(CPUState *cs,
/* The instruction pointer is always up to date with TARGET_TB_PCREL. */
if (!TARGET_TB_PCREL) {
CPUX86State *env = cs->env_ptr;
- env->eip = tb_pc(tb) - tb->cs_base;
+
+ if (tb->flags & HF_CS64_MASK) {
+ env->eip = tb_pc(tb);
+ } else {
+ env->eip = (uint32_t)(tb_pc(tb) - tb->cs_base);
+ }
}
}
@@ -66,8 +71,10 @@ static void x86_restore_state_to_opc(CPUState *cs,
if (TARGET_TB_PCREL) {
env->eip = (env->eip & TARGET_PAGE_MASK) | data[0];
+ } else if (tb->flags & HF_CS64_MASK) {
+ env->eip = data[0];
} else {
- env->eip = data[0] - tb->cs_base;
+ env->eip = (uint32_t)(data[0] - tb->cs_base);
}
if (cc_op != CC_OP_DYNAMIC) {
env->cc_op = cc_op;
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index 7e0b2a709a..15b030d84b 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -547,8 +547,10 @@ static void gen_update_eip_cur(DisasContext *s)
assert(s->pc_save != -1);
if (TARGET_TB_PCREL) {
tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
+ } else if (CODE64(s)) {
+ tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
} else {
- tcg_gen_movi_tl(cpu_eip, s->base.pc_next - s->cs_base);
+ tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
}
s->pc_save = s->base.pc_next;
}
@@ -558,8 +560,10 @@ static void gen_update_eip_next(DisasContext *s)
assert(s->pc_save != -1);
if (TARGET_TB_PCREL) {
tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
+ } else if (CODE64(s)) {
+ tcg_gen_movi_tl(cpu_eip, s->pc);
} else {
- tcg_gen_movi_tl(cpu_eip, s->pc - s->cs_base);
+ tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
}
s->pc_save = s->pc;
}
@@ -605,8 +609,10 @@ static TCGv eip_next_tl(DisasContext *s)
TCGv ret = tcg_temp_new();
tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
return ret;
+ } else if (CODE64(s)) {
+ return tcg_constant_tl(s->pc);
} else {
- return tcg_constant_tl(s->pc - s->cs_base);
+ return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
}
}
@@ -617,8 +623,10 @@ static TCGv eip_cur_tl(DisasContext *s)
TCGv ret = tcg_temp_new();
tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
return ret;
+ } else if (CODE64(s)) {
+ return tcg_constant_tl(s->base.pc_next);
} else {
- return tcg_constant_tl(s->base.pc_next - s->cs_base);
+ return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
}
}
@@ -2875,6 +2883,10 @@ static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
}
}
new_eip &= mask;
+ new_pc = new_eip + s->cs_base;
+ if (!CODE64(s)) {
+ new_pc = (uint32_t)new_pc;
+ }
gen_update_cc_op(s);
set_cc_op(s, CC_OP_DYNAMIC);
@@ -2892,8 +2904,7 @@ static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
}
}
- if (use_goto_tb &&
- translator_use_goto_tb(&s->base, new_eip + s->cs_base)) {
+ if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
/* jump to same page: we can use a direct jump */
tcg_gen_goto_tb(tb_num);
if (!TARGET_TB_PCREL) {
--
2.39.2