[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [linux-user] Fixed Qemu crash using Gdbstub
From: |
Lionel Landwerlin |
Subject: |
[Qemu-devel] [linux-user] Fixed Qemu crash using Gdbstub |
Date: |
Sat, 13 Dec 2008 00:52:24 +0100 |
I know the commit is pretty big... I hope it applies cleanly.
The data shared between CPUs should probably be protected with better locking.
Regards,
From 2b3fe65ea3f2ee8dd3efbb52b66a2f4e53b788ea Mon Sep 17 00:00:00 2001
From: Lionel Landwerlin <address@hidden>
Date: Sat, 13 Dec 2008 00:32:04 +0100
Subject: [PATCH] [linux-user] Fixed Qemu crash using Gdbstub
When using gdb with qemu (via the gdbstub), if the emulated
application is multithreaded and triggers a segfault, qemu
crashes.
Qemu crashes because the break/watch points are shared between
CPUs. The TAILQ structure which holds the list of break/watch
points is copied into each CPUState structure. When the last
breakpoint is removed (which happens on a segfault), it is
removed across all CPUs, but because of the copied TAILQ
structure the same breakpoint can be freed N times, where N is
the current number of CPUs.
Signed-off-by: Lionel Landwerlin <address@hidden>
---
cpu-defs.h | 10 ++--
cpu-exec.c | 12 ++--
exec.c | 59 ++++++++++---------
target-alpha/translate.c | 4 +-
target-arm/translate.c | 4 +-
target-cris/translate.c | 80 +++++++++++++-------------
target-i386/helper.c | 26 ++++----
target-i386/translate.c | 146 +++++++++++++++++++++++-----------------------
target-m68k/translate.c | 4 +-
target-mips/translate.c | 4 +-
target-ppc/translate.c | 4 +-
target-sh4/translate.c | 5 +-
target-sparc/translate.c | 4 +-
13 files changed, 182 insertions(+), 180 deletions(-)
diff --git a/cpu-defs.h b/cpu-defs.h
index ed8c001..17732fa 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -123,9 +123,9 @@ typedef struct CPUTLBEntry {
target_phys_addr_t addend;
#endif
/* padding to get a power of two size */
- uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
- (sizeof(target_ulong) * 3 +
- ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t)
- 1)) +
+ uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
+ (sizeof(target_ulong) * 3 +
+ ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t)
- 1)) +
sizeof(target_phys_addr_t))];
} CPUTLBEntry;
@@ -189,10 +189,10 @@ typedef struct CPUWatchpoint {
\
/* from this point: preserved by CPU reset */ \
/* ice debug support */ \
- TAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints; \
+ TAILQ_HEAD(breakpoints_head, CPUBreakpoint) *breakpoints; \
int singlestep_enabled; \
\
- TAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints; \
+ TAILQ_HEAD(watchpoints_head, CPUWatchpoint) *watchpoints; \
CPUWatchpoint *watchpoint_hit; \
\
struct GDBRegisterState *gdb_regs; \
diff --git a/cpu-exec.c b/cpu-exec.c
index 9a35a59..8950a0a 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -198,7 +198,7 @@ static void cpu_handle_debug_exception(CPUState *env)
CPUWatchpoint *wp;
if (!env->watchpoint_hit)
- TAILQ_FOREACH(wp, &env->watchpoints, entry)
+ TAILQ_FOREACH(wp, env->watchpoints, entry)
wp->flags &= ~BP_WATCHPOINT_HIT;
if (debug_excp_handler)
@@ -378,10 +378,10 @@ int cpu_exec(CPUState *env1)
do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
next_tb = 0;
} else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
- (((env->hflags2 & HF2_VINTR_MASK) &&
+ (((env->hflags2 & HF2_VINTR_MASK) &&
(env->hflags2 & HF2_HIF_MASK)) ||
- (!(env->hflags2 & HF2_VINTR_MASK) &&
- (env->eflags & IF_MASK &&
+ (!(env->hflags2 & HF2_VINTR_MASK) &&
+ (env->eflags & IF_MASK &&
!(env->hflags & HF_INHIBIT_IRQ_MASK)))))
{
int intno;
svm_check_intercept(SVM_EXIT_INTR);
@@ -396,7 +396,7 @@ int cpu_exec(CPUState *env1)
next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
} else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
- (env->eflags & IF_MASK) &&
+ (env->eflags & IF_MASK) &&
!(env->hflags & HF_INHIBIT_IRQ_MASK)) {
int intno;
/* FIXME: this should respect TPR */
@@ -1485,7 +1485,7 @@ int cpu_signal_handler(int host_signum, void *pinfo,
pc = uc->uc_mcontext.sc_iaoq[0];
/* FIXME: compute is_write */
is_write = 0;
- return handle_cpu_signal(pc, (unsigned long)info->si_addr,
+ return handle_cpu_signal(pc, (unsigned long)info->si_addr,
is_write,
&uc->uc_sigmask, puc);
}
diff --git a/exec.c b/exec.c
index 105812f..4c7219a 100644
--- a/exec.c
+++ b/exec.c
@@ -209,21 +209,21 @@ static void map_exec(void *addr, long size)
DWORD old_protect;
VirtualProtect(addr, size,
PAGE_EXECUTE_READWRITE, &old_protect);
-
+
}
#else
static void map_exec(void *addr, long size)
{
unsigned long start, end, page_size;
-
+
page_size = getpagesize();
start = (unsigned long)addr;
start &= ~(page_size - 1);
-
+
end = (unsigned long)addr + size;
end += page_size - 1;
end &= ~(page_size - 1);
-
+
mprotect((void *)start, end - start,
PROT_READ | PROT_WRITE | PROT_EXEC);
}
@@ -273,7 +273,7 @@ static void page_init(void)
(1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
page_set_flags(startaddr & TARGET_PAGE_MASK,
TARGET_PAGE_ALIGN(endaddr),
- PAGE_RESERVED);
+ PAGE_RESERVED);
}
} while (!feof(f));
fclose(f);
@@ -314,7 +314,7 @@ static inline PageDesc *page_find_alloc(target_ulong index)
unsigned long addr = h2g(p);
page_set_flags(addr & TARGET_PAGE_MASK,
TARGET_PAGE_ALIGN(addr + len),
- PAGE_RESERVED);
+ PAGE_RESERVED);
}
#else
p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
@@ -420,7 +420,7 @@ static void code_gen_alloc(unsigned long tb_size)
code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
/* The code gen buffer location may have constraints depending on
the host cpu and OS */
-#if defined(__linux__)
+#if defined(__linux__)
{
int flags;
void *start = NULL;
@@ -467,7 +467,7 @@ static void code_gen_alloc(unsigned long tb_size)
code_gen_buffer_size = (800 * 1024 * 1024);
#endif
code_gen_buffer = mmap(addr, code_gen_buffer_size,
- PROT_WRITE | PROT_READ | PROT_EXEC,
+ PROT_WRITE | PROT_READ | PROT_EXEC,
flags, -1, 0);
if (code_gen_buffer == MAP_FAILED) {
fprintf(stderr, "Could not allocate dynamic translator buffer\n");
@@ -484,7 +484,7 @@ static void code_gen_alloc(unsigned long tb_size)
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
map_exec(code_gen_prologue, sizeof(code_gen_prologue));
- code_gen_buffer_max_size = code_gen_buffer_size -
+ code_gen_buffer_max_size = code_gen_buffer_size -
code_gen_max_block_size();
code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
@@ -535,6 +535,7 @@ void cpu_exec_init(CPUState *env)
{
CPUState **penv;
int cpu_index;
+ fprintf (stderr, "\n\nin %s\n\n", __func__);
env->next_cpu = NULL;
penv = &first_cpu;
@@ -544,8 +545,10 @@ void cpu_exec_init(CPUState *env)
cpu_index++;
}
env->cpu_index = cpu_index;
- TAILQ_INIT(&env->breakpoints);
- TAILQ_INIT(&env->watchpoints);
+ env->breakpoints = malloc (sizeof (*env->breakpoints));
+ env->watchpoints = malloc (sizeof (*env->watchpoints));
+ TAILQ_INIT(env->breakpoints);
+ TAILQ_INIT(env->watchpoints);
*penv = env;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
@@ -1329,9 +1332,9 @@ int cpu_watchpoint_insert(CPUState *env, target_ulong
addr, target_ulong len,
/* keep all GDB-injected watchpoints in front */
if (flags & BP_GDB)
- TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
+ TAILQ_INSERT_HEAD(env->watchpoints, wp, entry);
else
- TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
+ TAILQ_INSERT_TAIL(env->watchpoints, wp, entry);
tlb_flush_page(env, addr);
@@ -1347,7 +1350,7 @@ int cpu_watchpoint_remove(CPUState *env, target_ulong
addr, target_ulong len,
target_ulong len_mask = ~(len - 1);
CPUWatchpoint *wp;
- TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ TAILQ_FOREACH(wp, env->watchpoints, entry) {
if (addr == wp->vaddr && len_mask == wp->len_mask
&& flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
cpu_watchpoint_remove_by_ref(env, wp);
@@ -1360,7 +1363,7 @@ int cpu_watchpoint_remove(CPUState *env, target_ulong
addr, target_ulong len,
/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
- TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
+ TAILQ_REMOVE(env->watchpoints, watchpoint, entry);
tlb_flush_page(env, watchpoint->vaddr);
@@ -1372,7 +1375,7 @@ void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
CPUWatchpoint *wp, *next;
- TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
+ TAILQ_FOREACH_SAFE(wp, env->watchpoints, entry, next) {
if (wp->flags & mask)
cpu_watchpoint_remove_by_ref(env, wp);
}
@@ -1394,9 +1397,9 @@ int cpu_breakpoint_insert(CPUState *env, target_ulong pc,
int flags,
/* keep all GDB-injected breakpoints in front */
if (flags & BP_GDB)
- TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
+ TAILQ_INSERT_HEAD(env->breakpoints, bp, entry);
else
- TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
+ TAILQ_INSERT_TAIL(env->breakpoints, bp, entry);
breakpoint_invalidate(env, pc);
@@ -1414,7 +1417,7 @@ int cpu_breakpoint_remove(CPUState *env, target_ulong pc,
int flags)
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp;
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == pc && bp->flags == flags) {
cpu_breakpoint_remove_by_ref(env, bp);
return 0;
@@ -1430,10 +1433,8 @@ int cpu_breakpoint_remove(CPUState *env, target_ulong
pc, int flags)
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
- TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
-
+ TAILQ_REMOVE(env->breakpoints, breakpoint, entry);
breakpoint_invalidate(env, breakpoint->pc);
-
qemu_free(breakpoint);
#endif
}
@@ -1444,7 +1445,7 @@ void cpu_breakpoint_remove_all(CPUState *env, int mask)
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp, *next;
- TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
+ TAILQ_FOREACH_SAFE(bp, env->breakpoints, entry, next) {
if (bp->flags & mask)
cpu_breakpoint_remove_by_ref(env, bp);
}
@@ -1672,11 +1673,11 @@ static inline void tlb_flush_jmp_cache(CPUState *env,
target_ulong addr)
/* Discard jump cache entries for any tb which might potentially
overlap the flushed page. */
i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
- memset (&env->tb_jmp_cache[i], 0,
+ memset (&env->tb_jmp_cache[i], 0,
TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
i = tb_jmp_cache_hash_page(addr);
- memset (&env->tb_jmp_cache[i], 0,
+ memset (&env->tb_jmp_cache[i], 0,
TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
@@ -1981,7 +1982,7 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
code_address = address;
/* Make accesses to pages with watchpoints go via the
watchpoint trap routines. */
- TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ TAILQ_FOREACH(wp, env->watchpoints, entry) {
if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
iotlb = io_mem_watch + paddr;
/* TODO: The memory case can be optimized by not trapping
@@ -2542,7 +2543,7 @@ static void check_watchpoint(int offset, int len_mask,
int flags)
return;
}
vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
- TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ TAILQ_FOREACH(wp, env->watchpoints, entry) {
if ((vaddr == (wp->vaddr & len_mask) ||
(vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
wp->flags |= BP_WATCHPOINT_HIT;
@@ -3267,7 +3268,7 @@ void cpu_io_recompile(CPUState *env, void *retaddr)
tb = tb_find_pc((unsigned long)retaddr);
if (!tb) {
- cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
+ cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
retaddr);
}
n = env->icount_decr.u16.low + tb->icount;
@@ -3345,7 +3346,7 @@ void dump_exec_info(FILE *f,
cpu_fprintf(f, "Translation buffer state:\n");
cpu_fprintf(f, "gen code size %ld/%ld\n",
code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
- cpu_fprintf(f, "TB count %d/%d\n",
+ cpu_fprintf(f, "TB count %d/%d\n",
nb_tbs, code_gen_max_blocks);
cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
nb_tbs ? target_code_size / nb_tbs : 0,
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index 7e8e644..62aa5f0 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -2363,8 +2363,8 @@ static always_inline void gen_intermediate_code_internal
(CPUState *env,
gen_icount_start();
for (ret = 0; ret == 0;) {
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == ctx.pc) {
gen_excp(&ctx, EXCP_DEBUG, 0);
break;
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 0650bc3..8ac1f6b 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -8651,8 +8651,8 @@ static inline void
gen_intermediate_code_internal(CPUState *env,
}
#endif
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == dc->pc) {
gen_set_condexec(dc);
gen_set_pc_im(dc->pc);
diff --git a/target-cris/translate.c b/target-cris/translate.c
index 242ef9c..ae976b1 100644
--- a/target-cris/translate.c
+++ b/target-cris/translate.c
@@ -116,7 +116,7 @@ typedef struct DisasContext {
#define JMP_NOJMP 0
#define JMP_DIRECT 1
#define JMP_INDIRECT 2
- int jmp; /* 0=nojmp, 1=direct, 2=indirect. */
+ int jmp; /* 0=nojmp, 1=direct, 2=indirect. */
uint32_t jmp_pc;
int delayed_branch;
@@ -214,9 +214,9 @@ static inline void t_gen_mov_preg_TN(DisasContext *dc, int
r, TCGv tn)
else if (r == PR_SRS)
tcg_gen_andi_tl(cpu_PR[r], tn, 3);
else {
- if (r == PR_PID)
+ if (r == PR_PID)
gen_helper_tlb_flush_pid(tn);
- if (dc->tb_flags & S_FLAG && r == PR_SPC)
+ if (dc->tb_flags & S_FLAG && r == PR_SPC)
gen_helper_spc_write(tn);
else if (r == PR_CCS)
dc->cpustate_changed = 1;
@@ -452,7 +452,7 @@ static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b)
l1 = gen_new_label();
- /*
+ /*
* d <<= 1
* if (d >= s)
* d -= s;
@@ -483,7 +483,7 @@ static inline void t_gen_addx_carry(DisasContext *dc, TCGv
d)
if (dc->flagx_known) {
if (dc->flags_x) {
TCGv c;
-
+
c = tcg_temp_new();
t_gen_mov_TN_preg(c, PR_CCS);
/* C flag is already at bit 0. */
@@ -505,7 +505,7 @@ static inline void t_gen_addx_carry(DisasContext *dc, TCGv
d)
tcg_gen_shri_tl(x, x, 4);
tcg_gen_and_tl(x, x, c);
- tcg_gen_add_tl(d, d, x);
+ tcg_gen_add_tl(d, d, x);
tcg_temp_free(x);
tcg_temp_free(c);
}
@@ -516,7 +516,7 @@ static inline void t_gen_subx_carry(DisasContext *dc, TCGv
d)
if (dc->flagx_known) {
if (dc->flags_x) {
TCGv c;
-
+
c = tcg_temp_new();
t_gen_mov_TN_preg(c, PR_CCS);
/* C flag is already at bit 0. */
@@ -744,10 +744,10 @@ static void cris_evaluate_flags(DisasContext *dc)
}
if (dc->flagx_known) {
if (dc->flags_x)
- tcg_gen_ori_tl(cpu_PR[PR_CCS],
+ tcg_gen_ori_tl(cpu_PR[PR_CCS],
cpu_PR[PR_CCS], X_FLAG);
else
- tcg_gen_andi_tl(cpu_PR[PR_CCS],
+ tcg_gen_andi_tl(cpu_PR[PR_CCS],
cpu_PR[PR_CCS], ~X_FLAG);
}
@@ -762,9 +762,9 @@ static void cris_cc_mask(DisasContext *dc, unsigned int
mask)
if (!mask) {
dc->update_cc = 0;
return;
- }
+ }
- /* Check if we need to evaluate the condition codes due to
+ /* Check if we need to evaluate the condition codes due to
CC overlaying. */
ovl = (dc->cc_mask ^ mask) & ~mask;
if (ovl) {
@@ -798,7 +798,7 @@ static inline void cris_update_cc_x(DisasContext *dc)
}
/* Update cc prior to executing ALU op. Needs source operands untouched. */
-static void cris_pre_alu_update_cc(DisasContext *dc, int op,
+static void cris_pre_alu_update_cc(DisasContext *dc, int op,
TCGv dst, TCGv src, int size)
{
if (dc->update_cc) {
@@ -822,7 +822,7 @@ static void cris_pre_alu_update_cc(DisasContext *dc, int op,
static inline void cris_update_result(DisasContext *dc, TCGv res)
{
if (dc->update_cc) {
- if (dc->cc_size == 4 &&
+ if (dc->cc_size == 4 &&
(dc->cc_op == CC_OP_SUB
|| dc->cc_op == CC_OP_ADD))
return;
@@ -831,7 +831,7 @@ static inline void cris_update_result(DisasContext *dc,
TCGv res)
}
/* Returns one if the write back stage should execute. */
-static void cris_alu_op_exec(DisasContext *dc, int op,
+static void cris_alu_op_exec(DisasContext *dc, int op,
TCGv dst, TCGv a, TCGv b, int size)
{
/* Emit the ALU insns. */
@@ -1003,19 +1003,19 @@ static void gen_tst_cc (DisasContext *dc, TCGv cc, int
cond)
switch (cond) {
case CC_EQ:
if (arith_opt || move_opt) {
- /* If cc_result is zero, T0 should be
+ /* If cc_result is zero, T0 should be
non-zero otherwise T0 should be zero. */
int l1;
l1 = gen_new_label();
tcg_gen_movi_tl(cc, 0);
- tcg_gen_brcondi_tl(TCG_COND_NE, cc_result,
+ tcg_gen_brcondi_tl(TCG_COND_NE, cc_result,
0, l1);
tcg_gen_movi_tl(cc, 1);
gen_set_label(l1);
}
else {
cris_evaluate_flags(dc);
- tcg_gen_andi_tl(cc,
+ tcg_gen_andi_tl(cc,
cpu_PR[PR_CCS], Z_FLAG);
}
break;
@@ -1055,7 +1055,7 @@ static void gen_tst_cc (DisasContext *dc, TCGv cc, int
cond)
if (dc->cc_size == 1)
bits = 7;
else if (dc->cc_size == 2)
- bits = 15;
+ bits = 15;
tcg_gen_shri_tl(cc, cc_result, bits);
tcg_gen_xori_tl(cc, cc, 1);
@@ -1073,7 +1073,7 @@ static void gen_tst_cc (DisasContext *dc, TCGv cc, int
cond)
if (dc->cc_size == 1)
bits = 7;
else if (dc->cc_size == 2)
- bits = 15;
+ bits = 15;
tcg_gen_shri_tl(cc, cc_result, 31);
}
@@ -1188,7 +1188,7 @@ static void cris_store_direct_jmp(DisasContext *dc)
}
}
-static void cris_prepare_cc_branch (DisasContext *dc,
+static void cris_prepare_cc_branch (DisasContext *dc,
int offset, int cond)
{
/* This helps us re-schedule the micro-code to insns in delay-slots
@@ -1232,7 +1232,7 @@ static void gen_load64(DisasContext *dc, TCGv_i64 dst,
TCGv addr)
tcg_gen_qemu_ld64(dst, addr, mem_index);
}
-static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
+static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
unsigned int size, int sign)
{
int mem_index = cpu_mmu_index(dc->env);
@@ -1407,7 +1407,7 @@ static int dec_prep_move_m(DisasContext *dc, int s_ext,
int memsize,
}
} else
imm = ldl_code(dc->pc + 2);
-
+
tcg_gen_movi_tl(dst, imm);
dc->postinc = 0;
} else {
@@ -2703,7 +2703,7 @@ static unsigned int dec_move_pm(DisasContext *dc)
memsize = preg_sizes[dc->op2];
DIS(fprintf (logfile, "move.%c $p%u, [$r%u%s\n",
- memsize_char(memsize),
+ memsize_char(memsize),
dc->op2, dc->op1, dc->postinc ? "+]" : "]"));
/* prepare store. Address in T0, value in T1. */
@@ -2993,7 +2993,7 @@ static unsigned int dec_rfe_etc(DisasContext *dc)
tcg_gen_movi_tl(env_pc, dc->pc + 2);
/* Breaks start at 16 in the exception vector. */
- t_gen_mov_env_TN(trap_vector,
+ t_gen_mov_env_TN(trap_vector,
tcg_const_tl(dc->op1 + 16));
t_gen_raise_exception(EXCP_BREAK);
dc->is_jmp = DISAS_UPDATE;
@@ -3189,8 +3189,8 @@ static void check_breakpoint(CPUState *env, DisasContext
*dc)
{
CPUBreakpoint *bp;
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == dc->pc) {
cris_evaluate_flags (dc);
tcg_gen_movi_tl(env_pc, dc->pc);
@@ -3210,27 +3210,27 @@ static void check_breakpoint(CPUState *env,
DisasContext *dc)
* to give SW a hint that the exception actually hit on the dslot.
*
* CRIS expects all PC addresses to be 16-bit aligned. The lsb is ignored by
- * the core and any jmp to an odd addresses will mask off that lsb. It is
+ * the core and any jmp to an odd addresses will mask off that lsb. It is
* simply there to let sw know there was an exception on a dslot.
*
* When the software returns from an exception, the branch will re-execute.
* On QEMU care needs to be taken when a branch+delayslot sequence is broken
* and the branch and delayslot dont share pages.
*
- * The TB contaning the branch insn will set up env->btarget and evaluate
- * env->btaken. When the translation loop exits we will note that the branch
+ * The TB contaning the branch insn will set up env->btarget and evaluate
+ * env->btaken. When the translation loop exits we will note that the branch
* sequence is broken and let env->dslot be the size of the branch insn (those
* vary in length).
*
* The TB contaning the delayslot will have the PC of its real insn (i.e no lsb
- * set). It will also expect to have env->dslot setup with the size of the
- * delay slot so that env->pc - env->dslot point to the branch insn. This TB
- * will execute the dslot and take the branch, either to btarget or just one
+ * set). It will also expect to have env->dslot setup with the size of the
+ * delay slot so that env->pc - env->dslot point to the branch insn. This TB
+ * will execute the dslot and take the branch, either to btarget or just one
* insn ahead.
*
- * When exceptions occur, we check for env->dslot in do_interrupt to detect
+ * When exceptions occur, we check for env->dslot in do_interrupt to detect
* broken branch sequences and setup $erp accordingly (i.e let it point to the
- * branch and set lsb). Then env->dslot gets cleared so that the exception
+ * branch and set lsb). Then env->dslot gets cleared so that the exception
* handler can enter. When returning from exceptions (jump $erp) the lsb gets
* masked off and we will reexecute the branch insn.
*
@@ -3299,7 +3299,7 @@ gen_intermediate_code_internal(CPUState *env,
TranslationBlock *tb,
search_pc, dc->pc, dc->ppc,
(unsigned long long)tb->flags,
env->btarget, (unsigned)tb->flags & 7,
- env->pregs[PR_CCS],
+ env->pregs[PR_CCS],
env->pregs[PR_PID], env->pregs[PR_USP],
env->regs[0], env->regs[1], env->regs[2], env->regs[3],
env->regs[4], env->regs[5], env->regs[6], env->regs[7],
@@ -3345,7 +3345,7 @@ gen_intermediate_code_internal(CPUState *env,
TranslationBlock *tb,
gen_io_start();
dc->clear_x = 1;
- insn_len = cris_decoder(dc);
+ insn_len = cris_decoder(dc);
dc->ppc = dc->pc;
dc->pc += insn_len;
if (dc->clear_x)
@@ -3360,12 +3360,12 @@ gen_intermediate_code_internal(CPUState *env,
TranslationBlock *tb,
if (dc->delayed_branch == 0)
{
if (tb->flags & 7)
- t_gen_mov_env_TN(dslot,
+ t_gen_mov_env_TN(dslot,
tcg_const_tl(0));
if (dc->jmp == JMP_DIRECT) {
dc->is_jmp = DISAS_NEXT;
} else {
- t_gen_cc_jmp(env_btarget,
+ t_gen_cc_jmp(env_btarget,
tcg_const_tl(dc->pc));
dc->is_jmp = DISAS_JUMP;
}
@@ -3390,7 +3390,7 @@ gen_intermediate_code_internal(CPUState *env,
TranslationBlock *tb,
gen_io_end();
/* Force an update if the per-tb cpu state has changed. */
if (dc->is_jmp == DISAS_NEXT
- && (dc->cpustate_changed || !dc->flagx_known
+ && (dc->cpustate_changed || !dc->flagx_known
|| (dc->flags_x != (tb->flags & X_FLAG)))) {
dc->is_jmp = DISAS_UPDATE;
tcg_gen_movi_tl(env_pc, npc);
@@ -3539,7 +3539,7 @@ CPUCRISState *cpu_cris_init (const char *cpu_model)
offsetof(CPUState, cc_mask),
"cc_mask");
- env_pc = tcg_global_mem_new(TCG_AREG0,
+ env_pc = tcg_global_mem_new(TCG_AREG0,
offsetof(CPUState, pc),
"pc");
env_btarget = tcg_global_mem_new(TCG_AREG0,
diff --git a/target-i386/helper.c b/target-i386/helper.c
index f2d91df..103bad2 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -34,14 +34,14 @@
//#define DEBUG_MMU
-static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
- uint32_t *ext_features,
- uint32_t *ext2_features,
+static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
+ uint32_t *ext_features,
+ uint32_t *ext2_features,
uint32_t *ext3_features)
{
int i;
/* feature flags taken from "Intel Processor Identification and the CPUID
- * Instruction" and AMD's "CPUID Specification". In cases of disagreement
+ * Instruction" and AMD's "CPUID Specification". In cases of disagreement
* about feature names, the Linux name is used. */
static const char *feature_name[] = {
"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
@@ -68,22 +68,22 @@ static void add_flagname_to_bitmaps(char *flagname,
uint32_t *features,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
- for ( i = 0 ; i < 32 ; i++ )
+ for ( i = 0 ; i < 32 ; i++ )
if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
*features |= 1 << i;
return;
}
- for ( i = 0 ; i < 32 ; i++ )
+ for ( i = 0 ; i < 32 ; i++ )
if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
*ext_features |= 1 << i;
return;
}
- for ( i = 0 ; i < 32 ; i++ )
+ for ( i = 0 ; i < 32 ; i++ )
if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
*ext2_features |= 1 << i;
return;
}
- for ( i = 0 ; i < 32 ; i++ )
+ for ( i = 0 ; i < 32 ; i++ )
if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
*ext3_features |= 1 << i;
return;
@@ -125,13 +125,13 @@ static x86_def_t x86_defs[] = {
.family = 6,
.model = 2,
.stepping = 3,
- .features = PPRO_FEATURES |
+ .features = PPRO_FEATURES |
/* these features are needed for Win64 and aren't fully implemented */
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
/* this feature is needed for Solaris and isn't fully implemented */
CPUID_PSE36,
.ext_features = CPUID_EXT_SSE3,
- .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
+ .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
.ext3_features = CPUID_EXT3_SVM,
@@ -1174,7 +1174,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env,
target_ulong addr,
error_code |= PG_ERROR_I_D_MASK;
if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
/* cr2 is not modified in case of exceptions */
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
addr);
} else {
env->cr[2] = addr;
@@ -1364,7 +1364,7 @@ static void breakpoint_handler(CPUState *env)
cpu_resume_from_signal(env, NULL);
}
} else {
- TAILQ_FOREACH(bp, &env->breakpoints, entry)
+ TAILQ_FOREACH(bp, env->breakpoints, entry)
if (bp->pc == env->eip) {
if (bp->flags & BP_CPU) {
check_hw_breakpoints(env, 1);
@@ -1575,7 +1575,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index,
break;
case 0x80000008:
/* virtual & phys address size in low 2 bytes. */
-/* XXX: This value must match the one used in the MMU code. */
+/* XXX: This value must match the one used in the MMU code. */
if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
/* 64 bit processor */
#if defined(USE_KQEMU)
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 423fca3..2ecf029 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -381,7 +381,7 @@ static inline void gen_op_addq_A0_im(int64_t val)
tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif
-
+
static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
@@ -462,7 +462,7 @@ static inline void gen_op_set_cc_op(int32_t val)
static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
- if (shift != 0)
+ if (shift != 0)
tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
@@ -504,7 +504,7 @@ static inline void gen_op_movq_A0_reg(int reg)
static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
- if (shift != 0)
+ if (shift != 0)
tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
@@ -661,7 +661,7 @@ static inline void gen_string_movl_A0_EDI(DisasContext *s)
}
}
-static inline void gen_op_movl_T0_Dshift(int ot)
+static inline void gen_op_movl_T0_Dshift(int ot)
{
tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
@@ -953,7 +953,7 @@ static inline void gen_jcc1(DisasContext *s, int cc_op, int
b, int l1)
case CC_OP_SUBW:
case CC_OP_SUBL:
case CC_OP_SUBQ:
-
+
size = cc_op - CC_OP_SUBB;
switch(jcc_op) {
case JCC_Z:
@@ -984,28 +984,28 @@ static inline void gen_jcc1(DisasContext *s, int cc_op,
int b, int l1)
switch(size) {
case 0:
tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
- tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
+ tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
0, l1);
break;
case 1:
tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
- tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
+ tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
0, l1);
break;
#ifdef TARGET_X86_64
case 2:
tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
- tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
+ tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
0, l1);
break;
#endif
default:
- tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT,
cpu_cc_dst,
+ tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
0, l1);
break;
}
break;
-
+
case JCC_B:
cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
goto fast_jcc_b;
@@ -1037,7 +1037,7 @@ static inline void gen_jcc1(DisasContext *s, int cc_op,
int b, int l1)
}
tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
break;
-
+
case JCC_L:
cond = inv ? TCG_COND_GE : TCG_COND_LT;
goto fast_jcc_l;
@@ -1069,48 +1069,48 @@ static inline void gen_jcc1(DisasContext *s, int cc_op,
int b, int l1)
}
tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
break;
-
+
default:
goto slow_jcc;
}
break;
-
+
/* some jumps are easy to compute */
case CC_OP_ADDB:
case CC_OP_ADDW:
case CC_OP_ADDL:
case CC_OP_ADDQ:
-
+
case CC_OP_ADCB:
case CC_OP_ADCW:
case CC_OP_ADCL:
case CC_OP_ADCQ:
-
+
case CC_OP_SBBB:
case CC_OP_SBBW:
case CC_OP_SBBL:
case CC_OP_SBBQ:
-
+
case CC_OP_LOGICB:
case CC_OP_LOGICW:
case CC_OP_LOGICL:
case CC_OP_LOGICQ:
-
+
case CC_OP_INCB:
case CC_OP_INCW:
case CC_OP_INCL:
case CC_OP_INCQ:
-
+
case CC_OP_DECB:
case CC_OP_DECW:
case CC_OP_DECL:
case CC_OP_DECQ:
-
+
case CC_OP_SHLB:
case CC_OP_SHLW:
case CC_OP_SHLL:
case CC_OP_SHLQ:
-
+
case CC_OP_SARB:
case CC_OP_SARW:
case CC_OP_SARL:
@@ -1129,7 +1129,7 @@ static inline void gen_jcc1(DisasContext *s, int cc_op,
int b, int l1)
default:
slow_jcc:
gen_setcc_slow_T0(s, jcc_op);
- tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
+ tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
cpu_T[0], 0, l1);
break;
}
@@ -1421,7 +1421,7 @@ static void gen_inc(DisasContext *s1, int ot, int d, int
c)
tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}
-static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
+static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
int is_right, int is_arith)
{
target_ulong mask;
@@ -1463,7 +1463,7 @@ static void gen_shift_rm_T1(DisasContext *s, int ot, int
op1,
gen_op_st_T0_A0(ot + s->mem_index);
else
gen_op_mov_reg_T0(ot, op1);
-
+
/* update eflags if non zero shift */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
@@ -1484,7 +1484,7 @@ static void gen_shift_rm_T1(DisasContext *s, int ot, int
op1,
tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
else
tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
-
+
gen_set_label(shift_label);
s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
@@ -1496,7 +1496,7 @@ static void gen_shift_rm_im(DisasContext *s, int ot, int
op1, int op2,
int is_right, int is_arith)
{
int mask;
-
+
if (ot == OT_QUAD)
mask = 0x3f;
else
@@ -1531,7 +1531,7 @@ static void gen_shift_rm_im(DisasContext *s, int ot, int
op1, int op2,
gen_op_st_T0_A0(ot + s->mem_index);
else
gen_op_mov_reg_T0(ot, op1);
-
+
/* update eflags if non zero shift */
if (op2 != 0) {
tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
@@ -1552,7 +1552,7 @@ static inline void tcg_gen_lshift(TCGv ret, TCGv arg1,
target_long arg2)
}
/* XXX: add faster immediate case */
-static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
+static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
int is_right)
{
target_ulong mask;
@@ -1586,12 +1586,12 @@ static void gen_rot_rm_T1(DisasContext *s, int ot, int
op1,
shifts. */
label1 = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);
-
+
if (ot <= OT_WORD)
tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
else
tcg_gen_mov_tl(cpu_tmp0, t1);
-
+
gen_extu(ot, t0);
tcg_gen_mov_tl(t2, t0);
@@ -1616,7 +1616,7 @@ static void gen_rot_rm_T1(DisasContext *s, int ot, int
op1,
} else {
gen_op_mov_reg_v(ot, op1, t0);
}
-
+
/* update eflags */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
@@ -1635,10 +1635,10 @@ static void gen_rot_rm_T1(DisasContext *s, int ot, int
op1,
}
tcg_gen_andi_tl(t0, t0, CC_C);
tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
-
+
tcg_gen_discard_tl(cpu_cc_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
-
+
gen_set_label(label2);
s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
@@ -1649,7 +1649,7 @@ static void gen_rot_rm_T1(DisasContext *s, int ot, int
op1,
}
/* XXX: add faster immediate = 1 case */
-static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
+static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
int is_right)
{
int label1;
@@ -1662,7 +1662,7 @@ static void gen_rotc_rm_T1(DisasContext *s, int ot, int
op1,
gen_op_ld_T0_A0(ot + s->mem_index);
else
gen_op_mov_TN_reg(ot, 0, op1);
-
+
if (is_right) {
switch (ot) {
case 0: gen_helper_rcrb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
@@ -1695,13 +1695,13 @@ static void gen_rotc_rm_T1(DisasContext *s, int ot, int
op1,
tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp);
tcg_gen_discard_tl(cpu_cc_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
-
+
gen_set_label(label1);
s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
}
/* XXX: add faster immediate case */
-static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
+static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
int is_right)
{
int label1, label2, data_bits;
@@ -1735,7 +1735,7 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot,
int op1,
shifts. */
label1 = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
-
+
tcg_gen_addi_tl(cpu_tmp5, t2, -1);
if (ot == OT_WORD) {
/* Note: we implement the Intel behaviour for shift count > 16 */
@@ -1746,7 +1746,7 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot,
int op1,
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
-
+
/* only needed if count > 16, but a test would complicate */
tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(32), t2);
tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);
@@ -1760,7 +1760,7 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot,
int op1,
tcg_gen_shli_tl(t1, t1, 16);
tcg_gen_or_tl(t1, t1, t0);
tcg_gen_ext32u_tl(t1, t1);
-
+
tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(32), cpu_tmp5);
tcg_gen_shr_tl(cpu_tmp6, t1, cpu_tmp0);
@@ -1783,13 +1783,13 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int
ot, int op1,
tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2);
tcg_gen_shl_tl(t1, t1, cpu_tmp5);
tcg_gen_or_tl(t0, t0, t1);
-
+
} else {
if (ot == OT_LONG)
tcg_gen_ext32u_tl(t1, t1);
tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
-
+
tcg_gen_shl_tl(t0, t0, t2);
tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2);
tcg_gen_shr_tl(t1, t1, cpu_tmp5);
@@ -1805,7 +1805,7 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot,
int op1,
} else {
gen_op_mov_reg_v(ot, op1, t0);
}
-
+
/* update eflags */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
@@ -2234,7 +2234,7 @@ static inline void gen_jcc(DisasContext *s, int b,
if (s->jmp_opt) {
l1 = gen_new_label();
gen_jcc1(s, cc_op, b, l1);
-
+
gen_goto_tb(s, 0, next_eip);
gen_set_label(l1);
@@ -2287,17 +2287,17 @@ static void gen_setcc(DisasContext *s, int b)
static inline void gen_op_movl_T0_seg(int seg_reg)
{
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
+ tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,segs[seg_reg].selector));
}
static inline void gen_op_movl_seg_T0_vm(int seg_reg)
{
tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
- tcg_gen_st32_tl(cpu_T[0], cpu_env,
+ tcg_gen_st32_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,segs[seg_reg].selector));
tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
- tcg_gen_st_tl(cpu_T[0], cpu_env,
+ tcg_gen_st_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,segs[seg_reg].base));
}
@@ -2600,7 +2600,7 @@ static void gen_interrupt(DisasContext *s, int intno,
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(cur_eip);
- gen_helper_raise_interrupt(tcg_const_i32(intno),
+ gen_helper_raise_interrupt(tcg_const_i32(intno),
tcg_const_i32(next_eip - cur_eip));
s->is_jmp = 3;
}
@@ -3091,7 +3091,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong
pc_start, int rex_r)
#endif
{
gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
- tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
@@ -3101,14 +3101,14 @@ static void gen_sse(DisasContext *s, int b,
target_ulong pc_start, int rex_r)
#ifdef TARGET_X86_64
if (s->dflag == 2) {
gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
- tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
} else
#endif
{
gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
- tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
@@ -3240,13 +3240,13 @@ static void gen_sse(DisasContext *s, int b,
target_ulong pc_start, int rex_r)
case 0x7e: /* movd ea, mm */
#ifdef TARGET_X86_64
if (s->dflag == 2) {
- tcg_gen_ld_i64(cpu_T[0], cpu_env,
+ tcg_gen_ld_i64(cpu_T[0], cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
} else
#endif
{
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
+ tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
}
@@ -3254,13 +3254,13 @@ static void gen_sse(DisasContext *s, int b,
target_ulong pc_start, int rex_r)
case 0x17e: /* movd ea, xmm */
#ifdef TARGET_X86_64
if (s->dflag == 2) {
- tcg_gen_ld_i64(cpu_T[0], cpu_env,
+ tcg_gen_ld_i64(cpu_T[0], cpu_env,
offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
} else
#endif
{
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
+ tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
}
@@ -3376,7 +3376,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong
pc_start, int rex_r)
break;
case 0x050: /* movmskps */
rm = (modrm & 7) | REX_B(s);
- tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[rm]));
gen_helper_movmskps(cpu_tmp2_i32, cpu_ptr0);
tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
@@ -3384,7 +3384,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong
pc_start, int rex_r)
break;
case 0x150: /* movmskpd */
rm = (modrm & 7) | REX_B(s);
- tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[rm]));
gen_helper_movmskpd(cpu_tmp2_i32, cpu_ptr0);
tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
@@ -4521,12 +4521,12 @@ static target_ulong disas_insn(DisasContext *s,
target_ulong pc_start)
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1],
- tcg_const_i32(dflag),
+ tcg_const_i32(dflag),
tcg_const_i32(s->pc - pc_start));
} else {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1],
- tcg_const_i32(dflag),
+ tcg_const_i32(dflag),
tcg_const_i32(s->pc - s->cs_base));
}
gen_eob(s);
@@ -4793,7 +4793,7 @@ static target_ulong disas_insn(DisasContext *s,
target_ulong pc_start)
gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
gen_helper_cmpxchg16b(cpu_A0);
} else
-#endif
+#endif
{
if (!(s->cpuid_features & CPUID_CX8))
goto illegal_op;
@@ -5368,7 +5368,7 @@ static target_ulong disas_insn(DisasContext *s,
target_ulong pc_start)
gen_helper_fildl_FT0(cpu_tmp2_i32);
break;
case 2:
- tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
+ tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
gen_helper_fldl_FT0(cpu_tmp1_i64);
break;
@@ -5407,7 +5407,7 @@ static target_ulong disas_insn(DisasContext *s,
target_ulong pc_start)
gen_helper_fildl_ST0(cpu_tmp2_i32);
break;
case 2:
- tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
+ tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
gen_helper_fldl_ST0(cpu_tmp1_i64);
break;
@@ -5429,7 +5429,7 @@ static target_ulong disas_insn(DisasContext *s,
target_ulong pc_start)
break;
case 2:
gen_helper_fisttll_ST0(cpu_tmp1_i64);
- tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
+ tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
break;
case 3:
@@ -5455,7 +5455,7 @@ static target_ulong disas_insn(DisasContext *s,
target_ulong pc_start)
break;
case 2:
gen_helper_fstl_ST0(cpu_tmp1_i64);
- tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
+ tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
break;
case 3:
@@ -5537,13 +5537,13 @@ static target_ulong disas_insn(DisasContext *s,
target_ulong pc_start)
gen_helper_fpop();
break;
case 0x3d: /* fildll */
- tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
+ tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
gen_helper_fildll_ST0(cpu_tmp1_i64);
break;
case 0x3f: /* fistpll */
gen_helper_fistll_ST0(cpu_tmp1_i64);
- tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
+ tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
gen_helper_fpop();
break;
@@ -5931,7 +5931,7 @@ static target_ulong disas_insn(DisasContext *s,
target_ulong pc_start)
ot = dflag ? OT_LONG : OT_WORD;
gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
gen_op_andl_T0_ffff();
- gen_check_io(s, ot, pc_start - s->cs_base,
+ gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
@@ -6122,7 +6122,7 @@ static target_ulong disas_insn(DisasContext *s,
target_ulong pc_start)
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(pc_start - s->cs_base);
- gen_helper_iret_protected(tcg_const_i32(s->dflag),
+ gen_helper_iret_protected(tcg_const_i32(s->dflag),
tcg_const_i32(s->pc - s->cs_base));
s->cc_op = CC_OP_EFLAGS;
}
@@ -6644,7 +6644,7 @@ static target_ulong disas_insn(DisasContext *s,
target_ulong pc_start)
{
TCGv_i32 tmp0;
gen_op_mov_TN_reg(OT_LONG, 0, reg);
-
+
tmp0 = tcg_temp_new_i32();
tcg_gen_trunc_i64_i32(tmp0, cpu_T[0]);
tcg_gen_bswap_i32(tmp0, tmp0);
@@ -7014,7 +7014,7 @@ static target_ulong disas_insn(DisasContext *s,
target_ulong pc_start)
break;
case 4: /* STGI */
if ((!(s->flags & HF_SVME_MASK) &&
- !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
+ !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
!s->pe)
goto illegal_op;
if (s->cpl != 0) {
@@ -7035,8 +7035,8 @@ static target_ulong disas_insn(DisasContext *s,
target_ulong pc_start)
}
break;
case 6: /* SKINIT */
- if ((!(s->flags & HF_SVME_MASK) &&
- !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
+ if ((!(s->flags & HF_SVME_MASK) &&
+ !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
!s->pe)
goto illegal_op;
gen_helper_skinit();
@@ -7608,8 +7608,8 @@ static inline void
gen_intermediate_code_internal(CPUState *env,
gen_icount_start();
for(;;) {
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == pc_ptr) {
gen_debug(dc, pc_ptr - dc->cs_base);
break;
diff --git a/target-m68k/translate.c b/target-m68k/translate.c
index bc2fe2b..634f3d8 100644
--- a/target-m68k/translate.c
+++ b/target-m68k/translate.c
@@ -2999,8 +2999,8 @@ gen_intermediate_code_internal(CPUState *env,
TranslationBlock *tb,
do {
pc_offset = dc->pc - pc_start;
gen_throws_exception = NULL;
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == dc->pc) {
gen_exception(dc, dc->pc, EXCP_DEBUG);
dc->is_jmp = DISAS_JUMP;
diff --git a/target-mips/translate.c b/target-mips/translate.c
index 418b9ef..9ae1e35 100644
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -8286,8 +8286,8 @@ gen_intermediate_code_internal (CPUState *env,
TranslationBlock *tb,
#endif
gen_icount_start();
while (ctx.bstate == BS_NONE) {
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == ctx.pc) {
save_cpu_state(&ctx, 1);
ctx.bstate = BS_BRANCH;
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index aa85ba7..f6b7eed 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -7765,8 +7765,8 @@ static always_inline void gen_intermediate_code_internal
(CPUState *env,
gen_icount_start();
/* Set env in case of segfault during code fetch */
while (ctx.exception == POWERPC_EXCP_NONE && gen_opc_ptr < gen_opc_end) {
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == ctx.nip) {
gen_update_nip(&ctx, ctx.nip);
gen_helper_raise_debug();
diff --git a/target-sh4/translate.c b/target-sh4/translate.c
index 287b4a3..e67ebba 100644
--- a/target-sh4/translate.c
+++ b/target-sh4/translate.c
@@ -1798,8 +1798,9 @@ gen_intermediate_code_internal(CPUState * env,
TranslationBlock * tb,
max_insns = CF_COUNT_MASK;
gen_icount_start();
while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ fprintf (stderr, "\n\nin translate\n\n");
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (ctx.pc == bp->pc) {
/* We have hit a breakpoint - make sure PC is up-to-date */
tcg_gen_movi_i32(cpu_pc, ctx.pc);
diff --git a/target-sparc/translate.c b/target-sparc/translate.c
index 07b2624..fbe0ded 100644
--- a/target-sparc/translate.c
+++ b/target-sparc/translate.c
@@ -4816,8 +4816,8 @@ static inline void
gen_intermediate_code_internal(TranslationBlock * tb,
max_insns = CF_COUNT_MASK;
gen_icount_start();
do {
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == dc->pc) {
if (dc->pc != pc_start)
save_state(dc, cpu_cond);
--
1.5.6.5
- [Qemu-devel] [linux-user] Fixed Qemu crash using Gdbstub,
Lionel Landwerlin <=
- Re: [Qemu-devel] [linux-user] Fixed Qemu crash using Gdbstub, Lionel Landwerlin, 2008/12/12
- [Qemu-devel] Re: [linux-user] Fixed Qemu crash using Gdbstub, Jan Kiszka, 2008/12/13
- [Qemu-devel] Re: [linux-user] Fixed Qemu crash using Gdbstub, Jan Kiszka, 2008/12/13
- Re: [Qemu-devel] Re: [linux-user] Fixed Qemu crash using Gdbstub, Lionel Landwerlin, 2008/12/13
- [Qemu-devel] Re: [linux-user] Fixed Qemu crash using Gdbstub, Jan Kiszka, 2008/12/13
- Re: [Qemu-devel] Re: [linux-user] Fixed Qemu crash using Gdbstub, Lionel Landwerlin, 2008/12/13
- [Qemu-devel] Re: [linux-user] Fixed Qemu crash using Gdbstub, Jan Kiszka, 2008/12/13
- Re: [Qemu-devel] Re: [linux-user] Fixed Qemu crash using Gdbstub, Lionel Landwerlin, 2008/12/13
- [Qemu-devel] Re: [linux-user] Fixed Qemu crash using Gdbstub, Jan Kiszka, 2008/12/14
- Re: [Qemu-devel] Re: [linux-user] Fixed Qemu crash using Gdbstub, Lionel Landwerlin, 2008/12/14