[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v9 60/74] accel/tcg: convert to cpu_interrupt_request
From: Robert Foley
Subject: [PATCH v9 60/74] accel/tcg: convert to cpu_interrupt_request
Date: Thu, 21 May 2020 12:39:57 -0400
From: "Emilio G. Cota" <address@hidden>
Reviewed-by: Richard Henderson <address@hidden>
Reviewed-by: Alex Bennée <address@hidden>
Signed-off-by: Emilio G. Cota <address@hidden>
Signed-off-by: Robert Foley <address@hidden>
---
accel/tcg/cpu-exec.c | 15 ++++++++-------
accel/tcg/tcg-all.c | 12 +++++++++---
accel/tcg/translate-all.c | 2 +-
3 files changed, 18 insertions(+), 11 deletions(-)
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 099dd83ee0..b549a37847 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -431,7 +431,7 @@ static inline bool cpu_handle_halt_locked(CPUState *cpu)
if (cpu_halted(cpu)) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
- if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
+ if ((cpu_interrupt_request(cpu) & CPU_INTERRUPT_POLL)
&& replay_interrupt()) {
X86CPU *x86_cpu = X86_CPU(cpu);
@@ -544,16 +544,17 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
*/
atomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);
- if (unlikely(atomic_read(&cpu->interrupt_request))) {
+ if (unlikely(cpu_interrupt_request(cpu))) {
int interrupt_request;
+
qemu_mutex_lock_iothread();
- interrupt_request = cpu->interrupt_request;
+ interrupt_request = cpu_interrupt_request(cpu);
if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
/* Mask out external interrupts for this step. */
interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
}
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_DEBUG);
cpu->exception_index = EXCP_DEBUG;
qemu_mutex_unlock_iothread();
return true;
@@ -562,7 +563,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
/* Do nothing */
} else if (interrupt_request & CPU_INTERRUPT_HALT) {
replay_interrupt();
- cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_HALT);
cpu_halted_set(cpu, 1);
cpu->exception_index = EXCP_HLT;
qemu_mutex_unlock_iothread();
@@ -599,10 +600,10 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
}
/* The target hook may have updated the 'cpu->interrupt_request';
* reload the 'interrupt_request' value */
- interrupt_request = cpu->interrupt_request;
+ interrupt_request = cpu_interrupt_request(cpu);
}
if (interrupt_request & CPU_INTERRUPT_EXITTB) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_EXITTB);
/* ensure that no TB jump will be modified as
the program flow was changed */
*last_tb = NULL;
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
index 3b4fda5640..5eda24d87b 100644
--- a/accel/tcg/tcg-all.c
+++ b/accel/tcg/tcg-all.c
@@ -52,10 +52,16 @@ typedef struct TCGState {
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
int old_mask;
- g_assert(qemu_mutex_iothread_locked());
- old_mask = cpu->interrupt_request;
- cpu->interrupt_request |= mask;
+ if (!cpu_mutex_locked(cpu)) {
+ cpu_mutex_lock(cpu);
+ old_mask = cpu_interrupt_request(cpu);
+ cpu_interrupt_request_or(cpu, mask);
+ cpu_mutex_unlock(cpu);
+ } else {
+ old_mask = cpu_interrupt_request(cpu);
+ cpu_interrupt_request_or(cpu, mask);
+ }
/*
* If called from iothread context, wake the target cpu in
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 42ce1dfcff..af8ecf3380 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -2392,7 +2392,7 @@ void dump_opcount_info(void)
void cpu_interrupt(CPUState *cpu, int mask)
{
g_assert(qemu_mutex_iothread_locked());
- cpu->interrupt_request |= mask;
+ cpu_interrupt_request_or(cpu, mask);
atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
}
--
2.17.1
- [PATCH v9 50/74] m68k: convert to cpu_interrupt_request, (continued)
- [PATCH v9 50/74] m68k: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 51/74] mips: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 53/74] s390x: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 52/74] nios: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 55/74] moxie: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 56/74] sparc: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 58/74] unicore32: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 57/74] openrisc: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 54/74] alpha: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 61/74] cpu: convert to interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 60/74] accel/tcg: convert to cpu_interrupt_request, Robert Foley <=
- [PATCH v9 62/74] cpu: call .cpu_has_work with the CPU lock held, Robert Foley, 2020/05/21
- [PATCH v9 59/74] microblaze: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 63/74] cpu: introduce cpu_has_work_with_iothread_lock, Robert Foley, 2020/05/21
- [PATCH v9 64/74] ppc: convert to cpu_has_work_with_iothread_lock, Robert Foley, 2020/05/21
- [PATCH v9 65/74] mips: convert to cpu_has_work_with_iothread_lock, Robert Foley, 2020/05/21
- [PATCH v9 67/74] riscv: convert to cpu_has_work_with_iothread_lock, Robert Foley, 2020/05/21
- [PATCH v9 66/74] s390x: convert to cpu_has_work_with_iothread_lock, Robert Foley, 2020/05/21
- [PATCH v9 68/74] sparc: convert to cpu_has_work_with_iothread_lock, Robert Foley, 2020/05/21
- [PATCH v9 69/74] xtensa: convert to cpu_has_work_with_iothread_lock, Robert Foley, 2020/05/21