[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH v2 09/11] Use per-cpu reset handlers.
From: |
Glauber Costa |
Subject: |
[Qemu-devel] [PATCH v2 09/11] Use per-cpu reset handlers. |
Date: |
Tue, 1 Dec 2009 10:51:35 -0200 |
The proposal in this patch is to add a system_reset caller that only
resets state related to the cpu. This will guarantee that those functions
are called from the cpu-threads, not the I/O thread.
In principle, it might seem close to the remote execution mechanism, but:
* It does not involve any extra signalling, so it should be faster.
* The cpu is guaranteed to be stopped, so it is much less racy.
* What runs where becomes more explicit.
* This is much, much less racy
The previous implementation was giving me races on reset. This one makes
it work flawlessly w.r.t. reset.
Signed-off-by: Glauber Costa <address@hidden>
---
cpu-defs.h | 2 ++
exec-all.h | 12 ++++++++++++
exec.c | 32 ++++++++++++++++++++++++++++++++
hw/apic-kvm.c | 16 ++++++++--------
kvm-all.c | 7 +++----
vl.c | 10 ++++++++++
6 files changed, 67 insertions(+), 12 deletions(-)
diff --git a/cpu-defs.h b/cpu-defs.h
index 18792fc..37fd11c 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -185,6 +185,8 @@ typedef struct QemuWorkItem {
CPUWatchpoint *watchpoint_hit; \
\
QemuWorkItem queued_work; \
+ int reset_requested; \
+ QTAILQ_HEAD(reset_head, CPUResetEntry) reset_handlers; \
uint64_t queued_local, queued_total; \
struct QemuMutex queue_lock; \
\
diff --git a/exec-all.h b/exec-all.h
index 820b59e..5acf363 100644
--- a/exec-all.h
+++ b/exec-all.h
@@ -78,6 +78,18 @@ void cpu_io_recompile(CPUState *env, void *retaddr);
TranslationBlock *tb_gen_code(CPUState *env,
target_ulong pc, target_ulong cs_base, int flags,
int cflags);
+
+typedef void CPUResetHandler(CPUState *env);
+typedef struct CPUResetEntry {
+ QTAILQ_ENTRY(CPUResetEntry) entry;
+ CPUResetHandler *func;
+ void *opaque;
+} CPUResetEntry;
+
+void qemu_register_reset_cpu(CPUState *env, CPUResetHandler *func);
+void qemu_unregister_reset_cpu(CPUState *env, CPUResetHandler *func);
+
+void qemu_cpu_reset(CPUState *env);
void cpu_exec_init(CPUState *env);
void QEMU_NORETURN cpu_loop_exit(void);
int page_unprotect(target_ulong address, unsigned long pc, void *puc);
diff --git a/exec.c b/exec.c
index eb1ee51..e06d862 100644
--- a/exec.c
+++ b/exec.c
@@ -588,6 +588,7 @@ void cpu_exec_init(CPUState *env)
env->numa_node = 0;
QTAILQ_INIT(&env->breakpoints);
QTAILQ_INIT(&env->watchpoints);
+ QTAILQ_INIT(&env->reset_handlers);
*penv = env;
#if defined(CONFIG_USER_ONLY)
cpu_list_unlock();
@@ -599,6 +600,37 @@ void cpu_exec_init(CPUState *env)
#endif
}
+void qemu_register_reset_cpu(CPUState *env, CPUResetHandler *func)
+{
+ CPUResetEntry *re = qemu_mallocz(sizeof(CPUResetEntry));
+
+ re->func = func;
+ re->opaque = env;
+ QTAILQ_INSERT_TAIL(&env->reset_handlers, re, entry);
+}
+
+void qemu_unregister_reset_cpu(CPUState *env, CPUResetHandler *func)
+{
+ CPUResetEntry *re;
+
+ QTAILQ_FOREACH(re, &env->reset_handlers, entry) {
+ if (re->func == func && re->opaque == env) {
+ QTAILQ_REMOVE(&env->reset_handlers, re, entry);
+ qemu_free(re);
+ return;
+ }
+ }
+}
+
+void qemu_cpu_reset(CPUState *env)
+{
+ CPUResetEntry *re, *nre;
+ /* reset all devices */
+ QTAILQ_FOREACH_SAFE(re, &env->reset_handlers, entry, nre) {
+ re->func(re->opaque);
+ }
+}
+
static inline void invalidate_page_bitmap(PageDesc *p)
{
if (p->code_bitmap) {
diff --git a/hw/apic-kvm.c b/hw/apic-kvm.c
index b2864b6..91a77d0 100644
--- a/hw/apic-kvm.c
+++ b/hw/apic-kvm.c
@@ -91,20 +91,20 @@ static const VMStateDescription vmstate_apic = {
.post_load = apic_post_load,
};
-static void kvm_apic_reset(void *opaque)
+static void kvm_apic_reset(CPUState *env)
{
- APICState *s = opaque;
+ APICState *s = env->apic_state;
int bsp;
int i;
- cpu_synchronize_state(s->cpu_env);
+ cpu_synchronize_state(env);
- bsp = cpu_is_bsp(s->cpu_env);
+ bsp = cpu_is_bsp(env);
s->dev.apicbase = 0xfee00000 |
(bsp ? MSR_IA32_APICBASE_BSP : 0) | MSR_IA32_APICBASE_ENABLE;
- cpu_reset(s->cpu_env);
+ cpu_reset(env);
s->dev.tpr = 0;
s->dev.spurious_vec = 0xff;
@@ -125,7 +125,7 @@ static void kvm_apic_reset(void *opaque)
s->dev.wait_for_sipi = 1;
- s->cpu_env->mp_state
+ env->mp_state
= bsp ? KVM_MP_STATE_RUNNABLE : KVM_MP_STATE_UNINITIALIZED;
/* We have to tell the kernel about mp_state, but also save sregs, since
@@ -141,7 +141,7 @@ static void kvm_apic_reset(void *opaque)
*/
s->dev.lvt[APIC_LVT_LINT0] = 0x700;
}
- kvm_set_lapic(s->cpu_env, &s->kvm_lapic_state);
+ kvm_set_lapic(env, &s->kvm_lapic_state);
}
int kvm_apic_init(CPUState *env)
@@ -155,7 +155,7 @@ int kvm_apic_init(CPUState *env)
msix_supported = 1;
vmstate_register(s->dev.id, &vmstate_apic, s);
- qemu_register_reset(kvm_apic_reset, s);
+ qemu_register_reset_cpu(env, kvm_apic_reset);
return 0;
}
diff --git a/kvm-all.c b/kvm-all.c
index ecd1102..286d0ee 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -21,6 +21,7 @@
#include <linux/kvm.h>
#include "qemu-common.h"
+#include "exec-all.h"
#include "sysemu.h"
#include "hw/hw.h"
#include "gdbstub.h"
@@ -148,10 +149,8 @@ static int kvm_set_user_memory_region(KVMState *s, KVMSlot
*slot)
return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}
-static void kvm_reset_vcpu(void *opaque)
+static void kvm_reset_vcpu(CPUState *env)
{
- CPUState *env = opaque;
-
kvm_arch_reset_vcpu(env);
if (kvm_arch_put_registers(env)) {
fprintf(stderr, "Fatal: kvm vcpu reset failed\n");
@@ -203,7 +202,7 @@ int kvm_init_vcpu(CPUState *env)
ret = kvm_arch_init_vcpu(env);
if (ret == 0) {
- qemu_register_reset(kvm_reset_vcpu, env);
+ qemu_register_reset_cpu(env, kvm_reset_vcpu);
}
err:
return ret;
diff --git a/vl.c b/vl.c
index 97446fc..ad2e7d6 100644
--- a/vl.c
+++ b/vl.c
@@ -3232,11 +3232,17 @@ void qemu_unregister_reset(QEMUResetHandler *func, void
*opaque)
void qemu_system_reset(void)
{
QEMUResetEntry *re, *nre;
+ CPUState *penv = first_cpu;
/* reset all devices */
QTAILQ_FOREACH_SAFE(re, &reset_handlers, entry, nre) {
re->func(re->opaque);
}
+
+ while (penv) {
+ penv->reset_requested = 1;
+ penv = penv->next_cpu;
+ }
}
void qemu_system_reset_request(void)
@@ -3528,6 +3534,10 @@ static void *kvm_cpu_thread_fn(void *arg)
}
while (1) {
+ if (env->reset_requested) {
+ qemu_cpu_reset(env);
+ env->reset_requested = 0;
+ }
if (cpu_can_run(env))
qemu_cpu_exec(env);
qemu_wait_io_event(env);
--
1.6.5.2
- [Qemu-devel] [PATCH v2 0/11], Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 01/11] Don't mess with halted state., Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 02/11] store thread-specific env information, Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 03/11] update halted state on mp_state sync, Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 04/11] qemu_flush_work for remote vcpu execution, Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 05/11] tell kernel about all registers instead of just mp_state, Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 06/11] flush state in migration post_load, Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 07/11] Don't call kvm cpu reset on initialization, Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 08/11] use cpu_kick instead of direct signalling., Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 09/11] Use per-cpu reset handlers.,
Glauber Costa <=
- [Qemu-devel] [PATCH v2 10/11] Use __thread where available., Glauber Costa, 2009/12/01
- [Qemu-devel] [PATCH v2 11/11] remove smp restriction from kvm, Glauber Costa, 2009/12/01
- Re: [Qemu-devel] [PATCH v2 05/11] tell kernel about all registers instead of just mp_state, Gleb Natapov, 2009/12/02
- Re: [Qemu-devel] [PATCH v2 05/11] tell kernel about all registers instead of just mp_state, Glauber Costa, 2009/12/02
- [Qemu-devel] Re: [PATCH v2 05/11] tell kernel about all registers instead of just mp_state, Jan Kiszka, 2009/12/02
- [Qemu-devel] Re: [PATCH v2 05/11] tell kernel about all registers instead of just mp_state, Gleb Natapov, 2009/12/02
- [Qemu-devel] Re: [PATCH v2 05/11] tell kernel about all registers instead of just mp_state, Jan Kiszka, 2009/12/02
- Re: [Qemu-devel] [PATCH v2 04/11] qemu_flush_work for remote vcpu execution, Marcelo Tosatti, 2009/12/02
- Re: [Qemu-devel] [PATCH v2 04/11] qemu_flush_work for remote vcpu execution, Glauber Costa, 2009/12/02
- Re: [Qemu-devel] [PATCH v2 04/11] qemu_flush_work for remote vcpu execution, Marcelo Tosatti, 2009/12/02