qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [RFC 01/48] cpu: introduce run_on_cpu_no_bql


From: Emilio G. Cota
Subject: [Qemu-devel] [RFC 01/48] cpu: introduce run_on_cpu_no_bql
Date: Thu, 25 Oct 2018 13:20:10 -0400

This allows us to queue synchronous CPU work without the BQL.

Will gain a user soon.

Signed-off-by: Emilio G. Cota <address@hidden>
---
 include/qom/cpu.h | 13 +++++++++++++
 cpus-common.c     | 28 ++++++++++++++++++++++------
 2 files changed, 35 insertions(+), 6 deletions(-)

diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 204bc94056..863aa2bff1 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -877,6 +877,19 @@ bool cpu_is_stopped(CPUState *cpu);
  */
 void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
 
+/**
+ * run_on_cpu_no_bql
+ * @cpu: The vCPU to run on.
+ * @func: The function to be executed.
+ * @data: Data to pass to the function.
+ *
+ * Schedules the function @func for execution on the vCPU @cpu.
+ * This function is run outside the BQL.
+ * See also: run_on_cpu()
+ */
+void run_on_cpu_no_bql(CPUState *cpu, run_on_cpu_func func,
+                       run_on_cpu_data data);
+
 /**
  * async_run_on_cpu:
  * @cpu: The vCPU to run on.
diff --git a/cpus-common.c b/cpus-common.c
index cffb2b71ac..b478fc8741 100644
--- a/cpus-common.c
+++ b/cpus-common.c
@@ -144,7 +144,8 @@ static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
     cpu_mutex_unlock(cpu);
 }
 
-void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
+static void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
+                          run_on_cpu_data data, bool bql)
 {
     struct qemu_work_item wi;
     bool has_bql = qemu_mutex_iothread_locked();
@@ -152,12 +153,16 @@ void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
     g_assert(no_cpu_mutex_locked());
 
     if (qemu_cpu_is_self(cpu)) {
-        if (has_bql) {
-            func(cpu, data);
+        if (bql) {
+            if (has_bql) {
+                func(cpu, data);
+            } else {
+                qemu_mutex_lock_iothread();
+                func(cpu, data);
+                qemu_mutex_unlock_iothread();
+            }
         } else {
-            qemu_mutex_lock_iothread();
             func(cpu, data);
-            qemu_mutex_unlock_iothread();
         }
         return;
     }
@@ -172,7 +177,7 @@ void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
     wi.done = false;
     wi.free = false;
     wi.exclusive = false;
-    wi.bql = true;
+    wi.bql = bql;
 
     cpu_mutex_lock(cpu);
     queue_work_on_cpu_locked(cpu, &wi);
@@ -189,6 +194,17 @@ void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
     }
 }
 
+void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
+{
+    do_run_on_cpu(cpu, func, data, true);
+}
+
+void
+run_on_cpu_no_bql(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
+{
+    do_run_on_cpu(cpu, func, data, false);
+}
+
 void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
 {
     struct qemu_work_item *wi;
-- 
2.17.1




reply via email to

[Prev in Thread] Current Thread [Next in Thread]