qemu-devel
Re: [PATCH v5 09/12] contrib/plugins/hotblocks: migrate to new per_vcpu API


From: Pierrick Bouvier
Subject: Re: [PATCH v5 09/12] contrib/plugins/hotblocks: migrate to new per_vcpu API
Date: Wed, 28 Feb 2024 12:04:10 +0400
User-agent: Mozilla Thunderbird

On 2/27/24 6:26 PM, Luc Michel wrote:
On 15:09 Tue 27 Feb, Pierrick Bouvier wrote:
On 2/27/24 2:54 PM, Luc Michel wrote:
Hi Pierrick,

On 13:14 Mon 26 Feb, Pierrick Bouvier wrote:
Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
---
   contrib/plugins/hotblocks.c | 50 ++++++++++++++++++++++---------------
   1 file changed, 30 insertions(+), 20 deletions(-)

diff --git a/contrib/plugins/hotblocks.c b/contrib/plugins/hotblocks.c
index 4de1b134944..02bc5078bdd 100644
--- a/contrib/plugins/hotblocks.c
+++ b/contrib/plugins/hotblocks.c
@@ -34,8 +34,8 @@ static guint64 limit = 20;
    */
   typedef struct {
       uint64_t start_addr;
-    uint64_t exec_count;
-    int      trans_count;
+    struct qemu_plugin_scoreboard *exec_count;
+    int trans_count;
       unsigned long insns;
   } ExecCount;

@@ -43,7 +43,17 @@ static gint cmp_exec_count(gconstpointer a, gconstpointer b)
   {
       ExecCount *ea = (ExecCount *) a;
       ExecCount *eb = (ExecCount *) b;
-    return ea->exec_count > eb->exec_count ? -1 : 1;
+    uint64_t count_a =
+        qemu_plugin_u64_sum(qemu_plugin_scoreboard_u64(ea->exec_count));
+    uint64_t count_b =
+        qemu_plugin_u64_sum(qemu_plugin_scoreboard_u64(eb->exec_count));
+    return count_a > count_b ? -1 : 1;
+}
+
+static void exec_count_free(gpointer key, gpointer value, gpointer user_data)
+{
+    ExecCount *cnt = value;
+    qemu_plugin_scoreboard_free(cnt->exec_count);
   }

   static void plugin_exit(qemu_plugin_id_t id, void *p)
@@ -52,7 +62,6 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
       GList *counts, *it;
       int i;

-    g_mutex_lock(&lock);

I have encountered cases before where the vCPUs continue executing while
plugin_exit is called. This can happen, e.g., when QEMU calls exit(3)
from one CPU thread; the others keep running while the atexit callbacks
are invoked.

This also means that you can't really free the resources as you do at
the end of plugin_exit.


Interesting...

The current documentation [1] mentions it's the right place to free
resources, and from what I saw, existing plugins assume this too (see
contrib/plugins/cache.c for instance).

There is probably a bug related to the case you mention; I'll try to
reproduce it and see if we can have a proper fix.

I'm not sure that removing cleanup code from existing plugins is the
right thing to do at the moment, even though there is an existing issue
with some programs.

Yes, absolutely. The problem is on the QEMU side. FWIW I tried to fix one
of those exit cases (semihosted exit syscall) some time ago:
https://lore.kernel.org/qemu-devel/20220621125916.25257-1-lmichel@kalray.eu/

IIRC Peter was not fundamentally against it. But then I was off for
several months due to medical reasons.

I can take some time to rebase it and address Peter's comments if there
is interest in it.


It would be much appreciated, and it's definitely worth fixing!

Luc


[1] https://www.qemu.org/docs/master/devel/tcg-plugins.html#c.qemu_plugin_register_atexit_cb
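
For reference, the cleanup pattern that documentation describes (and that
plugins such as contrib/plugins/cache.c follow) boils down to the minimal
sketch below; only the qemu_plugin_* calls are real API, the rest is
illustrative:

#include <glib.h>
#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

static GHashTable *stats;   /* illustrative per-plugin state */

static void plugin_exit(qemu_plugin_id_t id, void *p)
{
    /* Emit the report first... */
    qemu_plugin_outs("done\n");
    /* ...then free resources. This is only safe if no vCPU can still be
     * running instrumentation that touches `stats` at this point, which
     * is exactly the exit(3) case discussed above. */
    g_hash_table_destroy(stats);
}

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    stats = g_hash_table_new(NULL, g_direct_equal);
    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
    return 0;
}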

       g_string_append_printf(report, "%d entries in the hash table\n",
                              g_hash_table_size(hotblocks));
       counts = g_hash_table_get_values(hotblocks);
@@ -63,16 +72,21 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)

           for (i = 0; i < limit && it->next; i++, it = it->next) {
               ExecCount *rec = (ExecCount *) it->data;
-            g_string_append_printf(report, "0x%016"PRIx64", %d, %ld, %"PRId64"\n",
-                                   rec->start_addr, rec->trans_count,
-                                   rec->insns, rec->exec_count);
+            g_string_append_printf(
+                report, "0x%016"PRIx64", %d, %ld, %"PRId64"\n",
+                rec->start_addr, rec->trans_count,
+                rec->insns,
+                qemu_plugin_u64_sum(
+                    qemu_plugin_scoreboard_u64(rec->exec_count)));
           }

           g_list_free(it);
       }
-    g_mutex_unlock(&lock);

       qemu_plugin_outs(report->str);
+
+    g_hash_table_foreach(hotblocks, exec_count_free, NULL);
+    g_hash_table_destroy(hotblocks);
   }

   static void plugin_init(void)
@@ -82,15 +96,9 @@ static void plugin_init(void)

   static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
   {
-    ExecCount *cnt;
-    uint64_t hash = (uint64_t) udata;
-
-    g_mutex_lock(&lock);
-    cnt = (ExecCount *) g_hash_table_lookup(hotblocks, (gconstpointer) hash);
-    /* should always succeed */
-    g_assert(cnt);
-    cnt->exec_count++;
-    g_mutex_unlock(&lock);
+    ExecCount *cnt = (ExecCount *)udata;
+    qemu_plugin_u64_add(qemu_plugin_scoreboard_u64(cnt->exec_count),
+                        cpu_index, 1);
   }

   /*
@@ -114,18 +122,20 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
           cnt->start_addr = pc;
           cnt->trans_count = 1;
           cnt->insns = insns;
+        cnt->exec_count = qemu_plugin_scoreboard_new(sizeof(uint64_t));
           g_hash_table_insert(hotblocks, (gpointer) hash, (gpointer) cnt);
       }

       g_mutex_unlock(&lock);

       if (do_inline) {
-        qemu_plugin_register_vcpu_tb_exec_inline(tb, QEMU_PLUGIN_INLINE_ADD_U64,
-                                                 &cnt->exec_count, 1);
+        qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
+            tb, QEMU_PLUGIN_INLINE_ADD_U64,
+            qemu_plugin_scoreboard_u64(cnt->exec_count), 1);
       } else {
           qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec,
                                                QEMU_PLUGIN_CB_NO_REGS,
-                                             (void *)hash);
+                                             (void *)cnt);
       }
   }

--
2.43.0
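
For readers new to the API: the per_vcpu scoreboard pattern the patch
migrates to comes down to something like the following minimal sketch
(a self-contained toy plugin that only counts executed TBs globally
rather than per block; everything outside the qemu_plugin_* calls shown
in the diff is illustrative):

#include <inttypes.h>
#include <glib.h>
#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

static struct qemu_plugin_scoreboard *hits; /* one uint64_t slot per vCPU */
static gboolean do_inline;

/* Callback flavour: each vCPU bumps only its own slot, so no lock is needed. */
static void tb_exec(unsigned int cpu_index, void *udata)
{
    qemu_plugin_u64_add(qemu_plugin_scoreboard_u64(hits), cpu_index, 1);
}

static void tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    if (do_inline) {
        /* Inline flavour: TCG adds 1 to this vCPU's slot, no helper call. */
        qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
            tb, QEMU_PLUGIN_INLINE_ADD_U64,
            qemu_plugin_scoreboard_u64(hits), 1);
    } else {
        qemu_plugin_register_vcpu_tb_exec_cb(tb, tb_exec,
                                             QEMU_PLUGIN_CB_NO_REGS, NULL);
    }
}

static void plugin_exit(qemu_plugin_id_t id, void *p)
{
    g_autoptr(GString) out = g_string_new(NULL);

    /* Sum over all vCPU slots only when reporting. */
    g_string_append_printf(out, "TBs executed: %" PRIu64 "\n",
                           qemu_plugin_u64_sum(
                               qemu_plugin_scoreboard_u64(hits)));
    qemu_plugin_outs(out->str);
    qemu_plugin_scoreboard_free(hits);
}

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    hits = qemu_plugin_scoreboard_new(sizeof(uint64_t));
    qemu_plugin_register_vcpu_tb_trans_cb(id, tb_trans);
    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
    return 0;
}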






