From: Luc Michel
Subject: Re: [PATCH v5 09/12] contrib/plugins/hotblocks: migrate to new per_vcpu API
Date: Tue, 27 Feb 2024 11:54:12 +0100
Hi Pierrick,
On 13:14 Mon 26 Feb, Pierrick Bouvier wrote:
> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
> ---
> contrib/plugins/hotblocks.c | 50 ++++++++++++++++++++++---------------
> 1 file changed, 30 insertions(+), 20 deletions(-)
>
> diff --git a/contrib/plugins/hotblocks.c b/contrib/plugins/hotblocks.c
> index 4de1b134944..02bc5078bdd 100644
> --- a/contrib/plugins/hotblocks.c
> +++ b/contrib/plugins/hotblocks.c
> @@ -34,8 +34,8 @@ static guint64 limit = 20;
> */
> typedef struct {
> uint64_t start_addr;
> - uint64_t exec_count;
> - int trans_count;
> + struct qemu_plugin_scoreboard *exec_count;
> + int trans_count;
> unsigned long insns;
> } ExecCount;
>
> @@ -43,7 +43,17 @@ static gint cmp_exec_count(gconstpointer a, gconstpointer b)
> {
> ExecCount *ea = (ExecCount *) a;
> ExecCount *eb = (ExecCount *) b;
> - return ea->exec_count > eb->exec_count ? -1 : 1;
> + uint64_t count_a =
> + qemu_plugin_u64_sum(qemu_plugin_scoreboard_u64(ea->exec_count));
> + uint64_t count_b =
> + qemu_plugin_u64_sum(qemu_plugin_scoreboard_u64(eb->exec_count));
> + return count_a > count_b ? -1 : 1;
> +}
> +
> +static void exec_count_free(gpointer key, gpointer value, gpointer user_data)
> +{
> + ExecCount *cnt = value;
> + qemu_plugin_scoreboard_free(cnt->exec_count);
> }
>
> static void plugin_exit(qemu_plugin_id_t id, void *p)
> @@ -52,7 +62,6 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
> GList *counts, *it;
> int i;
>
> - g_mutex_lock(&lock);
I have encountered cases before where the vCPUs continue executing while
plugin_exit is called. This can happen, e.g., when QEMU calls exit(3)
from one CPU thread; the other vCPU threads keep running while the
atexit callbacks are invoked.
This also means that you can't really free the resources as you do at
the end of plugin_exit.
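A rough, untested sketch of what I mean (the `exiting' flag is just for
illustration, it is not part of this patch): the exec callback could
bail out once exit starts, and the scoreboards would simply be left
allocated, since other vCPU threads may still be incrementing them.
Note this only covers the callback path; the inline ADD_U64 operations
would still write to the scoreboards, which is another reason not to
free them:

    static gint exiting; /* set once plugin_exit has started */

    static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
    {
        ExecCount *cnt = (ExecCount *)udata;

        /* vCPUs racing with exit(3) may still get here; drop the count. */
        if (g_atomic_int_get(&exiting)) {
            return;
        }
        qemu_plugin_u64_add(qemu_plugin_scoreboard_u64(cnt->exec_count),
                            cpu_index, 1);
    }

    static void plugin_exit(qemu_plugin_id_t id, void *p)
    {
        g_atomic_int_set(&exiting, 1);
        /* ... emit the report as before, but leave the hash table and
         * the scoreboards allocated: another vCPU thread may still
         * touch them while the atexit callbacks run. */
    }
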
> g_string_append_printf(report, "%d entries in the hash table\n",
> g_hash_table_size(hotblocks));
> counts = g_hash_table_get_values(hotblocks);
> @@ -63,16 +72,21 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
>
> for (i = 0; i < limit && it->next; i++, it = it->next) {
> ExecCount *rec = (ExecCount *) it->data;
> - g_string_append_printf(report, "0x%016"PRIx64", %d, %ld, %"PRId64"\n",
> - rec->start_addr, rec->trans_count,
> - rec->insns, rec->exec_count);
> + g_string_append_printf(
> + report, "0x%016"PRIx64", %d, %ld, %"PRId64"\n",
> + rec->start_addr, rec->trans_count,
> + rec->insns,
> + qemu_plugin_u64_sum(
> + qemu_plugin_scoreboard_u64(rec->exec_count)));
> }
>
> g_list_free(it);
> }
> - g_mutex_unlock(&lock);
>
> qemu_plugin_outs(report->str);
> +
> + g_hash_table_foreach(hotblocks, exec_count_free, NULL);
> + g_hash_table_destroy(hotblocks);
> }
>
> static void plugin_init(void)
> @@ -82,15 +96,9 @@ static void plugin_init(void)
>
> static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
> {
> - ExecCount *cnt;
> - uint64_t hash = (uint64_t) udata;
> -
> - g_mutex_lock(&lock);
> - cnt = (ExecCount *) g_hash_table_lookup(hotblocks, (gconstpointer) hash);
> - /* should always succeed */
> - g_assert(cnt);
> - cnt->exec_count++;
> - g_mutex_unlock(&lock);
> + ExecCount *cnt = (ExecCount *)udata;
> + qemu_plugin_u64_add(qemu_plugin_scoreboard_u64(cnt->exec_count),
> + cpu_index, 1);
> }
>
> /*
> @@ -114,18 +122,20 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
> cnt->start_addr = pc;
> cnt->trans_count = 1;
> cnt->insns = insns;
> + cnt->exec_count = qemu_plugin_scoreboard_new(sizeof(uint64_t));
> g_hash_table_insert(hotblocks, (gpointer) hash, (gpointer) cnt);
> }
>
> g_mutex_unlock(&lock);
>
> if (do_inline) {
> - qemu_plugin_register_vcpu_tb_exec_inline(tb, QEMU_PLUGIN_INLINE_ADD_U64,
> - &cnt->exec_count, 1);
> + qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
> + tb, QEMU_PLUGIN_INLINE_ADD_U64,
> + qemu_plugin_scoreboard_u64(cnt->exec_count), 1);
> } else {
> qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec,
> QEMU_PLUGIN_CB_NO_REGS,
> - (void *)hash);
> + (void *)cnt);
> }
> }
>
> --
> 2.43.0
>
>