We have qemu_plugin_dyn_cb.type to differentiate the various
callback types, so we do not need to keep them in separate queues.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
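[Not part of the patch: a minimal, self-contained sketch of the dispatch
pattern this change moves to, namely one GArray of callbacks whose handling
is selected by a per-entry type field instead of one queue per subtype.
All names below are simplified stand-ins for illustration, not the QEMU
plugin API itself.]

    #include <glib.h>
    #include <stdio.h>

    enum cb_type { CB_REGULAR, CB_INLINE };

    struct dyn_cb {
        enum cb_type type;
        int payload;
    };

    static void dispatch_all(GArray *cbs)
    {
        /* A NULL array simply means "no callbacks registered". */
        for (guint i = 0, n = cbs ? cbs->len : 0; i < n; i++) {
            struct dyn_cb *cb = &g_array_index(cbs, struct dyn_cb, i);

            /* One queue, discriminated by type, replaces per-subtype queues. */
            switch (cb->type) {
            case CB_REGULAR:
                printf("regular cb, payload %d\n", cb->payload);
                break;
            case CB_INLINE:
                printf("inline cb, payload %d\n", cb->payload);
                break;
            default:
                g_assert_not_reached();
            }
        }
    }

    int main(void)
    {
        GArray *cbs = g_array_new(FALSE, FALSE, sizeof(struct dyn_cb));
        struct dyn_cb a = { CB_REGULAR, 1 }, b = { CB_INLINE, 2 };

        g_array_append_val(cbs, a);
        g_array_append_val(cbs, b);
        dispatch_all(cbs);
        g_array_free(cbs, TRUE);
        return 0;
    }
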
include/qemu/plugin.h | 35 ++++++----------
accel/tcg/plugin-gen.c | 90 ++++++++++++++++++++++--------------------
plugins/api.c | 18 +++------
3 files changed, 65 insertions(+), 78 deletions(-)
diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h
index ee1c1b174a..cf9758be55 100644
--- a/include/qemu/plugin.h
+++ b/include/qemu/plugin.h
@@ -66,15 +66,8 @@ union qemu_plugin_cb_sig {
};
enum plugin_dyn_cb_type {
- PLUGIN_CB_INSN,
- PLUGIN_CB_MEM,
- PLUGIN_N_CB_TYPES,
-};
-
-enum plugin_dyn_cb_subtype {
PLUGIN_CB_REGULAR,
PLUGIN_CB_INLINE,
- PLUGIN_N_CB_SUBTYPES,
};
/*
@@ -84,7 +77,7 @@ enum plugin_dyn_cb_subtype {
*/
struct qemu_plugin_dyn_cb {
void *userp;
- enum plugin_dyn_cb_subtype type;
+ enum plugin_dyn_cb_type type;
/* @rw applies to mem callbacks only (both regular and inline) */
enum qemu_plugin_mem_rw rw;
/* fields specific to each dyn_cb type go here */
@@ -106,7 +99,8 @@ struct qemu_plugin_insn {
GByteArray *data;
uint64_t vaddr;
void *haddr;
- GArray *cbs[PLUGIN_N_CB_TYPES][PLUGIN_N_CB_SUBTYPES];
+ GArray *insn_cbs;
+ GArray *mem_cbs;
bool calls_helpers;
/* if set, the instruction calls helpers that might access guest memory */
@@ -135,16 +129,9 @@ static inline void qemu_plugin_insn_cleanup_fn(gpointer data)
static inline struct qemu_plugin_insn *qemu_plugin_insn_alloc(void)
{
- int i, j;
struct qemu_plugin_insn *insn = g_new0(struct qemu_plugin_insn, 1);
- insn->data = g_byte_array_sized_new(4);
- for (i = 0; i < PLUGIN_N_CB_TYPES; i++) {
- for (j = 0; j < PLUGIN_N_CB_SUBTYPES; j++) {
- insn->cbs[i][j] = g_array_new(false, false,
- sizeof(struct qemu_plugin_dyn_cb));
- }
- }
+ insn->data = g_byte_array_sized_new(4);
return insn;
}
@@ -161,7 +148,7 @@ struct qemu_plugin_tb {
/* if set, the TB calls helpers that might access guest memory */
bool mem_helper;
- GArray *cbs[PLUGIN_N_CB_SUBTYPES];
+ GArray *cbs;
};
/**
@@ -174,22 +161,22 @@ struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb,
uint64_t pc)
{
struct qemu_plugin_insn *insn;
- int i, j;
if (unlikely(tb->n == tb->insns->len)) {
struct qemu_plugin_insn *new_insn = qemu_plugin_insn_alloc();
g_ptr_array_add(tb->insns, new_insn);
}
+
insn = g_ptr_array_index(tb->insns, tb->n++);
g_byte_array_set_size(insn->data, 0);
insn->calls_helpers = false;
insn->mem_helper = false;
insn->vaddr = pc;
-
- for (i = 0; i < PLUGIN_N_CB_TYPES; i++) {
- for (j = 0; j < PLUGIN_N_CB_SUBTYPES; j++) {
- g_array_set_size(insn->cbs[i][j], 0);
- }
+ if (insn->insn_cbs) {
+ g_array_set_size(insn->insn_cbs, 0);
+ }
+ if (insn->mem_cbs) {
+ g_array_set_size(insn->mem_cbs, 0);
}
return insn;
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index c8f0e0ecaa..669e343cfb 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -89,9 +89,8 @@ void plugin_gen_disable_mem_helpers(void)
static void gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
struct qemu_plugin_insn *insn)
{
- GArray *cbs[2];
GArray *arr;
- size_t n_cbs;
+ size_t len;
/*
* Tracking memory accesses performed from helpers requires extra work.
@@ -110,22 +109,25 @@ static void gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
return;
}
- cbs[0] = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
- cbs[1] = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
- n_cbs = cbs[0]->len + cbs[1]->len;
-
- if (n_cbs == 0) {
+ if (!insn->mem_cbs || !insn->mem_cbs->len) {
insn->mem_helper = false;
return;
}
insn->mem_helper = true;
ptb->mem_helper = true;
+ /*
+ * TODO: It seems like we should be able to use ref/unref
+ * to avoid needing to actually copy this array.
+ * Alternately, perhaps we could allocate new memory adjacent
+ * to the TranslationBlock itself, so that we do not have to
+ * actively manage the lifetime after this.
+ */
+ len = insn->mem_cbs->len;
arr = g_array_sized_new(false, false,
- sizeof(struct qemu_plugin_dyn_cb), n_cbs);
- g_array_append_vals(arr, cbs[0]->data, cbs[0]->len);
- g_array_append_vals(arr, cbs[1]->data, cbs[1]->len);
-
+ sizeof(struct qemu_plugin_dyn_cb), len);
+ memcpy(arr->data, insn->mem_cbs->data,
+ len * sizeof(struct qemu_plugin_dyn_cb));
qemu_plugin_add_dyn_cb_arr(arr);
tcg_gen_st_ptr(tcg_constant_ptr((intptr_t)arr), tcg_env,
@@ -294,18 +296,21 @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
case PLUGIN_GEN_FROM_TB:
assert(insn == NULL);
- cbs = plugin_tb->cbs[PLUGIN_CB_REGULAR];
+ cbs = plugin_tb->cbs;
for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
struct qemu_plugin_dyn_cb *cb =
&g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
- gen_udata_cb(cb);
- }
- cbs = plugin_tb->cbs[PLUGIN_CB_INLINE];
- for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
- struct qemu_plugin_dyn_cb *cb =
- &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
- gen_inline_cb(cb);
+ switch (cb->type) {
+ case PLUGIN_CB_REGULAR:
+ gen_udata_cb(cb);
+ break;
+ case PLUGIN_CB_INLINE:
+ gen_inline_cb(cb);
+ break;
+ default:
+ g_assert_not_reached();
+ }
}
break;
@@ -314,18 +319,21 @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
gen_enable_mem_helper(plugin_tb, insn);
- cbs = insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR];
+ cbs = insn->insn_cbs;
for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
struct qemu_plugin_dyn_cb *cb =
&g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
- gen_udata_cb(cb);
- }
- cbs = insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE];
- for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
- struct qemu_plugin_dyn_cb *cb =
- &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
- gen_inline_cb(cb);
+ switch (cb->type) {
+ case PLUGIN_CB_REGULAR:
+ gen_udata_cb(cb);
+ break;
+ case PLUGIN_CB_INLINE:
+ gen_inline_cb(cb);
+ break;
+ default:
+ g_assert_not_reached();
+ }
}
break;
@@ -352,21 +360,22 @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
tcg_ctx->emit_before_op = op;
- cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
+ cbs = insn->mem_cbs;
for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
struct qemu_plugin_dyn_cb *cb =
&g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
- if (cb->rw & rw) {
- gen_mem_cb(cb, meminfo, addr);
- }
- }
- cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
- for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
- struct qemu_plugin_dyn_cb *cb =
- &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
if (cb->rw & rw) {
- gen_inline_cb(cb);
+ switch (cb->type) {
+ case PLUGIN_CB_REGULAR:
+ gen_mem_cb(cb, meminfo, addr);
+ break;
+ case PLUGIN_CB_INLINE:
+ gen_inline_cb(cb);
+ break;
+ default:
+ g_assert_not_reached();
+ }
}
}
@@ -390,13 +399,10 @@ bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_state->event_mask)) {
struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
- int i;
/* reset callbacks */
- for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) {
- if (ptb->cbs[i]) {
- g_array_set_size(ptb->cbs[i], 0);
- }
+ if (ptb->cbs) {
+ g_array_set_size(ptb->cbs, 0);
}
ptb->n = 0;
diff --git a/plugins/api.c b/plugins/api.c
index 29cce2d97c..3912c9cc8f 100644
--- a/plugins/api.c
+++ b/plugins/api.c
@@ -92,8 +92,7 @@ void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb,
void *udata)
{
if (!tb->mem_only) {
- plugin_register_dyn_cb__udata(&tb->cbs[PLUGIN_CB_REGULAR],
- cb, flags, udata);
+ plugin_register_dyn_cb__udata(&tb->cbs, cb, flags, udata);
}
}
@@ -104,8 +103,7 @@ void qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
uint64_t imm)
{
if (!tb->mem_only) {
- plugin_register_inline_op_on_entry(
- &tb->cbs[PLUGIN_CB_INLINE], 0, op, entry, imm);
+ plugin_register_inline_op_on_entry(&tb->cbs, 0, op, entry, imm);
}
}
@@ -115,8 +113,7 @@ void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
void *udata)
{
if (!insn->mem_only) {
- plugin_register_dyn_cb__udata(
- &insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], cb, flags, udata);
+ plugin_register_dyn_cb__udata(&insn->insn_cbs, cb, flags, udata);
}
}
@@ -127,8 +124,7 @@ void qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
uint64_t imm)
{
if (!insn->mem_only) {
- plugin_register_inline_op_on_entry(
- &insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE], 0, op, entry, imm);
+ plugin_register_inline_op_on_entry(&insn->insn_cbs, 0, op, entry, imm);
}
}
@@ -143,8 +139,7 @@ void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn,
enum qemu_plugin_mem_rw rw,
void *udata)
{
- plugin_register_vcpu_mem_cb(&insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR],
- cb, flags, rw, udata);
+ plugin_register_vcpu_mem_cb(&insn->mem_cbs, cb, flags, rw, udata);
}
void qemu_plugin_register_vcpu_mem_inline_per_vcpu(
@@ -154,8 +149,7 @@ void qemu_plugin_register_vcpu_mem_inline_per_vcpu(
qemu_plugin_u64 entry,
uint64_t imm)
{
- plugin_register_inline_op_on_entry(
- &insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE], rw, op, entry, imm);
+ plugin_register_inline_op_on_entry(&insn->mem_cbs, rw, op, entry, imm);
}
void qemu_plugin_register_vcpu_tb_trans_cb(qemu_plugin_id_t id,