From: Albert Esteve
Subject: Re: [PATCH v3 2/3] hw/virtio: cleanup shared resources
Date: Mon, 19 Feb 2024 11:45:08 +0100
On Tue, Feb 6, 2024 at 12:11 AM Alex Bennée <alex.bennee@linaro.org> wrote:

Albert Esteve <aesteve@redhat.com> writes:
> Ensure that we clean up all virtio shared
> resources when the vhost device is cleaned
> up (after a hot unplug, or a crash).
>
> To do so, we add a new function to the virtio_dmabuf
> API called `virtio_dmabuf_vhost_cleanup`, which
> loops through the table and removes all
> resources owned by the vhost device parameter.
>
> Also, add a test to verify that the new
> function in the API behaves as expected.
>
> Signed-off-by: Albert Esteve <aesteve@redhat.com>
> Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
> hw/display/virtio-dmabuf.c | 22 +++++++++++++++++++++
> hw/virtio/vhost.c | 3 +++
> include/hw/virtio/virtio-dmabuf.h | 10 ++++++++++
> tests/unit/test-virtio-dmabuf.c | 33 +++++++++++++++++++++++++++++++
> 4 files changed, 68 insertions(+)
>
> diff --git a/hw/display/virtio-dmabuf.c b/hw/display/virtio-dmabuf.c
> index 3dba4577ca..6688809777 100644
> --- a/hw/display/virtio-dmabuf.c
> +++ b/hw/display/virtio-dmabuf.c
> @@ -136,6 +136,28 @@ SharedObjectType virtio_object_type(const QemuUUID *uuid)
> return vso->type;
> }
>
> +static bool virtio_dmabuf_resource_is_owned(gpointer key,
> + gpointer value,
> + gpointer dev)
> +{
> + VirtioSharedObject *vso;
> +
> + vso = (VirtioSharedObject *) value;
> + return vso->type == TYPE_VHOST_DEV && vso->value == dev;
It's a bit surprising to see vso->value being an anonymous gpointer
rather than the proper type, and the distinction between value and
vso->value is a bit confusing.
It is the signature required for this to be used with
`g_hash_table_foreach_remove`. As for the naming, the hash table stores
gpointers that point to a `VirtioSharedObject`, and these in turn point
to the underlying type (stored at `vso->value`). It may sound a bit
confusing, but it is a byproduct of the VirtioSharedObject indirection.
I am not sure which names would be a better fit, but I'm open to
changing them.
> +}
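To illustrate the pattern outside of QEMU, here is a minimal standalone
GLib sketch (hypothetical names, not the actual QEMU types): a small
wrapper struct plays the role of VirtioSharedObject, and the GHRFunc
predicate compares the wrapped pointer against the owner passed as
user_data to g_hash_table_foreach_remove().

#include <glib.h>

#define TYPE_OWNER_DEV 1

/* Stand-in for VirtioSharedObject: the table value wraps the real owner. */
typedef struct {
    int      type;   /* e.g. TYPE_OWNER_DEV */
    gpointer value;  /* points to the owning device */
} SharedObject;

/* GHRFunc: 'value' is the table entry, 'user_data' is the owner to match.
 * Returning TRUE tells g_hash_table_foreach_remove() to drop the entry. */
static gboolean owned_by(gpointer key, gpointer value, gpointer user_data)
{
    SharedObject *so = value;
    return so->type == TYPE_OWNER_DEV && so->value == user_data;
}

int main(void)
{
    int dev_a, dev_b; /* dummy "devices"; only their addresses matter */
    GHashTable *table = g_hash_table_new_full(g_str_hash, g_str_equal,
                                              NULL, g_free);

    SharedObject *a = g_new0(SharedObject, 1);
    a->type = TYPE_OWNER_DEV;
    a->value = &dev_a;
    g_hash_table_insert(table, (gpointer) "uuid-a", a);

    SharedObject *b = g_new0(SharedObject, 1);
    b->type = TYPE_OWNER_DEV;
    b->value = &dev_b;
    g_hash_table_insert(table, (gpointer) "uuid-b", b);

    /* Removes only the entries whose wrapped pointer is &dev_a. */
    guint removed = g_hash_table_foreach_remove(table, owned_by, &dev_a);
    g_print("removed %u entries, %u left\n",
            removed, g_hash_table_size(table));

    g_hash_table_destroy(table);
    return 0;
}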
> +
> +int virtio_dmabuf_vhost_cleanup(struct vhost_dev *dev)
> +{
> + int num_removed;
> +
> + g_mutex_lock(&lock);
> + num_removed = g_hash_table_foreach_remove(
> + resource_uuids, (GHRFunc) virtio_dmabuf_resource_is_owned, dev);
> + g_mutex_unlock(&lock);
I'll note if we used a QemuMutex for the lock we could:
- use WITH_QEMU_LOCK_GUARD(&lock) { }
- enable QSP profiling for the lock
I was not aware of these QemuMutex features. I wouldn't mind changing
the mutex in this file in a different commit.
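For reference, a rough, untested sketch of what that could look like
here (assuming the file-scope `lock` is converted to a QemuMutex and
initialized with qemu_mutex_init() during setup):

#include "qemu/lockable.h"   /* WITH_QEMU_LOCK_GUARD */
#include "qemu/thread.h"     /* QemuMutex */

static QemuMutex lock;       /* init with qemu_mutex_init(&lock) at setup */

int virtio_dmabuf_vhost_cleanup(struct vhost_dev *dev)
{
    int num_removed = 0;

    /* The lock is taken on entry to the block and dropped when the
     * block is left, including on early exits. */
    WITH_QEMU_LOCK_GUARD(&lock) {
        num_removed = g_hash_table_foreach_remove(
            resource_uuids, (GHRFunc) virtio_dmabuf_resource_is_owned, dev);
    }

    return num_removed;
}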
> +
> + return num_removed;
> +}
> +
> void virtio_free_resources(void)
> {
> g_mutex_lock(&lock);
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 2c9ac79468..c5622eac14 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -16,6 +16,7 @@
> #include "qemu/osdep.h"
> #include "qapi/error.h"
> #include "hw/virtio/vhost.h"
> +#include "hw/virtio/virtio-dmabuf.h"
> #include "qemu/atomic.h"
> #include "qemu/range.h"
> #include "qemu/error-report.h"
> @@ -1599,6 +1600,8 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
> migrate_del_blocker(&hdev->migration_blocker);
> g_free(hdev->mem);
> g_free(hdev->mem_sections);
> + /* free virtio shared objects */
> + virtio_dmabuf_vhost_cleanup(hdev);
> if (hdev->vhost_ops) {
> hdev->vhost_ops->vhost_backend_cleanup(hdev);
> }
> diff --git a/include/hw/virtio/virtio-dmabuf.h b/include/hw/virtio/virtio-dmabuf.h
> index 627c3b6db7..73f70fb482 100644
> --- a/include/hw/virtio/virtio-dmabuf.h
> +++ b/include/hw/virtio/virtio-dmabuf.h
> @@ -91,6 +91,16 @@ struct vhost_dev *virtio_lookup_vhost_device(const QemuUUID *uuid);
> */
> SharedObjectType virtio_object_type(const QemuUUID *uuid);
>
> +/**
> + * virtio_dmabuf_vhost_cleanup() - Destroys all entries of the shared
> + * resources lookup table that are owned by the vhost backend
> + * @dev: the pointer to the vhost device that owns the entries. Data is owned
> + * by the caller of the function.
> + *
> + * Return: the number of resource entries removed.
> + */
> +int virtio_dmabuf_vhost_cleanup(struct vhost_dev *dev);
> +
> /**
> * virtio_free_resources() - Destroys all keys and values of the shared
> * resources lookup table, and frees them
> diff --git a/tests/unit/test-virtio-dmabuf.c b/tests/unit/test-virtio-dmabuf.c
> index a45ec52f42..1c8123c2d2 100644
> --- a/tests/unit/test-virtio-dmabuf.c
> +++ b/tests/unit/test-virtio-dmabuf.c
> @@ -103,6 +103,38 @@ static void test_add_invalid_resource(void)
> }
> }
>
> +static void test_cleanup_res(void)
> +{
> + QemuUUID uuids[20], uuid_alt;
> + struct vhost_dev *dev = g_new0(struct vhost_dev, 1);
> + struct vhost_dev *dev_alt = g_new0(struct vhost_dev, 1);
> + int i, num_removed;
> +
> + for (i = 0; i < ARRAY_SIZE(uuids); ++i) {
> + qemu_uuid_generate(&uuids[i]);
> + virtio_add_vhost_device(&uuids[i], dev);
> + /* vhost device is found */
> + g_assert(virtio_lookup_vhost_device(&uuids[i]) != NULL);
> + }
> + qemu_uuid_generate(&uuid_alt);
> + virtio_add_vhost_device(&uuid_alt, dev_alt);
> + /* vhost device is found */
> + g_assert(virtio_lookup_vhost_device(&uuid_alt) != NULL);
> + /* cleanup all dev resources */
> + num_removed = virtio_dmabuf_vhost_cleanup(dev);
> + g_assert_cmpint(num_removed, ==, ARRAY_SIZE(uuids));
> + for (i = 0; i < ARRAY_SIZE(uuids); ++i) {
> + /* None of the dev resources is found after being freed */
> + g_assert_cmpint(virtio_lookup_dmabuf(&uuids[i]), ==, -1);
> + }
> + /* uuid_alt is still in the hash table */
> + g_assert(virtio_lookup_vhost_device(&uuid_alt) != NULL);
> +
> + virtio_free_resources();
> + g_free(dev);
> + g_free(dev_alt);
> +}
> +
> static void test_free_resources(void)
> {
> QemuUUID uuids[20];
> @@ -131,6 +163,7 @@ int main(int argc, char **argv)
> test_remove_invalid_resource);
> g_test_add_func("/virtio-dmabuf/add_invalid_res",
> test_add_invalid_resource);
> + g_test_add_func("/virtio-dmabuf/cleanup_dev", test_cleanup_res);
> g_test_add_func("/virtio-dmabuf/free_res", test_free_resources);
>
> return g_test_run();
--
Alex Bennée
Virtualisation Tech Lead @ Linaro