qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH] virtio-net: vhost-user: Implement internal migration


From: Michael S. Tsirkin
Subject: Re: [PATCH] virtio-net: vhost-user: Implement internal migration
Date: Wed, 15 Jan 2025 05:10:53 -0500

On Wed, Dec 18, 2024 at 03:34:53PM +0100, Laurent Vivier wrote:
> Add support of VHOST_USER_PROTOCOL_F_DEVICE_STATE in virtio-net
> with vhost-user backend.
> 
> Cc: Hanna Czenczek <hreitz@redhat.com>
> Signed-off-by: Laurent Vivier <lvivier@redhat.com>


Breaks Windows builds:

https://gitlab.com/mstredhat/qemu/-/jobs/8855973625

> ---
>  hw/net/virtio-net.c | 135 ++++++++++++++++++++++++++++++++++++--------
>  1 file changed, 112 insertions(+), 23 deletions(-)
> 
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 6e8c51a2dbce..b4d9e96dc0d7 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -3337,6 +3337,117 @@ static const VMStateDescription 
> vmstate_virtio_net_rss = {
>      },
>  };
>  
> +static struct vhost_dev *virtio_net_get_vhost(VirtIODevice *vdev)
> +{
> +    VirtIONet *n = VIRTIO_NET(vdev);
> +    NetClientState *nc;
> +    struct vhost_net *net;
> +
> +    if (!n->nic) {
> +        return NULL;
> +    }
> +
> +    nc = qemu_get_queue(n->nic);
> +    if (!nc) {
> +        return NULL;
> +    }
> +
> +    net = get_vhost_net(nc->peer);
> +    if (!net) {
> +        return NULL;
> +    }
> +
> +    return &net->dev;
> +}
> +
> +static int vhost_user_net_save_state(QEMUFile *f, void *pv, size_t size,
> +                                     const VMStateField *field,
> +                                     JSONWriter *vmdesc)
> +{
> +    VirtIONet *n = pv;
> +    VirtIODevice *vdev = VIRTIO_DEVICE(n);
> +    struct vhost_dev *vhdev;
> +    Error *local_error = NULL;
> +    int ret;
> +
> +    vhdev = virtio_net_get_vhost(vdev);
> +    if (vhdev == NULL) {
> +        error_reportf_err(local_error,
> +                          "Error getting vhost back-end of %s device %s: ",
> +                          vdev->name, vdev->parent_obj.canonical_path);
> +        return -1;
> +    }
> +
> +    ret = vhost_save_backend_state(vhdev, f, &local_error);
> +    if (ret < 0) {
> +        error_reportf_err(local_error,
> +                          "Error saving back-end state of %s device %s: ",
> +                          vdev->name, vdev->parent_obj.canonical_path);
> +        return ret;
> +    }
> +
> +    return 0;
> +}
> +
> +static int vhost_user_net_load_state(QEMUFile *f, void *pv, size_t size,
> +                                     const VMStateField *field)
> +{
> +    VirtIONet *n = pv;
> +    VirtIODevice *vdev = VIRTIO_DEVICE(n);
> +    struct vhost_dev *vhdev;
> +    Error *local_error = NULL;
> +    int ret;
> +
> +    vhdev = virtio_net_get_vhost(vdev);
> +    if (vhdev == NULL) {
> +        error_reportf_err(local_error,
> +                          "Error getting vhost back-end of %s device %s: ",
> +                          vdev->name, vdev->parent_obj.canonical_path);
> +        return -1;
> +    }
> +
> +    ret = vhost_load_backend_state(vhdev, f, &local_error);
> +    if (ret < 0) {
> +        error_reportf_err(local_error,
> +                          "Error loading back-end state of %s device %s: ",
> +                          vdev->name, vdev->parent_obj.canonical_path);
> +        return ret;
> +    }
> +
> +    return 0;
> +}
> +
> +static bool vhost_user_net_is_internal_migration(void *opaque)
> +{
> +    VirtIONet *n = opaque;
> +    VirtIODevice *vdev = VIRTIO_DEVICE(n);
> +    struct vhost_dev *vhdev;
> +
> +    vhdev = virtio_net_get_vhost(vdev);
> +    if (vhdev == NULL) {
> +        return false;
> +    }
> +
> +    return vhost_supports_device_state(vhdev);
> +}
> +
> +static const VMStateDescription vhost_user_net_backend_state = {
> +    .name = "virtio-net-device/backend",
> +    .version_id = 0,
> +    .needed = vhost_user_net_is_internal_migration,
> +    .fields = (const VMStateField[]) {
> +        {
> +            .name = "backend",
> +            .info = &(const VMStateInfo) {
> +                .name = "virtio-net vhost-user backend state",
> +                .get = vhost_user_net_load_state,
> +                .put = vhost_user_net_save_state,
> +            },
> +         },
> +         VMSTATE_END_OF_LIST()
> +    }
> +};
> +
>  static const VMStateDescription vmstate_virtio_net_device = {
>      .name = "virtio-net-device",
>      .version_id = VIRTIO_NET_VM_VERSION,
> @@ -3389,6 +3500,7 @@ static const VMStateDescription 
> vmstate_virtio_net_device = {
>      },
>      .subsections = (const VMStateDescription * const []) {
>          &vmstate_virtio_net_rss,
> +        &vhost_user_net_backend_state,
>          NULL
>      }
>  };
> @@ -3950,29 +4062,6 @@ static bool dev_unplug_pending(void *opaque)
>      return vdc->primary_unplug_pending(dev);
>  }
>  
> -static struct vhost_dev *virtio_net_get_vhost(VirtIODevice *vdev)
> -{
> -    VirtIONet *n = VIRTIO_NET(vdev);
> -    NetClientState *nc;
> -    struct vhost_net *net;
> -
> -    if (!n->nic) {
> -        return NULL;
> -    }
> -
> -    nc = qemu_get_queue(n->nic);
> -    if (!nc) {
> -        return NULL;
> -    }
> -
> -    net = get_vhost_net(nc->peer);
> -    if (!net) {
> -        return NULL;
> -    }
> -
> -    return &net->dev;
> -}
> -
>  static const VMStateDescription vmstate_virtio_net = {
>      .name = "virtio-net",
>      .minimum_version_id = VIRTIO_NET_VM_VERSION,
> -- 
> 2.47.1




reply via email to

[Prev in Thread] Current Thread [Next in Thread]