From: Yuri Benditovich
Subject: Re: [PATCH v9 13/20] virtio-net: Return an error when vhost cannot enable RSS
Date: Tue, 16 Apr 2024 08:43:27 +0300

On Tue, Apr 16, 2024 at 7:00 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Mon, Apr 15, 2024 at 10:05 PM Yuri Benditovich
> <yuri.benditovich@daynix.com> wrote:
> >
> > On Wed, Apr 3, 2024 at 2:11 PM Akihiko Odaki <akihiko.odaki@daynix.com> 
> > wrote:
> > >
> > > vhost requires eBPF for RSS. When eBPF is not available, virtio-net
> > > implicitly disables RSS even if the user explicitly requests it. Return
> > > an error instead of implicitly disabling RSS if RSS is requested but not
> > > available.
> > >
> > > Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> > > ---
> > >  hw/net/virtio-net.c | 97 ++++++++++++++++++++++++++---------------------------
> > >  1 file changed, 48 insertions(+), 49 deletions(-)
> > >
> > > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > > index 61b49e335dea..3d53eba88cfc 100644
> > > --- a/hw/net/virtio-net.c
> > > +++ b/hw/net/virtio-net.c
> > > @@ -793,9 +793,6 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
> > >          return features;
> > >      }
> > >
> > > -    if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
> > > -        virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
> > > -    }
> > >      features = vhost_net_get_features(get_vhost_net(nc->peer), features);
> > >      vdev->backend_features = features;
> > >
> > > @@ -3591,6 +3588,50 @@ static bool failover_hide_primary_device(DeviceListener *listener,
> > >      return qatomic_read(&n->failover_primary_hidden);
> > >  }
> > >
> > > +static void virtio_net_device_unrealize(DeviceState *dev)
> > > +{
> > > +    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > > +    VirtIONet *n = VIRTIO_NET(dev);
> > > +    int i, max_queue_pairs;
> > > +
> > > +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> > > +        virtio_net_unload_ebpf(n);
> > > +    }
> > > +
> > > +    /* This will stop vhost backend if appropriate. */
> > > +    virtio_net_set_status(vdev, 0);
> > > +
> > > +    g_free(n->netclient_name);
> > > +    n->netclient_name = NULL;
> > > +    g_free(n->netclient_type);
> > > +    n->netclient_type = NULL;
> > > +
> > > +    g_free(n->mac_table.macs);
> > > +    g_free(n->vlans);
> > > +
> > > +    if (n->failover) {
> > > +        qobject_unref(n->primary_opts);
> > > +        device_listener_unregister(&n->primary_listener);
> > > +        migration_remove_notifier(&n->migration_state);
> > > +    } else {
> > > +        assert(n->primary_opts == NULL);
> > > +    }
> > > +
> > > +    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> > > +    for (i = 0; i < max_queue_pairs; i++) {
> > > +        virtio_net_del_queue(n, i);
> > > +    }
> > > +    /* delete also control vq */
> > > +    virtio_del_queue(vdev, max_queue_pairs * 2);
> > > +    qemu_announce_timer_del(&n->announce_timer, false);
> > > +    g_free(n->vqs);
> > > +    qemu_del_nic(n->nic);
> > > +    virtio_net_rsc_cleanup(n);
> > > +    g_free(n->rss_data.indirections_table);
> > > +    net_rx_pkt_uninit(n->rx_pkt);
> > > +    virtio_cleanup(vdev);
> > > +}
> > > +
> > >  static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> > >  {
> > >      VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > > @@ -3760,53 +3801,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> > >
> > >      net_rx_pkt_init(&n->rx_pkt);
> > >
> > > -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> > > -        virtio_net_load_ebpf(n);
> > > -    }
> > > -}
> > > -
> > > -static void virtio_net_device_unrealize(DeviceState *dev)
> > > -{
> > > -    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > > -    VirtIONet *n = VIRTIO_NET(dev);
> > > -    int i, max_queue_pairs;
> > > -
> > > -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> > > -        virtio_net_unload_ebpf(n);
> > > +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS) &&
> > > +        !virtio_net_load_ebpf(n) && get_vhost_net(nc->peer)) {
> > > +        virtio_net_device_unrealize(dev);
> > > +        error_setg(errp, "Can't load eBPF RSS for vhost");
> > >      }
> >
> > As I already mentioned, I think it is an extremely bad idea to
> > refuse to run qemu for a reason such as the absence of one feature.
> > What I suggest is:
> > 1. Redefine rss as tri-state (off|auto|on)
> > 2. Fail to run only if rss is on and not available via ebpf
> > 3. On auto - silently drop it
>
> "Auto" might be promatic for migration compatibility which is hard to
> be used by management layers like libvirt. The reason is that there's
> no way for libvirt to know if it is supported by device or not.

In terms of migration, every feature that somehow depends on the kernel
is problematic, not only RSS. Last time we added the USO feature - is
it any different?
And in terms of migration, "rss=on" is just as problematic as "rss=auto".
Can you please show one migration scenario where they would behave
differently? And in terms of everyday user experience, "auto" is a big
advantage.
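
Just to illustrate what I mean by the tri-state (a rough, untested
sketch only; "rss_requested" and the exact conditions are made up here,
not taken from the patch):

    /* Illustrative sketch: replace the boolean "rss" bit property with a
     * tri-state property and resolve it at realize time.
     * "rss_requested" would be a new OnOffAuto field in VirtIONet.
     */
    DEFINE_PROP_ON_OFF_AUTO("rss", VirtIONet, rss_requested, ON_OFF_AUTO_AUTO),

    /* ... and in virtio_net_device_realize(): */
    bool ebpf_ok = virtio_net_load_ebpf(n);

    if (n->rss_requested == ON_OFF_AUTO_ON &&
        !ebpf_ok && get_vhost_net(nc->peer)) {
        /* explicit "on" cannot be satisfied: fail realize */
        error_setg(errp, "RSS requested but eBPF RSS is not available");
        return;
    }
    if (n->rss_requested != ON_OFF_AUTO_OFF &&
        (ebpf_ok || !get_vhost_net(nc->peer))) {
        /* "auto", or an "on" that can be satisfied: enable the feature */
        virtio_add_feature(&n->host_features, VIRTIO_NET_F_RSS);
    }
    /* otherwise "auto" silently drops RSS and "off" never enables it */

Management that cares about migration compatibility can still pass an
explicit on/off and simply never use "auto".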


>
> Thanks
>
> > 4. The same for the 'hash' option - it is not compatible with vhost (at
> > least at the moment)
> > 5. Reformat the patch, as it is hard to review due to entire procedures
> > being replaced; i.e. one patch that moves the code without changes, and
> > another one with the real changes.
> > If this is hard to review only for me - please ignore that.
> >
> > > -
> > > -    /* This will stop vhost backend if appropriate. */
> > > -    virtio_net_set_status(vdev, 0);
> > > -
> > > -    g_free(n->netclient_name);
> > > -    n->netclient_name = NULL;
> > > -    g_free(n->netclient_type);
> > > -    n->netclient_type = NULL;
> > > -
> > > -    g_free(n->mac_table.macs);
> > > -    g_free(n->vlans);
> > > -
> > > -    if (n->failover) {
> > > -        qobject_unref(n->primary_opts);
> > > -        device_listener_unregister(&n->primary_listener);
> > > -        migration_remove_notifier(&n->migration_state);
> > > -    } else {
> > > -        assert(n->primary_opts == NULL);
> > > -    }
> > > -
> > > -    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> > > -    for (i = 0; i < max_queue_pairs; i++) {
> > > -        virtio_net_del_queue(n, i);
> > > -    }
> > > -    /* delete also control vq */
> > > -    virtio_del_queue(vdev, max_queue_pairs * 2);
> > > -    qemu_announce_timer_del(&n->announce_timer, false);
> > > -    g_free(n->vqs);
> > > -    qemu_del_nic(n->nic);
> > > -    virtio_net_rsc_cleanup(n);
> > > -    g_free(n->rss_data.indirections_table);
> > > -    net_rx_pkt_uninit(n->rx_pkt);
> > > -    virtio_cleanup(vdev);
> > >  }
> > >
> > >  static void virtio_net_reset(VirtIODevice *vdev)
> > >
> > > --
> > > 2.44.0
> > >
> >
>


