From: Cindy Lu
Subject: [PATCH 1/1] virtio-pci: Fix the crash when the vector changes back from VIRTIO_NO_VECTOR
Date: Tue, 2 Apr 2024 23:00:10 +0800

When the guest calls virtio_stop and then virtio_reset, the vector changes to
VIRTIO_NO_VECTOR and the IRQFD for that vector is released. If the vector is
later changed back to a valid value, QEMU crashes.

To fix this, call kvm_virtio_pci_vector_use_one() when the vector changes
back from VIRTIO_NO_VECTOR.
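
For reference, a minimal standalone sketch (illustration only, not part of
the patch) of the recovery condition added in virtio_pci_common_write(). The
constants match the virtio spec; write_config_vector() and re_enable_irqfd()
are hypothetical stand-ins for the proxy write handler and
kvm_virtio_pci_vector_use_one(..., true):

#include <stdint.h>
#include <stdio.h>

#define VIRTIO_NO_VECTOR          0xffff
#define VIRTIO_CONFIG_S_DRIVER_OK 4

static void re_enable_irqfd(uint16_t vector)
{
    /* Stand-in for kvm_virtio_pci_vector_use_one(proxy, ..., true). */
    printf("re-wiring irqfd for vector %u\n", vector);
}

static void write_config_vector(uint16_t *config_vector, uint16_t val,
                                uint8_t status)
{
    uint16_t old = *config_vector;

    *config_vector = val;
    /* Recover only on a NO_VECTOR -> real-vector change with DRIVER_OK set. */
    if (val != VIRTIO_NO_VECTOR && old == VIRTIO_NO_VECTOR &&
        (status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        re_enable_irqfd(val);
    }
}

int main(void)
{
    uint16_t config_vector = 0;

    /* reset path: vector dropped to VIRTIO_NO_VECTOR, irqfd released */
    write_config_vector(&config_vector, VIRTIO_NO_VECTOR,
                        VIRTIO_CONFIG_S_DRIVER_OK);
    /* guest sets a real vector again: the recovery branch re-wires the irqfd */
    write_config_vector(&config_vector, 1, VIRTIO_CONFIG_S_DRIVER_OK);
    return 0;
}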

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 hw/virtio/virtio-pci.c | 31 ++++++++++++++++++++++++++++---
 1 file changed, 28 insertions(+), 3 deletions(-)

diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index e433879542..45f3ab38c3 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -874,12 +874,14 @@ static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
     return 0;
 }
 
-static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no)
+static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no,
+                                         bool recovery)
 {
     unsigned int vector;
     int ret;
     EventNotifier *n;
     PCIDevice *dev = &proxy->pci_dev;
+    VirtIOIRQFD *irqfd;
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
 
@@ -890,10 +892,21 @@ static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no)
     if (vector >= msix_nr_vectors_allocated(dev)) {
         return 0;
     }
+    /*
+     * If this is a recovery and the irqfd is still in use, the irqfd was
+     * never released, so there is no need to set it up again.
+     */
+    if (recovery) {
+        irqfd = &proxy->vector_irqfd[vector];
+        if (irqfd->users != 0) {
+            return 0;
+        }
+    }
     ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
     if (ret < 0) {
         goto undo;
     }
+
     /*
      * If guest supports masking, set up irqfd now.
      * Otherwise, delay until unmasked in the frontend.
@@ -932,14 +945,14 @@ static int kvm_virtio_pci_vector_vq_use(VirtIOPCIProxy *proxy, int nvqs)
         if (!virtio_queue_get_num(vdev, queue_no)) {
             return -1;
         }
-        ret = kvm_virtio_pci_vector_use_one(proxy, queue_no);
+        ret = kvm_virtio_pci_vector_use_one(proxy, queue_no, false);
     }
     return ret;
 }
 
 static int kvm_virtio_pci_vector_config_use(VirtIOPCIProxy *proxy)
 {
-    return kvm_virtio_pci_vector_use_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
+    return kvm_virtio_pci_vector_use_one(proxy, VIRTIO_CONFIG_IRQ_IDX, false);
 }
 
 static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy,
@@ -1570,7 +1583,13 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr,
         } else {
             val = VIRTIO_NO_VECTOR;
         }
+        vector = vdev->config_vector;
         vdev->config_vector = val;
+        /* check whether the vector needs to be recovered */
+        if ((val != VIRTIO_NO_VECTOR) && (vector == VIRTIO_NO_VECTOR) &&
+            (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
+            kvm_virtio_pci_vector_use_one(proxy, VIRTIO_CONFIG_IRQ_IDX, true);
+        }
         break;
     case VIRTIO_PCI_COMMON_STATUS:
         if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
@@ -1611,6 +1630,12 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr,
             val = VIRTIO_NO_VECTOR;
         }
         virtio_queue_set_vector(vdev, vdev->queue_sel, val);
+
+        /* check whether the vector needs to be recovered */
+        if ((val != VIRTIO_NO_VECTOR) && (vector == VIRTIO_NO_VECTOR) &&
+            (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
+            kvm_virtio_pci_vector_use_one(proxy, vdev->queue_sel, true);
+        }
         break;
     case VIRTIO_PCI_COMMON_Q_ENABLE:
         if (val == 1) {
-- 
2.43.0



