From: Eugenio Pérez
Subject: [PATCH 9.0 01/13] vdpa: add VhostVDPAShared
Date: Fri, 24 Nov 2023 18:14:18 +0100
It will hold the properties shared among all vhost_vdpa instances
associated with the same device. For example, only one iova_tree and one
memory listener are needed for the entire device.
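
For orientation, a rough sketch of how VhostVDPAShared is expected to
look by the end of the series, judging from the follow-up patch
subjects. The struct is intentionally empty in this patch; the fields
below are illustrative only, and land in later patches:

    /* Illustrative only: fields are moved here by later patches */
    typedef struct vhost_vdpa_shared {
        int device_fd;                           /* patch 06 */
        struct vhost_vdpa_iova_range iova_range; /* patch 03 */
        MemoryListener listener;                 /* one listener per device */
        VhostIOVATree *iova_tree;                /* patch 02: one tree per device */
        bool iotlb_batch_begin_sent;             /* patch 07 */
        uint64_t backend_cap;                    /* patch 08 */
    } VhostVDPAShared;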
Next patches will register the vhost_vdpa memory listener at the
destination at the beginning of the VM migration. This enables QEMU to
map the memory to the device before stopping the VM at the source,
instead of doing it while both source and destination are stopped, thus
minimizing the downtime.
However, the destination QEMU is unaware of which vhost_vdpa struct will
register its memory_listener. If the source guest has CVQ enabled, it
will be the one associated with the CVQ. Otherwise, it will be the
first one.
Save the members related to memory operations in a common place, rather
than always in the first / last vhost_vdpa.
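
Condensed into pseudocode, the lifecycle this patch introduces is:
queue pair 0 allocates the shared struct, every other vhost_vdpa
(including the CVQ one) borrows the pointer from ncs[0], and only
index 0 frees it on cleanup. A minimal sketch summarizing the diff
below:

    /* net_vhost_vdpa_init(): allocate once, then share the pointer */
    if (queue_pair_index == 0) {
        s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
    } else {
        s->vhost_vdpa.shared = shared;   /* taken from ncs[0] by the caller */
    }

    /* vhost_vdpa_cleanup(): only the first vhost_vdpa owns the memory,
     * so the struct is freed exactly once */
    if (s->vhost_vdpa.index != 0) {
        return;
    }
    g_free(s->vhost_vdpa.shared);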
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
include/hw/virtio/vhost-vdpa.h | 5 +++++
net/vhost-vdpa.c | 24 ++++++++++++++++++++++--
2 files changed, 27 insertions(+), 2 deletions(-)
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 5407d54fd7..eb1a56d75a 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -30,6 +30,10 @@ typedef struct VhostVDPAHostNotifier {
     void *addr;
 } VhostVDPAHostNotifier;
 
+/* Info shared by all vhost_vdpa device models */
+typedef struct vhost_vdpa_shared {
+} VhostVDPAShared;
+
 typedef struct vhost_vdpa {
     int device_fd;
     int index;
@@ -46,6 +50,7 @@ typedef struct vhost_vdpa {
     bool suspended;
     /* IOVA mapping used by the Shadow Virtqueue */
     VhostIOVATree *iova_tree;
+    VhostVDPAShared *shared;
     GPtrArray *shadow_vqs;
     const VhostShadowVirtqueueOps *shadow_vq_ops;
     void *shadow_vq_ops_opaque;
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index d0614d7954..8b661b9e6d 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -240,6 +240,10 @@ static void vhost_vdpa_cleanup(NetClientState *nc)
         qemu_close(s->vhost_vdpa.device_fd);
         s->vhost_vdpa.device_fd = -1;
     }
+    if (s->vhost_vdpa.index != 0) {
+        return;
+    }
+    g_free(s->vhost_vdpa.shared);
 }
 
 /** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */
@@ -1661,6 +1665,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                            bool svq,
                                            struct vhost_vdpa_iova_range iova_range,
                                            uint64_t features,
+                                           VhostVDPAShared *shared,
                                            Error **errp)
 {
     NetClientState *nc = NULL;
@@ -1696,6 +1701,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
     if (queue_pair_index == 0) {
         vhost_vdpa_net_valid_svq_features(features,
                                           &s->vhost_vdpa.migration_blocker);
+        s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
     } else if (!is_datapath) {
         s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                      PROT_READ | PROT_WRITE,
@@ -1708,11 +1714,16 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
         s->vhost_vdpa.shadow_vq_ops_opaque = s;
         s->cvq_isolated = cvq_isolated;
     }
+    if (queue_pair_index != 0) {
+        s->vhost_vdpa.shared = shared;
+    }
+
     ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
     if (ret) {
         qemu_del_net_client(nc);
         return NULL;
     }
+
     return nc;
 }
 
@@ -1824,17 +1835,26 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
     ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
 
     for (i = 0; i < queue_pairs; i++) {
+        VhostVDPAShared *shared = NULL;
+
+        if (i) {
+            shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared;
+        }
         ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                      vdpa_device_fd, i, 2, true, opts->x_svq,
-                                     iova_range, features, errp);
+                                     iova_range, features, shared, errp);
         if (!ncs[i])
             goto err;
     }
 
     if (has_cvq) {
+        VhostVDPAState *s0 = DO_UPCAST(VhostVDPAState, nc, ncs[0]);
+        VhostVDPAShared *shared = s0->vhost_vdpa.shared;
+
         nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                  vdpa_device_fd, i, 1, false,
-                                 opts->x_svq, iova_range, features, errp);
+                                 opts->x_svq, iova_range, features, shared,
+                                 errp);
         if (!nc)
             goto err;
     }
--
2.39.3