From: Michael S. Tsirkin
Subject: [PULL 15/68] libvhost-user: Merge vu_set_mem_table_exec_postcopy() into vu_set_mem_table_exec()
Date: Tue, 12 Mar 2024 18:26:14 -0400

From: David Hildenbrand <david@redhat.com>

Let's reduce some code duplication and prepare for further changes.

Reviewed-by: Raphael Norwitz <raphael@enfabrica.net>
Acked-by: Stefano Garzarella <sgarzare@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20240214151701.29906-5-david@redhat.com>
Tested-by: Mario Casquero <mcasquer@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
subprojects/libvhost-user/libvhost-user.c | 119 +++++++---------------
1 file changed, 39 insertions(+), 80 deletions(-)

diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c
index e4907dfc26..a7bd7de3cd 100644
--- a/subprojects/libvhost-user/libvhost-user.c
+++ b/subprojects/libvhost-user/libvhost-user.c
@@ -937,95 +937,23 @@ vu_get_shared_object(VuDev *dev, VhostUserMsg *vmsg)
 }
 
 static bool
-vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
+vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
 {
-    unsigned int i;
     VhostUserMemory m = vmsg->payload.memory, *memory = &m;
-    dev->nregions = memory->nregions;
+    int prot = PROT_READ | PROT_WRITE;
+    unsigned int i;
 
-    DPRINT("Nregions: %u\n", memory->nregions);
-    for (i = 0; i < dev->nregions; i++) {
-        void *mmap_addr;
-        VhostUserMemoryRegion *msg_region = &memory->regions[i];
-        VuDevRegion *dev_region = &dev->regions[i];
-
-        DPRINT("Region %d\n", i);
-        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
-               msg_region->guest_phys_addr);
-        DPRINT("    memory_size:     0x%016"PRIx64"\n",
-               msg_region->memory_size);
-        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
-               msg_region->userspace_addr);
-        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
-               msg_region->mmap_offset);
-
-        dev_region->gpa = msg_region->guest_phys_addr;
-        dev_region->size = msg_region->memory_size;
-        dev_region->qva = msg_region->userspace_addr;
-        dev_region->mmap_offset = msg_region->mmap_offset;
-
-        /* We don't use offset argument of mmap() since the
-         * mapped address has to be page aligned, and we use huge
-         * pages.
+    if (dev->postcopy_listening) {
+        /*
          * In postcopy we're using PROT_NONE here to catch anyone
          * accessing it before we userfault
          */
-        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
-                         PROT_NONE, MAP_SHARED | MAP_NORESERVE,
-                         vmsg->fds[i], 0);
-
-        if (mmap_addr == MAP_FAILED) {
-            vu_panic(dev, "region mmap error: %s", strerror(errno));
-        } else {
-            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
-            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
-                   dev_region->mmap_addr);
-        }
-
-        /* Return the address to QEMU so that it can translate the ufd
-         * fault addresses back.
-         */
-        msg_region->userspace_addr = (uintptr_t)(mmap_addr +
-                                                 dev_region->mmap_offset);
-        close(vmsg->fds[i]);
+        prot = PROT_NONE;
     }
 
-    /* Send the message back to qemu with the addresses filled in */
-    vmsg->fd_num = 0;
-    if (!vu_send_reply(dev, dev->sock, vmsg)) {
-        vu_panic(dev, "failed to respond to set-mem-table for postcopy");
-        return false;
-    }
-
-    /* Wait for QEMU to confirm that it's registered the handler for the
-     * faults.
-     */
-    if (!dev->read_msg(dev, dev->sock, vmsg) ||
-        vmsg->size != sizeof(vmsg->payload.u64) ||
-        vmsg->payload.u64 != 0) {
-        vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
-        return false;
-    }
-
-    /* OK, now we can go and register the memory and generate faults */
-    (void)generate_faults(dev);
-
-    return false;
-}
-
-static bool
-vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
-{
-    unsigned int i;
-    VhostUserMemory m = vmsg->payload.memory, *memory = &m;
-
     vu_remove_all_mem_regs(dev);
     dev->nregions = memory->nregions;
 
-    if (dev->postcopy_listening) {
-        return vu_set_mem_table_exec_postcopy(dev, vmsg);
-    }
-
     DPRINT("Nregions: %u\n", memory->nregions);
     for (i = 0; i < dev->nregions; i++) {
         void *mmap_addr;
@@ -1051,8 +979,7 @@ vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
          * mapped address has to be page aligned, and we use huge
          * pages. */
         mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
-                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE,
-                         vmsg->fds[i], 0);
+                         prot, MAP_SHARED | MAP_NORESERVE, vmsg->fds[i], 0);
 
         if (mmap_addr == MAP_FAILED) {
             vu_panic(dev, "region mmap error: %s", strerror(errno));
@@ -1062,9 +989,41 @@ vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
                    dev_region->mmap_addr);
         }
 
+        if (dev->postcopy_listening) {
+            /*
+             * Return the address to QEMU so that it can translate the ufd
+             * fault addresses back.
+             */
+            msg_region->userspace_addr = (uintptr_t)(mmap_addr +
+                                                     dev_region->mmap_offset);
+        }
         close(vmsg->fds[i]);
     }
 
+    if (dev->postcopy_listening) {
+        /* Send the message back to qemu with the addresses filled in */
+        vmsg->fd_num = 0;
+        if (!vu_send_reply(dev, dev->sock, vmsg)) {
+            vu_panic(dev, "failed to respond to set-mem-table for postcopy");
+            return false;
+        }
+
+        /*
+         * Wait for QEMU to confirm that it's registered the handler for the
+         * faults.
+         */
+        if (!dev->read_msg(dev, dev->sock, vmsg) ||
+            vmsg->size != sizeof(vmsg->payload.u64) ||
+            vmsg->payload.u64 != 0) {
+            vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
+            return false;
+        }
+
+        /* OK, now we can go and register the memory and generate faults */
+        (void)generate_faults(dev);
+        return false;
+    }
+
     for (i = 0; i < dev->max_queues; i++) {
         if (dev->vq[i].vring.desc) {
             if (map_ring(dev, &dev->vq[i])) {
--
MST