Re: [PATCH v9 2/2] vhost-vdpa: add support for vIOMMU
From: Michael S. Tsirkin
Subject: Re: [PATCH v9 2/2] vhost-vdpa: add support for vIOMMU
Date: Mon, 31 Oct 2022 09:07:25 -0400
On Mon, Oct 31, 2022 at 08:56:22PM +0800, Cindy Lu wrote:
> On Mon, 31 Oct 2022 at 16:30, Cindy Lu <lulu@redhat.com> wrote:
> >
> > On Mon, 31 Oct 2022 at 15:20, Michael S. Tsirkin <mst@redhat.com> wrote:
> > >
> > > On Mon, Oct 31, 2022 at 03:15:14PM +0800, Cindy Lu wrote:
> > > > ,
> > > >
> > > >
> > > > On Mon, 31 Oct 2022 at 15:04, Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > >
> > > > > On Mon, Oct 31, 2022 at 11:10:20AM +0800, Cindy Lu wrote:
> > > > > > Add support for vIOMMU. Add new functions to deal with iommu MRs:
> > > > > > - during iommu_region_add, register a dedicated IOMMU notifier
> > > > > >   and store all notifiers in a list.
> > > > > > - during iommu_region_del, look up and delete the matching IOMMU
> > > > > >   notifier from the list.
> > > > > >
> > > > > > Verified with the vp_vdpa and vdpa_sim_net drivers.
> > > > > >
> > > > > > Signed-off-by: Cindy Lu <lulu@redhat.com>
> > > > > > ---
> > > > > >  hw/virtio/vhost-vdpa.c         | 123 ++++++++++++++++++++++++++++++---
> > > > > >  include/hw/virtio/vhost-vdpa.h |  10 +++
> > > > > >  2 files changed, 122 insertions(+), 11 deletions(-)
> > > > > >
> > > > > > diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> > > > > > index 3ff9ce3501..dcfaaccfa9 100644
> > > > > > --- a/hw/virtio/vhost-vdpa.c
> > > > > > +++ b/hw/virtio/vhost-vdpa.c
> > > > > > @@ -26,6 +26,7 @@
> > > > > > #include "cpu.h"
> > > > > > #include "trace.h"
> > > > > > #include "qapi/error.h"
> > > > > > +#include "hw/virtio/virtio-access.h"
> > > > > >
> > > > > > /*
> > > > > >  * Return one past the end of the end of section. Be careful with uint64_t
> > > > > > @@ -44,7 +45,6 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
> > > > > >                                              uint64_t iova_min,
> > > > > >                                              uint64_t iova_max)
> > > > > >  {
> > > > > > -    Int128 llend;
> > > > > >
> > > > > >      if ((!memory_region_is_ram(section->mr) &&
> > > > > >           !memory_region_is_iommu(section->mr)) ||
> > > > > > @@ -61,14 +61,6 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
> > > > > >          return true;
> > > > > >      }
> > > > > >
> > > > > > -    llend = vhost_vdpa_section_end(section);
> > > > > > -    if (int128_gt(llend, int128_make64(iova_max))) {
> > > > > > -        error_report("RAM section out of device range (max=0x%" PRIx64
> > > > > > -                     ", end addr=0x%" PRIx64 ")",
> > > > > > -                     iova_max, int128_get64(llend));
> > > > > > -        return true;
> > > > > > -    }
> > > > > > -
> > > > > > return false;
> > > > > > }
> > > > > >
> > > > >
> > > > > I couldn't figure out why we are completely removing this.
> > > > > So this function is now checking iova_min but not iova_max?
> > > > > Seems asymmetrical.
> > > > >
> > > > this is because there is an assert inside int128_get64, so I just skip
> > > > this part of the check:
> > > > static inline uint64_t int128_get64(Int128 a)
> > > > {
> > > >     uint64_t r = a;
> > > >     assert(r == a);
> > > >     return r;
> > > > }
> > >
> > >
> > > ?
> > >
> > > Could not parse this. You mean assert? And removing functionality
> > > because you don't like an error message does not make sense.
> > > So find another way to print it?
> > >
> > sorry for my mistake here
> > I removed this part because it reports an error when an iommu MR is added.
> > Also, there is no such check in vfio;
> > it seems llend is always smaller than iova_max in an iommu domain.
> > not sure if we can remove it for now; I will add more comments later?
> > Thanks
> > cindy
> sorry, here I mean llend is larger than iova_max, so the iommu MR
> cannot pass the check; not sure if we can remove this check for now?
> Thanks
> Cindy
yes, split it out with proper documentation first.
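
For reference, a minimal sketch of how the device-range check could be kept
for plain RAM sections while still letting IOMMU regions through (this only
reuses the code removed above, slotted into
vhost_vdpa_listener_skipped_section(); it is an illustration, not the actual
patch):

    /* Sketch only, not from the patch: skip the iova_max check for IOMMU
     * regions, whose translated ranges are validated per-mapping in the
     * IOMMU map notifier instead. */
    if (!memory_region_is_iommu(section->mr)) {
        Int128 llend = vhost_vdpa_section_end(section);

        if (int128_gt(llend, int128_make64(iova_max))) {
            error_report("RAM section out of device range (max=0x%" PRIx64
                         ", end addr=0x%" PRIx64 ")",
                         iova_max, int128_get64(llend));
            return true;
        }
    }
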
> > >
> > > >
> > > > >
> > > > > > @@ -173,6 +165,106 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener)
> > > > > >      v->iotlb_batch_begin_sent = false;
> > > > > >  }
> > > > > >
> > > > > > +static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
> > > > > > +{
> > > > > > +    struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n);
> > > > > > +
> > > > > > +    hwaddr iova = iotlb->iova + iommu->iommu_offset;
> > > > > > +    struct vhost_vdpa *v = iommu->dev;
> > > > > > +    void *vaddr;
> > > > > > +    int ret;
> > > > > > +
> > > > > > +    if (iotlb->target_as != &address_space_memory) {
> > > > > > +        error_report("Wrong target AS \"%s\", only system memory is allowed",
> > > > > > +                     iotlb->target_as->name ? iotlb->target_as->name : "none");
> > > > > > +        return;
> > > > > > +    }
> > > > > > +    RCU_READ_LOCK_GUARD();
> > > > > > +    vhost_vdpa_iotlb_batch_begin_once(v);
> > > > > > +
> > > > > > +    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
> > > > > > +        bool read_only;
> > > > > > +
> > > > > > +        if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL)) {
> > > > > > +            return;
> > > > > > +        }
> > > > > > +        ret =
> > > > > > +            vhost_vdpa_dma_map(v, iova, iotlb->addr_mask + 1, vaddr, read_only);
> > > > > > +        if (ret) {
> > > > > > +            error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", "
> > > > > > +                         "0x%" HWADDR_PRIx ", %p) = %d (%m)",
> > > > > > +                         v, iova, iotlb->addr_mask + 1, vaddr, ret);
> > > > > > +        }
> > > > > > +    } else {
> > > > > > +        ret = vhost_vdpa_dma_unmap(v, iova, iotlb->addr_mask + 1);
> > > > > > +        if (ret) {
> > > > > > +            error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
> > > > > > +                         "0x%" HWADDR_PRIx ") = %d (%m)",
> > > > > > +                         v, iova, iotlb->addr_mask + 1, ret);
> > > > > > +        }
> > > > > > +    }
> > > > > > +}
> > > > > > +
> > > > > > +static void vhost_vdpa_iommu_region_add(MemoryListener *listener,
> > > > > > +                                        MemoryRegionSection *section)
> > > > > > +{
> > > > > > +    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
> > > > > > +
> > > > > > +    struct vdpa_iommu *iommu;
> > > > > > +    Int128 end;
> > > > > > +    int iommu_idx;
> > > > > > +    IOMMUMemoryRegion *iommu_mr;
> > > > > > +    int ret;
> > > > > > +
> > > > > > +    iommu_mr = IOMMU_MEMORY_REGION(section->mr);
> > > > > > +
> > > > > > +    iommu = g_malloc0(sizeof(*iommu));
> > > > > > +    end = int128_add(int128_make64(section->offset_within_region),
> > > > > > +                     section->size);
> > > > > > +    end = int128_sub(end, int128_one());
> > > > > > +    iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
> > > > > > +                                                   MEMTXATTRS_UNSPECIFIED);
> > > > > > +
> > > > > > +    iommu->iommu_mr = iommu_mr;
> > > > > > +
> > > > > > +    iommu_notifier_init(
> > > > > > +        &iommu->n, vhost_vdpa_iommu_map_notify, IOMMU_NOTIFIER_IOTLB_EVENTS,
> > > > > > +        section->offset_within_region, int128_get64(end), iommu_idx);
> > > > > > +    iommu->iommu_offset =
> > > > > > +        section->offset_within_address_space - section->offset_within_region;
> > > > > > +    iommu->dev = v;
> > > > > > +
> > > > > > +    ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL);
> > > > > > +    if (ret) {
> > > > > > +        g_free(iommu);
> > > > > > +        return;
> > > > > > +    }
> > > > > > +
> > > > > > +    QLIST_INSERT_HEAD(&v->iommu_list, iommu, iommu_next);
> > > > > > +    memory_region_iommu_replay(iommu->iommu_mr, &iommu->n);
> > > > > > +
> > > > > > +    return;
> > > > > > +}
> > > > > > +
> > > > > > +static void vhost_vdpa_iommu_region_del(MemoryListener *listener,
> > > > > > +                                        MemoryRegionSection *section)
> > > > > > +{
> > > > > > +    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
> > > > > > +
> > > > > > +    struct vdpa_iommu *iommu;
> > > > > > +
> > > > > > +    QLIST_FOREACH(iommu, &v->iommu_list, iommu_next)
> > > > > > +    {
> > > > > > +        if (MEMORY_REGION(iommu->iommu_mr) == section->mr &&
> > > > > > +            iommu->n.start == section->offset_within_region) {
> > > > > > +            memory_region_unregister_iommu_notifier(section->mr, &iommu->n);
> > > > > > +            QLIST_REMOVE(iommu, iommu_next);
> > > > > > +            g_free(iommu);
> > > > > > +            break;
> > > > > > +        }
> > > > > > +    }
> > > > > > +}
> > > > > > +
> > > > > >  static void vhost_vdpa_listener_region_add(MemoryListener *listener,
> > > > > >                                             MemoryRegionSection *section)
> > > > > >  {
> > > > > > @@ -186,6 +278,10 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
> > > > > >                                              v->iova_range.last)) {
> > > > > >          return;
> > > > > >      }
> > > > > > +    if (memory_region_is_iommu(section->mr)) {
> > > > > > +        vhost_vdpa_iommu_region_add(listener, section);
> > > > > > +        return;
> > > > > > +    }
> > > > > >
> > > > > >      if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
> > > > > >                   (section->offset_within_region & ~TARGET_PAGE_MASK))) {
> > > > > > @@ -260,6 +356,10 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
> > > > > >                                              v->iova_range.last)) {
> > > > > >          return;
> > > > > >      }
> > > > > > +    if (memory_region_is_iommu(section->mr)) {
> > > > > > +        vhost_vdpa_iommu_region_del(listener, section);
> > > > > > +        return;
> > > > > > +    }
> > > > > >
> > > > > >      if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
> > > > > >                   (section->offset_within_region & ~TARGET_PAGE_MASK))) {
> > > > > > @@ -312,6 +412,7 @@ static const MemoryListener vhost_vdpa_memory_listener = {
> > > > > >      .region_del = vhost_vdpa_listener_region_del,
> > > > > >  };
> > > > > >
> > > > > > +
> > > > > >  static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
> > > > > >                             void *arg)
> > > > > >  {
> > > > >
> > > > >
> > > > > This change is not necessary.
> > > > >
> > > > will fix this
> > > > > > @@ -587,7 +688,6 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
> > > > > >      v = dev->opaque;
> > > > > >      trace_vhost_vdpa_cleanup(dev, v);
> > > > > >      vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
> > > > > > -    memory_listener_unregister(&v->listener);
> > > > > >      vhost_vdpa_svq_cleanup(dev);
> > > > > >
> > > > > > dev->opaque = NULL;
> > > > > > @@ -1127,7 +1227,8 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
> > > > > >      }
> > > > > >
> > > > > >      if (started) {
> > > > > > -        memory_listener_register(&v->listener, &address_space_memory);
> > > > > > +        memory_listener_register(&v->listener, dev->vdev->dma_as);
> > > > > > +
> > > > > >          return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
> > > > > >      } else {
> > > > > >          vhost_vdpa_reset_device(dev);
> > > > > > diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> > > > > > index d10a89303e..64a46e37cb 100644
> > > > > > --- a/include/hw/virtio/vhost-vdpa.h
> > > > > > +++ b/include/hw/virtio/vhost-vdpa.h
> > > > > > @@ -41,8 +41,18 @@ typedef struct vhost_vdpa {
> > > > > >      void *shadow_vq_ops_opaque;
> > > > > >      struct vhost_dev *dev;
> > > > > >      VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
> > > > > > +    QLIST_HEAD(, vdpa_iommu) iommu_list;
> > > > > > +    IOMMUNotifier n;
> > > > > >  } VhostVDPA;
> > > > > >
> > > > > > +struct vdpa_iommu {
> > > > > > +    struct vhost_vdpa *dev;
> > > > > > +    IOMMUMemoryRegion *iommu_mr;
> > > > > > +    hwaddr iommu_offset;
> > > > > > +    IOMMUNotifier n;
> > > > > > +    QLIST_ENTRY(vdpa_iommu) iommu_next;
> > > > > > +};
> > > > > > +
> > > > >
> > > > > You need to add a typedef as per coding style.
> > > > >
> > > > will fix this
> > > > Thanks
> > > > Cindy
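
A minimal sketch of the typedef the coding-style comment above asks for (the
CamelCase name VDPAIOMMUState is only an illustration, not taken from the
patch):

    /* Hypothetical coding-style fix: give the struct a CamelCase typedef. */
    typedef struct VDPAIOMMUState {
        struct vhost_vdpa *dev;
        IOMMUMemoryRegion *iommu_mr;
        hwaddr iommu_offset;
        IOMMUNotifier n;
        QLIST_ENTRY(VDPAIOMMUState) iommu_next;
    } VDPAIOMMUState;
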
> > > > > >  int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
> > > > > >                         void *vaddr, bool readonly);
> > > > > >  int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size);
> > > > > > --
> > > > > > 2.34.3
> > > > >
> > >