[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 10/16] util/vfio-helpers: Let qemu_vfio_dma_map() propagate Error
From: Philippe Mathieu-Daudé
Subject: [PATCH 10/16] util/vfio-helpers: Let qemu_vfio_dma_map() propagate Error
Date: Tue, 20 Oct 2020 19:24:22 +0200
Currently qemu_vfio_dma_map() displays errors on stderr.
When using management interface, this information is simply
lost. Pass qemu_vfio_dma_map() an Error* argument so it can
propagate the error to callers.
Reviewed-by: Fam Zheng <fam@euphon.net>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
include/qemu/vfio-helpers.h | 2 +-
block/nvme.c | 14 +++++++-------
util/vfio-helpers.c | 12 +++++++-----
3 files changed, 15 insertions(+), 13 deletions(-)
diff --git a/include/qemu/vfio-helpers.h b/include/qemu/vfio-helpers.h
index 5cb346d8e67..4b97a904e93 100644
--- a/include/qemu/vfio-helpers.h
+++ b/include/qemu/vfio-helpers.h
@@ -19,7 +19,7 @@ QEMUVFIOState *qemu_vfio_open_pci(const char *device, size_t *min_page_size,
Error **errp);
void qemu_vfio_close(QEMUVFIOState *s);
int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
- bool temporary, uint64_t *iova_list);
+ bool temporary, uint64_t *iova_list, Error **errp);
int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s);
void qemu_vfio_dma_unmap(QEMUVFIOState *s, void *host);
void *qemu_vfio_pci_map_bar(QEMUVFIOState *s, int index,
diff --git a/block/nvme.c b/block/nvme.c
index 8335f5d70dd..428cda620df 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -167,9 +167,9 @@ static void nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
return;
}
memset(q->queue, 0, bytes);
- r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
+ r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova, errp);
if (r) {
- error_setg(errp, "Cannot map queue");
+ error_prepend(errp, "Cannot map queue: ");
}
}
@@ -223,7 +223,7 @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
s->page_size * NVME_NUM_REQS,
- false, &prp_list_iova);
+ false, &prp_list_iova, errp);
if (r) {
goto fail;
}
@@ -514,9 +514,9 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
error_setg(errp, "Cannot allocate buffer for identify response");
goto out;
}
- r = qemu_vfio_dma_map(s->vfio, id, sizeof(*id), true, &iova);
+ r = qemu_vfio_dma_map(s->vfio, id, sizeof(*id), true, &iova, errp);
if (r) {
- error_setg(errp, "Cannot map buffer for DMA");
+ error_prepend(errp, "Cannot map buffer for DMA: ");
goto out;
}
@@ -990,7 +990,7 @@ try_map:
r = qemu_vfio_dma_map(s->vfio,
qiov->iov[i].iov_base,
qiov->iov[i].iov_len,
- true, &iova);
+ true, &iova, NULL);
if (r == -ENOMEM && retry) {
retry = false;
trace_nvme_dma_flush_queue_wait(s);
@@ -1437,7 +1437,7 @@ static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
int ret;
BDRVNVMeState *s = bs->opaque;
- ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
+ ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL, NULL);
if (ret) {
/* FIXME: we may run out of IOVA addresses after repeated
* bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
index 6a5100f4892..8c075d9aae7 100644
--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -486,7 +486,7 @@ static void qemu_vfio_ram_block_added(RAMBlockNotifier *n,
{
QEMUVFIOState *s = container_of(n, QEMUVFIOState, ram_notifier);
trace_qemu_vfio_ram_block_added(s, host, size);
- qemu_vfio_dma_map(s, host, size, false, NULL);
+ qemu_vfio_dma_map(s, host, size, false, NULL, NULL);
}
static void qemu_vfio_ram_block_removed(RAMBlockNotifier *n,
@@ -501,6 +501,7 @@ static void qemu_vfio_ram_block_removed(RAMBlockNotifier *n,
static int qemu_vfio_init_ramblock(RAMBlock *rb, void *opaque)
{
+ Error *local_err = NULL;
void *host_addr = qemu_ram_get_host_addr(rb);
ram_addr_t length = qemu_ram_get_used_length(rb);
int ret;
@@ -509,10 +510,11 @@ static int qemu_vfio_init_ramblock(RAMBlock *rb, void *opaque)
if (!host_addr) {
return 0;
}
- ret = qemu_vfio_dma_map(s, host_addr, length, false, NULL);
+ ret = qemu_vfio_dma_map(s, host_addr, length, false, NULL, &local_err);
if (ret) {
- fprintf(stderr, "qemu_vfio_init_ramblock: failed %p %" PRId64 "\n",
- host_addr, (uint64_t)length);
+        error_reportf_err(local_err,
+                          "qemu_vfio_init_ramblock: failed %p %" PRIu64 ": ",
+                          host_addr, (uint64_t)length);
}
return 0;
}
@@ -754,7 +756,7 @@ qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
* mapping status within this area is not allowed).
*/
int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
- bool temporary, uint64_t *iova)
+ bool temporary, uint64_t *iova, Error **errp)
{
int ret = 0;
int index;
--
2.26.2
- [PATCH 05/16] util/vfio-helpers: Trace PCI I/O config accesses, (continued)
- [PATCH 05/16] util/vfio-helpers: Trace PCI I/O config accesses, Philippe Mathieu-Daudé, 2020/10/20
- [PATCH 06/16] util/vfio-helpers: Trace PCI BAR region info, Philippe Mathieu-Daudé, 2020/10/20
- [PATCH 07/16] util/vfio-helpers: Trace where BARs are mapped, Philippe Mathieu-Daudé, 2020/10/20
- [PATCH 08/16] util/vfio-helpers: Improve DMA trace events, Philippe Mathieu-Daudé, 2020/10/20
- [PATCH 09/16] util/vfio-helpers: Convert vfio_dump_mapping to trace events, Philippe Mathieu-Daudé, 2020/10/20
- [PATCH 10/16] util/vfio-helpers: Let qemu_vfio_dma_map() propagate Error, Philippe Mathieu-Daudé <=
- [PATCH 11/16] util/vfio-helpers: Let qemu_vfio_do_mapping() propagate Error, Philippe Mathieu-Daudé, 2020/10/20
- [PATCH 12/16] util/vfio-helpers: Let qemu_vfio_verify_mappings() use error_report(), Philippe Mathieu-Daudé, 2020/10/20
- [PATCH 13/16] util/vfio-helpers: Introduce qemu_vfio_pci_msix_init_irqs(), Philippe Mathieu-Daudé, 2020/10/20
- [PATCH 15/16] block/nvme: Switch to using the MSIX API, Philippe Mathieu-Daudé, 2020/10/20
- [PATCH 14/16] util/vfio-helpers: Introduce qemu_vfio_pci_msix_set_irq(), Philippe Mathieu-Daudé, 2020/10/20