[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v2 20/51] hw/nvme: fix aio cancel in dsm
From: Juan Quintela
Subject: [PATCH v2 20/51] hw/nvme: fix aio cancel in dsm
Date: Mon, 5 Dec 2022 10:51:57 +0100
From: Klaus Jensen <k.jensen@samsung.com>
When the DSM operation is cancelled asynchronously, we set iocb->ret to
-ECANCELED. However, the callback function only checks the return value
of the completed aio, which may have completed successfully prior to the
cancellation and thus the callback ends up continuing the dsm operation
instead of bailing out. Fix this.
Secondly, fix a potential use-after-free by removing the bottom half and
enqueuing the completion directly.
Fixes: d7d1474fd85d ("hw/nvme: reimplement dsm to allow cancellation")
Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
---
hw/nvme/ctrl.c | 34 ++++++++--------------------------
1 file changed, 8 insertions(+), 26 deletions(-)
diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index bf4abf73f7..e847b89461 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -2329,7 +2329,6 @@ typedef struct NvmeDSMAIOCB {
BlockAIOCB common;
BlockAIOCB *aiocb;
NvmeRequest *req;
- QEMUBH *bh;
int ret;
NvmeDsmRange *range;
@@ -2351,7 +2350,7 @@ static void nvme_dsm_cancel(BlockAIOCB *aiocb)
} else {
/*
* We only reach this if nvme_dsm_cancel() has already been called or
- * the command ran to completion and nvme_dsm_bh is scheduled to run.
+ * the command ran to completion.
*/
assert(iocb->idx == iocb->nr);
}
@@ -2362,17 +2361,6 @@ static const AIOCBInfo nvme_dsm_aiocb_info = {
.cancel_async = nvme_dsm_cancel,
};
-static void nvme_dsm_bh(void *opaque)
-{
- NvmeDSMAIOCB *iocb = opaque;
-
- iocb->common.cb(iocb->common.opaque, iocb->ret);
-
- qemu_bh_delete(iocb->bh);
- iocb->bh = NULL;
- qemu_aio_unref(iocb);
-}
-
static void nvme_dsm_cb(void *opaque, int ret);
static void nvme_dsm_md_cb(void *opaque, int ret)
@@ -2384,16 +2372,10 @@ static void nvme_dsm_md_cb(void *opaque, int ret)
uint64_t slba;
uint32_t nlb;
- if (ret < 0) {
- iocb->ret = ret;
+ if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) {
goto done;
}
- if (!ns->lbaf.ms) {
- nvme_dsm_cb(iocb, 0);
- return;
- }
-
range = &iocb->range[iocb->idx - 1];
slba = le64_to_cpu(range->slba);
nlb = le32_to_cpu(range->nlb);
@@ -2406,7 +2388,6 @@ static void nvme_dsm_md_cb(void *opaque, int ret)
ret = nvme_block_status_all(ns, slba, nlb, BDRV_BLOCK_ZERO);
if (ret) {
if (ret < 0) {
- iocb->ret = ret;
goto done;
}
@@ -2420,8 +2401,7 @@ static void nvme_dsm_md_cb(void *opaque, int ret)
return;
done:
- iocb->aiocb = NULL;
- qemu_bh_schedule(iocb->bh);
+ nvme_dsm_cb(iocb, ret);
}
static void nvme_dsm_cb(void *opaque, int ret)
@@ -2434,7 +2414,9 @@ static void nvme_dsm_cb(void *opaque, int ret)
uint64_t slba;
uint32_t nlb;
- if (ret < 0) {
+ if (iocb->ret < 0) {
+ goto done;
+ } else if (ret < 0) {
iocb->ret = ret;
goto done;
}
@@ -2468,7 +2450,8 @@ next:
done:
iocb->aiocb = NULL;
- qemu_bh_schedule(iocb->bh);
+ iocb->common.cb(iocb->common.opaque, iocb->ret);
+ qemu_aio_unref(iocb);
}
static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
@@ -2486,7 +2469,6 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
nvme_misc_cb, req);
iocb->req = req;
- iocb->bh = qemu_bh_new(nvme_dsm_bh, iocb);
iocb->ret = 0;
iocb->range = g_new(NvmeDsmRange, nr);
iocb->nr = nr;
--
2.38.1
- [PATCH v2 10/51] target/arm: Set TCGCPUOps.restore_state_to_opc for v7m, (continued)
- [PATCH v2 10/51] target/arm: Set TCGCPUOps.restore_state_to_opc for v7m, Juan Quintela, 2022/12/05
- [PATCH v2 11/51] Update VERSION for v7.2.0-rc3, Juan Quintela, 2022/12/05
- [PATCH v2 12/51] tests/qtests: override "force-legacy" for gpio virtio-mmio tests, Juan Quintela, 2022/12/05
- [PATCH v2 13/51] vhost: enable vrings in vhost_dev_start() for vhost-user devices, Juan Quintela, 2022/12/05
- [PATCH v2 15/51] hw/virtio: generalise CHR_EVENT_CLOSED handling, Juan Quintela, 2022/12/05
- [PATCH v2 14/51] hw/virtio: add started_vu status field to vhost-user-gpio, Juan Quintela, 2022/12/05
- [PATCH v2 16/51] include/hw: VM state takes precedence in virtio_device_should_start, Juan Quintela, 2022/12/05
- [PATCH v2 17/51] hw/nvme: fix aio cancel in format, Juan Quintela, 2022/12/05
- [PATCH v2 18/51] hw/nvme: fix aio cancel in flush, Juan Quintela, 2022/12/05
- [PATCH v2 19/51] hw/nvme: fix aio cancel in zone reset, Juan Quintela, 2022/12/05
- [PATCH v2 20/51] hw/nvme: fix aio cancel in dsm,
Juan Quintela <=
- [PATCH v2 21/51] hw/nvme: remove copy bh scheduling, Juan Quintela, 2022/12/05
- [PATCH v2 23/51] target/i386: Always completely initialize TranslateFault, Juan Quintela, 2022/12/05
- [PATCH v2 22/51] target/i386: allow MMX instructions with CR4.OSFXSR=0, Juan Quintela, 2022/12/05
- [PATCH v2 24/51] hw/loongarch/virt: Add cfi01 pflash device, Juan Quintela, 2022/12/05
- [PATCH v2 25/51] tests/qtest/migration-test: Fix unlink error and memory leaks, Juan Quintela, 2022/12/05
- [PATCH v2 26/51] target/s390x/tcg: Fix and improve the SACF instruction, Juan Quintela, 2022/12/05
- [PATCH v2 27/51] hw/display/next-fb: Fix comment typo, Juan Quintela, 2022/12/05
- [PATCH v2 28/51] multifd: Create page_size fields into both MultiFD{Recv, Send}Params, Juan Quintela, 2022/12/05
- [PATCH v2 29/51] multifd: Create page_count fields into both MultiFD{Recv, Send}Params, Juan Quintela, 2022/12/05
- [PATCH v2 30/51] migration: Export ram_transferred_ram(), Juan Quintela, 2022/12/05