[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v3 1/7] memory: associate DMA accesses with the initiator Device
From: |
Alexander Bulekov |
Subject: |
[PATCH v3 1/7] memory: associate DMA accesses with the initiator Device |
Date: |
Fri, 28 Oct 2022 15:16:42 -0400 |
Add transitional DMA APIs which associate accesses with the device
initiating them. The modified APIs maintain a "MemReentrancyGuard" in
the DeviceState, which is used to prevent DMA re-entrancy issues.
The MemReentrancyGuard is set/checked when entering IO handlers and when
initiating a DMA access. This catches two patterns of re-entrancy:
1.) mmio -> dma -> mmio case
2.) bh -> dma write -> mmio case
These issues have led to problems such as stack-exhaustion and
use-after-frees.
Summary of the problem from Peter Maydell:
https://lore.kernel.org/qemu-devel/CAFEAcA_23vc7hE3iaM-JVA6W38LK4hJoWae5KcknhPRD5fPBZA@mail.gmail.com
Signed-off-by: Alexander Bulekov <alxndr@bu.edu>
---
include/hw/qdev-core.h | 2 ++
include/sysemu/dma.h | 41 +++++++++++++++++++++++++++++++++++++++++
softmmu/memory.c | 15 +++++++++++++++
softmmu/trace-events | 1 +
4 files changed, 59 insertions(+)
diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h
index 785dd5a56e..ab78d211af 100644
--- a/include/hw/qdev-core.h
+++ b/include/hw/qdev-core.h
@@ -8,6 +8,7 @@
#include "qom/object.h"
#include "hw/hotplug.h"
#include "hw/resettable.h"
+#include "sysemu/dma.h"
enum {
DEV_NVECTORS_UNSPECIFIED = -1,
@@ -194,6 +195,7 @@ struct DeviceState {
int alias_required_for_version;
ResettableState reset;
GSList *unplug_blockers;
+ MemReentrancyGuard mem_reentrancy_guard;
};
struct DeviceListener {
diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
index a1ac5bc1b5..879b666bbb 100644
--- a/include/sysemu/dma.h
+++ b/include/sysemu/dma.h
@@ -15,6 +15,10 @@
#include "block/block.h"
#include "block/accounting.h"
+typedef struct {
+ bool engaged_in_io;
+} MemReentrancyGuard;
+
typedef enum {
DMA_DIRECTION_TO_DEVICE = 0,
DMA_DIRECTION_FROM_DEVICE = 1,
@@ -321,4 +325,41 @@ void dma_acct_start(BlockBackend *blk, BlockAcctCookie
*cookie,
uint64_t dma_aligned_pow2_mask(uint64_t start, uint64_t end,
int max_addr_bits);
+/*
+ * Wrap a DMA call so that the initiating device's MemReentrancyGuard is
+ * engaged for the duration of the call and restored afterwards.  Saving and
+ * restoring the prior state (rather than clearing) keeps nested guarded
+ * calls well-behaved.
+ *
+ * Note: 'dev' is evaluated more than once; callers must pass a side-effect
+ * free expression.
+ */
+#define REENTRANCY_GUARD(func, ret_type, dev, ...) \
+    ({\
+        ret_type retval;\
+        MemReentrancyGuard prior_guard_state = dev->mem_reentrancy_guard;\
+        dev->mem_reentrancy_guard.engaged_in_io = true;\
+        retval = func(__VA_ARGS__);\
+        dev->mem_reentrancy_guard = prior_guard_state;\
+        retval;\
+    })
+/* As REENTRANCY_GUARD, for functions returning void. */
+#define REENTRANCY_GUARD_NORET(func, dev, ...) \
+    ({\
+        MemReentrancyGuard prior_guard_state = dev->mem_reentrancy_guard;\
+        dev->mem_reentrancy_guard.engaged_in_io = true;\
+        func(__VA_ARGS__);\
+        dev->mem_reentrancy_guard = prior_guard_state;\
+    })
+/*
+ * Guarded variants of the DMA APIs: identical to the unguarded versions,
+ * except that the access is attributed to 'dev' via its re-entrancy guard.
+ */
+#define dma_memory_rw_guarded(dev, ...) \
+    REENTRANCY_GUARD(dma_memory_rw, MemTxResult, dev, __VA_ARGS__)
+#define dma_memory_read_guarded(dev, ...) \
+    REENTRANCY_GUARD(dma_memory_read, MemTxResult, dev, __VA_ARGS__)
+#define dma_memory_write_guarded(dev, ...) \
+    REENTRANCY_GUARD(dma_memory_write, MemTxResult, dev, __VA_ARGS__)
+#define dma_memory_set_guarded(dev, ...) \
+    REENTRANCY_GUARD(dma_memory_set, MemTxResult, dev, __VA_ARGS__)
+#define dma_memory_map_guarded(dev, ...) \
+    REENTRANCY_GUARD(dma_memory_map, void*, dev, __VA_ARGS__)
+#define dma_memory_unmap_guarded(dev, ...) \
+    REENTRANCY_GUARD_NORET(dma_memory_unmap, dev, __VA_ARGS__)
+#define ldub_dma_guarded(dev, ...) \
+    REENTRANCY_GUARD(ldub_dma, MemTxResult, dev, __VA_ARGS__)
+#define stb_dma_guarded(dev, ...) \
+    REENTRANCY_GUARD(stb_dma, MemTxResult, dev, __VA_ARGS__)
+#define dma_buf_read_guarded(dev, ...) \
+    REENTRANCY_GUARD(dma_buf_read, MemTxResult, dev, __VA_ARGS__)
+/* Fixed: must wrap dma_buf_write, not dma_buf_read (copy-paste error) */
+#define dma_buf_write_guarded(dev, ...) \
+    REENTRANCY_GUARD(dma_buf_write, MemTxResult, dev, __VA_ARGS__)
+
#endif
diff --git a/softmmu/memory.c b/softmmu/memory.c
index 7ba2048836..c44dc75149 100644
--- a/softmmu/memory.c
+++ b/softmmu/memory.c
@@ -532,6 +532,7 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
uint64_t access_mask;
unsigned access_size;
unsigned i;
+ DeviceState *dev = NULL;
MemTxResult r = MEMTX_OK;
if (!access_size_min) {
@@ -541,6 +542,17 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
access_size_max = 4;
}
+    /*
+     * Do not allow more than one simultaneous access to a device's
+     * IO Regions.
+     */
+    if (mr->owner &&
+        !mr->ram_device && !mr->ram && !mr->rom_device && !mr->readonly) {
+        dev = (DeviceState *) object_dynamic_cast(mr->owner, TYPE_DEVICE);
+        /*
+         * object_dynamic_cast() returns NULL when the owner is not a
+         * Device (e.g. machine-owned regions); only guard Device owners.
+         */
+        if (dev) {
+            if (dev->mem_reentrancy_guard.engaged_in_io) {
+                trace_memory_region_reentrant_io(get_cpu_index(), mr, addr,
+                                                 size);
+                return MEMTX_ERROR;
+            }
+            dev->mem_reentrancy_guard.engaged_in_io = true;
+        }
+    }
+
/* FIXME: support unaligned access? */
access_size = MAX(MIN(size, access_size_max), access_size_min);
access_mask = MAKE_64BIT_MASK(0, access_size * 8);
@@ -555,6 +567,9 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
access_mask, attrs);
}
}
+ if (dev) {
+ dev->mem_reentrancy_guard.engaged_in_io = false;
+ }
return r;
}
diff --git a/softmmu/trace-events b/softmmu/trace-events
index 22606dc27b..62d04ea9a7 100644
--- a/softmmu/trace-events
+++ b/softmmu/trace-events
@@ -13,6 +13,7 @@ memory_region_ops_read(int cpu_index, void *mr, uint64_t
addr, uint64_t value, u
memory_region_ops_write(int cpu_index, void *mr, uint64_t addr, uint64_t
value, unsigned size, const char *name) "cpu %d mr %p addr 0x%"PRIx64" value
0x%"PRIx64" size %u name '%s'"
memory_region_subpage_read(int cpu_index, void *mr, uint64_t offset, uint64_t
value, unsigned size) "cpu %d mr %p offset 0x%"PRIx64" value 0x%"PRIx64" size
%u"
memory_region_subpage_write(int cpu_index, void *mr, uint64_t offset, uint64_t
value, unsigned size) "cpu %d mr %p offset 0x%"PRIx64" value 0x%"PRIx64" size
%u"
+memory_region_reentrant_io(int cpu_index, void *mr, uint64_t offset, unsigned
size) "cpu %d mr %p offset 0x%"PRIx64" size %u"
memory_region_ram_device_read(int cpu_index, void *mr, uint64_t addr, uint64_t
value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
memory_region_ram_device_write(int cpu_index, void *mr, uint64_t addr,
uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64"
size %u"
memory_region_sync_dirty(const char *mr, const char *listener, int global) "mr
'%s' listener '%s' synced (global=%d)"
--
2.27.0
- [PATCH v3 0/7] memory: prevent dma-reentracy issues, Alexander Bulekov, 2022/10/28
- [PATCH v3 1/7] memory: associate DMA accesses with the initiator Device,
Alexander Bulekov <=
- [PATCH v3 2/7] dma-helpers: switch to guarded DMA accesses, Alexander Bulekov, 2022/10/28
- [PATCH v3 6/7] xhci: switch to guarded DMA accesses, Alexander Bulekov, 2022/10/28
- [PATCH v3 3/7] ahci: switch to guarded DMA acccesses, Alexander Bulekov, 2022/10/28
- [PATCH v3 4/7] sdhci: switch to guarded DMA accesses, Alexander Bulekov, 2022/10/28
- [PATCH v3 7/7] usb/libhw: switch to guarded DMA accesses, Alexander Bulekov, 2022/10/28
- [PATCH v3 5/7] ehci: switch to guarded DMA accesses, Alexander Bulekov, 2022/10/28