[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [RFC PATCH 5/7] virtio-fs: Fill in slave commands for mapping
From: Dr. David Alan Gilbert (git)
Subject: [Qemu-devel] [RFC PATCH 5/7] virtio-fs: Fill in slave commands for mapping
Date: Mon, 10 Dec 2018 17:31:49 +0000
From: "Dr. David Alan Gilbert" <address@hidden>
Fill in definitions for map, unmap and sync commands.
Signed-off-by: Dr. David Alan Gilbert <address@hidden>
---
hw/virtio/vhost-user-fs.c | 129 ++++++++++++++++++++++++++++++++++++--
1 file changed, 123 insertions(+), 6 deletions(-)
diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c
index da70d9cd2c..bbb15477e5 100644
--- a/hw/virtio/vhost-user-fs.c
+++ b/hw/virtio/vhost-user-fs.c
@@ -24,20 +24,137 @@
int vhost_user_fs_slave_map(struct vhost_dev *dev, VhostUserFSSlaveMsg *sm,
int fd)
{
- /* TODO */
- return -1;
+ /*
+ * Handle a slave "map" request: mmap regions of the slave-supplied fd
+ * into the device's cache memory region at the offsets given in sm.
+ * Returns 0 on success, -1 on any failure (after rolling back all
+ * entries of this message via vhost_user_fs_slave_unmap).
+ */
+ VHostUserFS *fs = VHOST_USER_FS(dev->vdev);
+ size_t cache_size = fs->conf.cache_size;
+ /* Host virtual address of the start of the cache window */
+ void *cache_host = memory_region_get_ram_ptr(&fs->cache);
+
+ unsigned int i;
+ int res = 0;
+
+ /* A map request is meaningless without a file descriptor to map */
+ if (fd < 0) {
+ fprintf(stderr, "%s: Bad fd for map\n", __func__);
+ return -1;
+ }
+
+ /* len[i] == 0 marks an unused slot in the fixed-size entry table */
+ for (i = 0; i < VHOST_USER_FS_SLAVE_ENTRIES; i++) {
+ if (sm->len[i] == 0) {
+ continue;
+ }
+
+ /*
+ * Reject entries whose end wraps around (first test) or falls
+ * outside the cache window (second test); the guest/slave input
+ * is untrusted.
+ */
+ if ((sm->c_offset[i] + sm->len[i]) < sm->len[i] ||
+ (sm->c_offset[i] + sm->len[i]) > cache_size) {
+ fprintf(stderr, "%s: Bad offset/len for map [%d] %"
+ PRIx64 "+%" PRIx64 "\n", __func__,
+ i, sm->c_offset[i], sm->len[i]);
+ res = -1;
+ break;
+ }
+
+ /*
+ * MAP_FIXED replaces whatever mapping currently occupies this
+ * part of the cache window.  Protection is derived from the
+ * per-entry R/W flags.  With MAP_FIXED a successful mmap returns
+ * exactly the requested address, so any other value is an error
+ * (including MAP_FAILED).
+ */
+ if (mmap(cache_host + sm->c_offset[i], sm->len[i],
+ ((sm->flags[i] & VHOST_USER_FS_FLAG_MAP_R) ? PROT_READ : 0) |
+ ((sm->flags[i] & VHOST_USER_FS_FLAG_MAP_W) ? PROT_WRITE : 0),
+ MAP_SHARED | MAP_FIXED,
+ fd, sm->fd_offset[i]) != (cache_host + sm->c_offset[i])) {
+ fprintf(stderr, "%s: map failed err %d [%d] %"
+ PRIx64 "+%" PRIx64 " from %" PRIx64 "\n", __func__,
+ errno, i, sm->c_offset[i], sm->len[i],
+ sm->fd_offset[i]);
+ res = -1;
+ break;
+ }
+ }
+
+ if (res) {
+ /* Something went wrong, unmap them all */
+ /* Note: this also "unmaps" entries never reached above; that is
+ * harmless since unmap just reinstates anonymous PROT_NONE
+ * mappings over each entry's range.
+ */
+ vhost_user_fs_slave_unmap(dev, sm);
+ }
+ return res;
}
int vhost_user_fs_slave_unmap(struct vhost_dev *dev, VhostUserFSSlaveMsg *sm)
{
- /* TODO */
- return -1;
+ /*
+ * Handle a slave "unmap" request: replace the given ranges of the
+ * cache window with anonymous PROT_NONE mappings so any further
+ * guest access faults rather than touching stale file data.
+ * Returns 0 if every entry succeeded, -1 if any entry failed.
+ */
+ VHostUserFS *fs = VHOST_USER_FS(dev->vdev);
+ size_t cache_size = fs->conf.cache_size;
+ /* Host virtual address of the start of the cache window */
+ void *cache_host = memory_region_get_ram_ptr(&fs->cache);
+
+ unsigned int i;
+ int res = 0;
+
+ /* Note even if one unmap fails we try the rest, since the effect
+ * is to clean up as much as possible.
+ */
+ for (i = 0; i < VHOST_USER_FS_SLAVE_ENTRIES; i++) {
+ void *ptr;
+ /* len[i] == 0 marks an unused slot in the fixed-size entry table */
+ if (sm->len[i] == 0) {
+ continue;
+ }
+
+ if (sm->len[i] == ~(uint64_t)0) {
+ /* Special case meaning the whole arena */
+ /* NOTE(review): this mutates the incoming message in place;
+ * harmless here but worth confirming sm is not reused by the
+ * caller afterwards.
+ */
+ sm->len[i] = cache_size;
+ }
+
+ /* Guard against wrap-around and ranges beyond the cache window;
+ * the input is untrusted.
+ */
+ if ((sm->c_offset[i] + sm->len[i]) < sm->len[i] ||
+ (sm->c_offset[i] + sm->len[i]) > cache_size) {
+ fprintf(stderr, "%s: Bad offset/len for unmap [%d] %"
+ PRIx64 "+%" PRIx64 "\n", __func__,
+ i, sm->c_offset[i], sm->len[i]);
+ res = -1;
+ continue;
+ }
+
+ /* "Unmap" by overmapping with an inaccessible anonymous mapping;
+ * a plain munmap would punch a hole in the cache window instead
+ * of keeping the address range reserved.
+ */
+ ptr = mmap(cache_host + sm->c_offset[i], sm->len[i],
+ PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ if (ptr != (cache_host + sm->c_offset[i])) {
+ fprintf(stderr, "%s: mmap failed (%s) [%d] %"
+ PRIx64 "+%" PRIx64 " from %" PRIx64 " res: %p\n",
+ __func__,
+ strerror(errno),
+ i, sm->c_offset[i], sm->len[i],
+ sm->fd_offset[i], ptr);
+ res = -1;
+ }
+ }
+
+ return res;
}
int vhost_user_fs_slave_sync(struct vhost_dev *dev, VhostUserFSSlaveMsg *sm)
{
- /* TODO */
- return -1;
+ /*
+ * Handle a slave "sync" request: msync the given ranges of the cache
+ * window so dirty pages are written back to the underlying file.
+ * Returns 0 if every entry succeeded, -1 if any entry failed.
+ */
+ VHostUserFS *fs = VHOST_USER_FS(dev->vdev);
+ size_t cache_size = fs->conf.cache_size;
+ /* Host virtual address of the start of the cache window */
+ void *cache_host = memory_region_get_ram_ptr(&fs->cache);
+
+ unsigned int i;
+ int res = 0;
+
+ /* Note even if one sync fails we try the rest */
+ for (i = 0; i < VHOST_USER_FS_SLAVE_ENTRIES; i++) {
+ /* len[i] == 0 marks an unused slot in the fixed-size entry table */
+ if (sm->len[i] == 0) {
+ continue;
+ }
+
+ /* Guard against wrap-around and ranges beyond the cache window;
+ * the input is untrusted.
+ */
+ if ((sm->c_offset[i] + sm->len[i]) < sm->len[i] ||
+ (sm->c_offset[i] + sm->len[i]) > cache_size) {
+ fprintf(stderr, "%s: Bad offset/len for sync [%d] %"
+ PRIx64 "+%" PRIx64 "\n", __func__,
+ i, sm->c_offset[i], sm->len[i]);
+ res = -1;
+ continue;
+ }
+
+ /* NOTE(review): MS_SYNC blocks until write-out completes; the
+ * author's "??" suggests MS_ASYNC was considered — confirm the
+ * required durability semantics for the slave protocol.
+ */
+ if (msync(cache_host + sm->c_offset[i], sm->len[i],
+ MS_SYNC /* ?? */)) {
+ fprintf(stderr, "%s: msync failed (%s) [%d] %"
+ PRIx64 "+%" PRIx64 " from %" PRIx64 "\n", __func__,
+ strerror(errno),
+ i, sm->c_offset[i], sm->len[i],
+ sm->fd_offset[i]);
+ res = -1;
+ }
+ }
+
+ return res;
}
--
2.19.2
- [Qemu-devel] [RFC PATCH 0/7] virtio-fs: shared file system for virtual machines3, Dr. David Alan Gilbert (git), 2018/12/10
- [Qemu-devel] [RFC PATCH 1/7] virtio: Add shared memory capability, Dr. David Alan Gilbert (git), 2018/12/10
- [Qemu-devel] [RFC PATCH 3/7] virtio-fs: Add cache BAR, Dr. David Alan Gilbert (git), 2018/12/10
- [Qemu-devel] [RFC PATCH 2/7] virtio: add vhost-user-fs-pci device, Dr. David Alan Gilbert (git), 2018/12/10
- [Qemu-devel] [RFC PATCH 5/7] virtio-fs: Fill in slave commands for mapping, Dr. David Alan Gilbert (git) <=
- [Qemu-devel] [RFC PATCH 7/7] virtio-fs: Allow mapping of journal, Dr. David Alan Gilbert (git), 2018/12/10
- [Qemu-devel] [RFC PATCH 4/7] virtio-fs: Add vhost-user slave commands for mapping, Dr. David Alan Gilbert (git), 2018/12/10
- [Qemu-devel] [RFC PATCH 6/7] virtio-fs: Allow mapping of meta data version table, Dr. David Alan Gilbert (git), 2018/12/10
- Re: [Qemu-devel] [RFC PATCH 0/7] virtio-fs: shared file system for virtual machines3, no-reply, 2018/12/10
- Re: [Qemu-devel] [RFC PATCH 0/7] virtio-fs: shared file system for virtual machines3, Stefan Hajnoczi, 2018/12/11
- Re: [Qemu-devel] [RFC PATCH 0/7] virtio-fs: shared file system for virtual machines3, Daniel P . Berrangé, 2018/12/12