[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH v2 12/19] bsd-user: add support for memory management
From: |
Stacey Son |
Subject: |
[Qemu-devel] [PATCH v2 12/19] bsd-user: add support for memory management related system calls |
Date: |
Fri, 8 Nov 2013 10:33:30 -0600 |
This change adds support or stubs for memory management related system calls
including mmap(2), munmap(2), mprotect(2), msync(2), mlock(2), munlock(2),
mlockall(2), munlockall(2), madvise(2), minherit(2), mincore(2), shm_open(2),
shm_unlink(2), shmget(2), shmctl(2), shmat(2), shmdt(2), vadvise(), sbrk(),
sstk(),
and freebsd6_mmap().
Signed-off-by: Stacey Son <address@hidden>
---
bsd-user/Makefile.objs | 2 +-
bsd-user/bsd-mem.c | 122 ++++++++++++
bsd-user/bsd-mem.h | 393 ++++++++++++++++++++++++++++++++++++++
bsd-user/mmap.c | 493 ++++++++++++++++++++++++++++++------------------
bsd-user/qemu-bsd.h | 10 +
bsd-user/qemu.h | 3 +-
bsd-user/syscall.c | 174 ++++++++++-------
7 files changed, 942 insertions(+), 255 deletions(-)
create mode 100644 bsd-user/bsd-mem.c
create mode 100644 bsd-user/bsd-mem.h
diff --git a/bsd-user/Makefile.objs b/bsd-user/Makefile.objs
index ee70866..1a33a6d 100644
--- a/bsd-user/Makefile.objs
+++ b/bsd-user/Makefile.objs
@@ -1,5 +1,5 @@
obj-y = main.o bsdload.o elfload.o mmap.o signal.o strace.o syscall.o \
- uaccess.o bsd-proc.o \
+ uaccess.o bsd-mem.o bsd-proc.o \
$(HOST_ABI_DIR)/os-proc.o \
$(HOST_ABI_DIR)/os-stat.o \
$(HOST_ABI_DIR)/os-sys.o \
diff --git a/bsd-user/bsd-mem.c b/bsd-user/bsd-mem.c
new file mode 100644
index 0000000..bfe03aa
--- /dev/null
+++ b/bsd-user/bsd-mem.c
@@ -0,0 +1,122 @@
+/*
+ * memory management system conversion routines
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <sys/ipc.h>
+#include <sys/shm.h>
+
+#include "qemu.h"
+#include "qemu-bsd.h"
+
+struct bsd_shm_regions bsd_shm_regions[N_BSD_SHM_REGIONS];
+
+abi_ulong bsd_target_brk;
+abi_ulong bsd_target_original_brk;
+
+void target_set_brk(abi_ulong new_brk)
+{
+
+ bsd_target_original_brk = bsd_target_brk = HOST_PAGE_ALIGN(new_brk);
+}
+
+abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
+ abi_ulong target_addr)
+{
+ struct target_ipc_perm *target_ip;
+
+ if (!lock_user_struct(VERIFY_READ, target_ip, target_addr, 1)) {
+ return -TARGET_EFAULT;
+ }
+ __get_user(host_ip->cuid, &target_ip->cuid);
+ __get_user(host_ip->cgid, &target_ip->cgid);
+ __get_user(host_ip->uid, &target_ip->uid);
+ __get_user(host_ip->gid, &target_ip->gid);
+ __get_user(host_ip->mode, &target_ip->mode);
+ __get_user(host_ip->seq, &target_ip->seq);
+ __get_user(host_ip->key, &target_ip->key);
+ unlock_user_struct(target_ip, target_addr, 0);
+
+ return 0;
+}
+
+abi_long host_to_target_ipc_perm(abi_ulong target_addr,
+ struct ipc_perm *host_ip)
+{
+ struct target_ipc_perm *target_ip;
+
+ if (!lock_user_struct(VERIFY_WRITE, target_ip, target_addr, 0)) {
+ return -TARGET_EFAULT;
+ }
+ __put_user(host_ip->cuid, &target_ip->cuid);
+ __put_user(host_ip->cgid, &target_ip->cgid);
+ __put_user(host_ip->uid, &target_ip->uid);
+ __put_user(host_ip->gid, &target_ip->gid);
+ __put_user(host_ip->mode, &target_ip->mode);
+ __put_user(host_ip->seq, &target_ip->seq);
+ __put_user(host_ip->key, &target_ip->key);
+ unlock_user_struct(target_ip, target_addr, 1);
+
+ return 0;
+}
+
+abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
+ abi_ulong target_addr)
+{
+ struct target_shmid_ds *target_sd;
+
+ if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) {
+ return -TARGET_EFAULT;
+ }
+ if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) {
+ return -TARGET_EFAULT;
+ }
+ __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
+ __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
+ __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
+ __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
+ __get_user(host_sd->shm_atime, &target_sd->shm_atime);
+ __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
+ __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
+ unlock_user_struct(target_sd, target_addr, 0);
+
+ return 0;
+}
+
+abi_long host_to_target_shmid_ds(abi_ulong target_addr,
+ struct shmid_ds *host_sd)
+{
+ struct target_shmid_ds *target_sd;
+
+ if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) {
+ return -TARGET_EFAULT;
+ }
+ if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) {
+ return -TARGET_EFAULT;
+ }
+ __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
+ __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
+ __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
+ __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
+ __put_user(host_sd->shm_atime, &target_sd->shm_atime);
+ __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
+ __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
+ unlock_user_struct(target_sd, target_addr, 1);
+
+ return 0;
+}
+
diff --git a/bsd-user/bsd-mem.h b/bsd-user/bsd-mem.h
new file mode 100644
index 0000000..88c01ec
--- /dev/null
+++ b/bsd-user/bsd-mem.h
@@ -0,0 +1,393 @@
+/*
+ * memory management system call shims and definitions
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*--
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _BSD_MMAN_H_
+#define _BSD_MMAN_H_
+
+#include <sys/types.h>
+#include <sys/ipc.h>
+#include <sys/mman.h>
+#include <sys/shm.h>
+#include <fcntl.h>
+
+#include "qemu-bsd.h"
+
+extern struct bsd_shm_regions bsd_shm_regions[];
+extern abi_ulong bsd_target_brk;
+extern abi_ulong bsd_target_original_brk;
+
+/* mmap(2) */
+static inline abi_long do_bsd_mmap(void *cpu_env, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6, abi_long arg7,
+ abi_long arg8)
+{
+
+ if (regpairs_aligned(cpu_env) != 0) {
+ arg6 = arg7;
+ arg7 = arg8;
+ }
+ return get_errno(target_mmap(arg1, arg2, arg3,
+ target_to_host_bitmask(arg4, mmap_flags_tbl), arg5,
+ target_offset64(arg6, arg7)));
+}
+
+/* munmap(2) */
+static inline abi_long do_bsd_munmap(abi_long arg1, abi_long arg2)
+{
+
+ return get_errno(target_munmap(arg1, arg2));
+}
+
+/* mprotect(2) */
+static inline abi_long do_bsd_mprotect(abi_long arg1, abi_long arg2,
+ abi_long arg3)
+{
+
+ return get_errno(target_mprotect(arg1, arg2, arg3));
+}
+
+/* msync(2) */
+static inline abi_long do_bsd_msync(abi_long arg1, abi_long arg2, abi_long arg3)
+{
+
+ return get_errno(msync(g2h(arg1), arg2, arg3));
+}
+
+/* mlock(2) */
+static inline abi_long do_bsd_mlock(abi_long arg1, abi_long arg2)
+{
+
+ return get_errno(mlock(g2h(arg1), arg2));
+}
+
+/* munlock(2) */
+static inline abi_long do_bsd_munlock(abi_long arg1, abi_long arg2)
+{
+
+ return get_errno(munlock(g2h(arg1), arg2));
+}
+
+/* mlockall(2) */
+static inline abi_long do_bsd_mlockall(abi_long arg1)
+{
+
+ return get_errno(mlockall(arg1));
+}
+
+/* munlockall(2) */
+static inline abi_long do_bsd_munlockall(void)
+{
+
+ return get_errno(munlockall());
+}
+
+/* madvise(2) */
+static inline abi_long do_bsd_madvise(abi_long arg1, abi_long arg2,
+ abi_long arg3)
+{
+ /*
+ * A straight passthrough may not be safe because qemu sometimes
+ * turns private file-backed mapping into anonymous mappings. This
+ * will break MADV_DONTNEED. This is a hint, so ignoring and returning
+ * success is ok.
+ */
+ return get_errno(0);
+}
+
+/* minherit(2) */
+static inline abi_long do_bsd_minherit(abi_long addr, abi_long len,
+ abi_long inherit)
+{
+
+ return get_errno(minherit(g2h(addr), len, inherit));
+}
+
+/* mincore(2) */
+static inline abi_long do_bsd_mincore(abi_ulong target_addr, abi_ulong len,
+ abi_ulong target_vec)
+{
+ abi_long ret;
+ void *p, *a;
+
+ a = lock_user(VERIFY_WRITE, target_addr, len, 0);
+ if (a == NULL) {
+ return -TARGET_EFAULT;
+ }
+ p = lock_user_string(target_vec);
+ if (p == NULL) {
+ unlock_user(a, target_addr, 0);
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(mincore(a, len, p));
+ unlock_user(p, target_vec, ret);
+ unlock_user(a, target_addr, 0);
+
+ return ret;
+}
+
+/* break() XXX this needs some more work. */
+static inline abi_long do_obreak(abi_ulong new_brk)
+{
+ abi_ulong brk_page;
+ abi_long mapped_addr;
+ int new_alloc_size;
+
+ return -TARGET_EINVAL;
+
+ if (!new_brk) {
+ return 0;
+ }
+ if (new_brk < bsd_target_original_brk) {
+ return -TARGET_EINVAL;
+ }
+
+ brk_page = HOST_PAGE_ALIGN(bsd_target_brk);
+
+ /* If the new brk is less than this, set it and we're done... */
+ if (new_brk < brk_page) {
+ bsd_target_brk = new_brk;
+ return 0;
+ }
+
+ /* We need to allocate more memory after the brk... */
+ new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
+ mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
+ PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0));
+
+ if (!is_error(mapped_addr)) {
+ bsd_target_brk = new_brk;
+ } else {
+ return mapped_addr;
+ }
+
+ return 0;
+}
+
+/* shm_open(2) */
+static inline abi_long do_bsd_shm_open(abi_ulong arg1, abi_long arg2,
+ abi_long arg3)
+{
+ int ret;
+ void *p;
+
+ p = lock_user_string(arg1);
+ if (p == NULL) {
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(shm_open(path(p),
+ target_to_host_bitmask(arg2, fcntl_flags_tbl), arg3));
+ unlock_user(p, arg1, 0);
+
+ return ret;
+}
+
+/* shm_unlink(2) */
+static inline abi_long do_bsd_shm_unlink(abi_ulong arg1)
+{
+ int ret;
+ void *p;
+
+ p = lock_user_string(arg1);
+ if (p == NULL) {
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(shm_unlink(p)); /* XXX path(p)? */
+ unlock_user(p, arg1, 0);
+
+ return ret;
+}
+
+/* shmget(2) */
+static inline abi_long do_bsd_shmget(abi_long arg1, abi_ulong arg2,
+ abi_long arg3)
+{
+
+ return get_errno(shmget(arg1, arg2, arg3));
+}
+
+/* shmctl(2) */
+static inline abi_long do_bsd_shmctl(abi_long shmid, abi_long cmd,
+ abi_ulong buff)
+{
+ struct shmid_ds dsarg;
+ abi_long ret = -TARGET_EINVAL;
+
+ cmd &= 0xff;
+
+ switch (cmd) {
+ case IPC_STAT:
+ case IPC_SET:
+ if (target_to_host_shmid_ds(&dsarg, buff)) {
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(shmctl(shmid, cmd, &dsarg));
+ if (host_to_target_shmid_ds(buff, &dsarg)) {
+ return -TARGET_EFAULT;
+ }
+ break;
+
+ case IPC_RMID:
+ ret = get_errno(shmctl(shmid, cmd, NULL));
+ break;
+
+ default:
+ ret = -TARGET_EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* shmat(2) */
+static inline abi_long do_bsd_shmat(int shmid, abi_ulong shmaddr, int shmflg)
+{
+ abi_ulong raddr;
+ abi_long ret;
+ void *host_raddr;
+ struct shmid_ds shm_info;
+ int i;
+
+ /* Find out the length of the shared memory segment. */
+ ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
+ if (is_error(ret)) {
+ /* Can't get the length */
+ return ret;
+ }
+
+ mmap_lock();
+
+ if (shmaddr) {
+ host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
+ } else {
+ abi_ulong mmap_start;
+
+ mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
+
+ if (mmap_start == -1) {
+ errno = ENOMEM;
+ host_raddr = (void *)-1;
+ } else {
+ host_raddr = shmat(shmid, g2h(mmap_start),
+ shmflg /* | SHM_REMAP */);
+ }
+ }
+
+ if (host_raddr == (void *)-1) {
+ mmap_unlock();
+ return get_errno((long)host_raddr);
+ }
+ raddr = h2g((unsigned long)host_raddr);
+
+ page_set_flags(raddr, raddr + shm_info.shm_segsz,
+ PAGE_VALID | PAGE_READ | ((shmflg & SHM_RDONLY) ? 0 : PAGE_WRITE));
+
+ for (i = 0; i < N_BSD_SHM_REGIONS; i++) {
+ if (bsd_shm_regions[i].start == 0) {
+ bsd_shm_regions[i].start = raddr;
+ bsd_shm_regions[i].size = shm_info.shm_segsz;
+ break;
+ }
+ }
+
+ mmap_unlock();
+ return raddr;
+}
+
+/* shmdt(2) */
+static inline abi_long do_bsd_shmdt(abi_ulong shmaddr)
+{
+ int i;
+
+ for (i = 0; i < N_BSD_SHM_REGIONS; ++i) {
+ if (bsd_shm_regions[i].start == shmaddr) {
+ bsd_shm_regions[i].start = 0;
+ page_set_flags(shmaddr,
+ shmaddr + bsd_shm_regions[i].size, 0);
+ break;
+ }
+ }
+
+ return get_errno(shmdt(g2h(shmaddr)));
+}
+
+
+static inline abi_long do_bsd_vadvise(void)
+{
+ /* See sys_ovadvise() in vm_unix.c */
+ qemu_log("qemu: Unsupported syscall vadvise()\n");
+ return -TARGET_ENOSYS;
+}
+
+static inline abi_long do_bsd_sbrk(void)
+{
+ /* see sys_sbrk() in vm_mmap.c */
+ qemu_log("qemu: Unsupported syscall sbrk()\n");
+ return -TARGET_ENOSYS;
+}
+
+static inline abi_long do_bsd_sstk(void)
+{
+ /* see sys_sstk() in vm_mmap.c */
+ qemu_log("qemu: Unsupported syscall sstk()\n");
+ return -TARGET_ENOSYS;
+}
+
+/*
+ * undocumented freebsd6_mmap(caddr_t addr, size_t len, int prot, int
+ * flags, int fd, int pad, off_t pos) system call.
+ */
+static inline abi_long do_bsd_freebsd6_mmap(abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6,
+ abi_long arg7)
+{
+
+ qemu_log("qemu: Unsupported syscall freebsd6_mmap()\n");
+ return -TARGET_ENOSYS;
+}
+
+#endif /* !_BSD_MMAN_H_ */
diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c
index aae8ea1..fc7bc5c 100644
--- a/bsd-user/mmap.c
+++ b/bsd-user/mmap.c
@@ -1,4 +1,4 @@
-/*
+/**
* mmap support for qemu
*
* Copyright (c) 2003 - 2008 Fabrice Bellard
@@ -26,44 +26,9 @@
#include "qemu.h"
#include "qemu-common.h"
-#include "bsd-mman.h"
-
-//#define DEBUG_MMAP
-
-#if defined(CONFIG_USE_NPTL)
-pthread_mutex_t mmap_mutex;
-static int __thread mmap_lock_count;
-
-void mmap_lock(void)
-{
- if (mmap_lock_count++ == 0) {
- pthread_mutex_lock(&mmap_mutex);
- }
-}
-
-void mmap_unlock(void)
-{
- if (--mmap_lock_count == 0) {
- pthread_mutex_unlock(&mmap_mutex);
- }
-}
-/* Grab lock to make sure things are in a consistent state after fork(). */
-void mmap_fork_start(void)
-{
- if (mmap_lock_count)
- abort();
- pthread_mutex_lock(&mmap_mutex);
-}
+// #define DEBUG_MMAP
-void mmap_fork_end(int child)
-{
- if (child)
- pthread_mutex_init(&mmap_mutex, NULL);
- else
- pthread_mutex_unlock(&mmap_mutex);
-}
-#else
/* We aren't threadsafe to start with, so no need to worry about locking. */
void mmap_lock(void)
{
@@ -72,67 +37,6 @@ void mmap_lock(void)
void mmap_unlock(void)
{
}
-#endif
-
-static void *bsd_vmalloc(size_t size)
-{
- void *p;
- mmap_lock();
- /* Use map and mark the pages as used. */
- p = mmap(NULL, size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANON, -1, 0);
-
- if (h2g_valid(p)) {
- /* Allocated region overlaps guest address space.
- This may recurse. */
- abi_ulong addr = h2g(p);
- page_set_flags(addr & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(addr + size),
- PAGE_RESERVED);
- }
-
- mmap_unlock();
- return p;
-}
-
-void *g_malloc(size_t size)
-{
- char * p;
- size += 16;
- p = bsd_vmalloc(size);
- *(size_t *)p = size;
- return p + 16;
-}
-
-/* We use map, which is always zero initialized. */
-void * g_malloc0(size_t size)
-{
- return g_malloc(size);
-}
-
-void g_free(void *ptr)
-{
- /* FIXME: We should unmark the reserved pages here. However this gets
- complicated when one target page spans multiple host pages, so we
- don't bother. */
- size_t *p;
- p = (size_t *)((char *)ptr - 16);
- munmap(p, *p);
-}
-
-void *g_realloc(void *ptr, size_t size)
-{
- size_t old_size, copy;
- void *new_ptr;
-
- if (!ptr)
- return g_malloc(size);
- old_size = *(size_t *)((char *)ptr - 16);
- copy = old_size < size ? old_size : size;
- new_ptr = g_malloc(size);
- memcpy(new_ptr, ptr, copy);
- g_free(ptr);
- return new_ptr;
-}
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
@@ -164,11 +68,11 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot)
if (start > host_start) {
/* handle host page containing start */
prot1 = prot;
- for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
+ for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
prot1 |= page_get_flags(addr);
}
if (host_end == host_start + qemu_host_page_size) {
- for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
+ for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
prot1 |= page_get_flags(addr);
}
end = host_end;
@@ -180,7 +84,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot)
}
if (end < host_end) {
prot1 = prot;
- for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
+ for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
prot1 |= page_get_flags(addr);
}
ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
@@ -218,7 +122,7 @@ static int mmap_frag(abi_ulong real_start,
/* get the protection of the target pages outside the mapping */
prot1 = 0;
- for(addr = real_start; addr < real_end; addr++) {
+ for (addr = real_start; addr < real_end; addr++) {
if (addr < start || addr >= end)
prot1 |= page_get_flags(addr);
}
@@ -246,7 +150,8 @@ static int mmap_frag(abi_ulong real_start,
mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
/* read the corresponding file data */
- pread(fd, g2h(start), end - start, offset);
+ if (pread(fd, g2h(start), end - start, offset) == -1)
+ return -1;
/* put final protection */
if (prot_new != (prot1 | PROT_WRITE))
@@ -260,69 +165,184 @@ static int mmap_frag(abi_ulong real_start,
return 0;
}
-#if defined(__CYGWIN__)
+#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
+# define TASK_UNMAPPED_BASE (1ul << 38)
+#elif defined(__CYGWIN__)
/* Cygwin doesn't have a whole lot of address space. */
-static abi_ulong mmap_next_start = 0x18000000;
+# define TASK_UNMAPPED_BASE 0x18000000
#else
-static abi_ulong mmap_next_start = 0x40000000;
+# define TASK_UNMAPPED_BASE 0x40000000
#endif
+abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;
unsigned long last_brk;
-/* find a free memory area of size 'size'. The search starts at
- 'start'. If 'start' == 0, then a default start address is used.
- Return -1 if error.
-*/
-/* page_init() marks pages used by the host as reserved to be sure not
- to use them. */
-static abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
+#ifdef CONFIG_USE_GUEST_BASE
+/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
+ of guest address space. */
+static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
{
- abi_ulong addr, addr1, addr_start;
+ abi_ulong addr;
+ abi_ulong end_addr;
int prot;
- unsigned long new_brk;
-
- new_brk = (unsigned long)sbrk(0);
- if (last_brk && last_brk < new_brk && last_brk == (target_ulong)last_brk) {
- /* This is a hack to catch the host allocating memory with brk().
- If it uses mmap then we loose.
- FIXME: We really want to avoid the host allocating memory in
- the first place, and maybe leave some slack to avoid switching
- to mmap. */
- page_set_flags(last_brk & TARGET_PAGE_MASK,
- TARGET_PAGE_ALIGN(new_brk),
- PAGE_RESERVED);
+ int looped = 0;
+
+ if (size > RESERVED_VA) {
+ return (abi_ulong)-1;
}
- last_brk = new_brk;
size = HOST_PAGE_ALIGN(size);
- start = start & qemu_host_page_mask;
- addr = start;
- if (addr == 0)
- addr = mmap_next_start;
- addr_start = addr;
- for(;;) {
- prot = 0;
- for(addr1 = addr; addr1 < (addr + size); addr1 += TARGET_PAGE_SIZE) {
- prot |= page_get_flags(addr1);
+ end_addr = start + size;
+ if (end_addr > RESERVED_VA) {
+ end_addr = RESERVED_VA;
+ }
+ addr = end_addr - qemu_host_page_size;
+
+ while (1) {
+ if (addr > end_addr) {
+ if (looped) {
+ return (abi_ulong)-1;
+ }
+ end_addr = RESERVED_VA;
+ addr = end_addr - qemu_host_page_size;
+ looped = 1;
+ continue;
+ }
+ prot = page_get_flags(addr);
+ if (prot) {
+ end_addr = addr;
}
- if (prot == 0)
+ if (addr + size == end_addr) {
break;
- addr += qemu_host_page_size;
- /* we found nothing */
- if (addr == addr_start)
- return (abi_ulong)-1;
+ }
+ addr -= qemu_host_page_size;
}
- if (start == 0)
- mmap_next_start = addr + size;
+
+ if (start == mmap_next_start) {
+ mmap_next_start = addr;
+ }
+
return addr;
}
+#endif
+
+/*
+ * Find and reserve a free memory area of size 'size'. The search
+ * starts at 'start'.
+ * It must be called with mmap_lock() held.
+ * Return -1 if error.
+ */
+abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
+{
+ void *ptr, *prev;
+ abi_ulong addr;
+ int wrapped, repeat;
+
+ /* If 'start' == 0, then a default start address is used. */
+ if (start == 0) {
+ start = mmap_next_start;
+ } else {
+ start &= qemu_host_page_mask;
+ }
+
+ size = HOST_PAGE_ALIGN(size);
+
+#ifdef CONFIG_USE_GUEST_BASE
+ if (RESERVED_VA) {
+ return mmap_find_vma_reserved(start, size);
+ }
+#endif
+
+ addr = start;
+ wrapped = repeat = 0;
+ prev = 0;
+
+ for (;; prev = ptr) {
+ /*
+ * Reserve needed memory area to avoid a race.
+ * It should be discarded using:
+ * - mmap() with MAP_FIXED flag
+ * - mremap() with MREMAP_FIXED flag
+ * - shmat() with SHM_REMAP flag
+ */
+ ptr = mmap(g2h(addr), size, PROT_NONE,
+ MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+
+ /* ENOMEM, if host address space has no memory */
+ if (ptr == MAP_FAILED) {
+ return (abi_ulong)-1;
+ }
+
+ /* Count the number of sequential returns of the same address.
+ This is used to modify the search algorithm below. */
+ repeat = (ptr == prev ? repeat + 1 : 0);
+
+ if (h2g_valid(ptr + size - 1)) {
+ addr = h2g(ptr);
+
+ if ((addr & ~TARGET_PAGE_MASK) == 0) {
+ /* Success. */
+ if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
+ mmap_next_start = addr + size;
+ }
+ return addr;
+ }
+
+ /* The address is not properly aligned for the target. */
+ switch (repeat) {
+ case 0:
+ /* Assume the result that the kernel gave us is the
+ first with enough free space, so start again at the
+ next higher target page. */
+ addr = TARGET_PAGE_ALIGN(addr);
+ break;
+ case 1:
+ /* Sometimes the kernel decides to perform the allocation
+ at the top end of memory instead. */
+ addr &= TARGET_PAGE_MASK;
+ break;
+ case 2:
+ /* Start over at low memory. */
+ addr = 0;
+ break;
+ default:
+ /* Fail. This unaligned block must the last. */
+ addr = -1;
+ break;
+ }
+ } else {
+ /* Since the result the kernel gave didn't fit, start
+ again at low memory. If any repetition, fail. */
+ addr = (repeat ? -1 : 0);
+ }
+
+ /* Unmap and try again. */
+ munmap(ptr, size);
+
+ /* ENOMEM if we checked the whole of the target address space. */
+ if (addr == (abi_ulong)-1) {
+ return (abi_ulong)-1;
+ } else if (addr == 0) {
+ if (wrapped) {
+ return (abi_ulong)-1;
+ }
+ wrapped = 1;
+ /* Don't actually use 0 when wrapping, instead indicate
+ that we'd truly like an allocation in low memory. */
+ addr = (mmap_min_addr > TARGET_PAGE_SIZE
+ ? TARGET_PAGE_ALIGN(mmap_min_addr)
+ : TARGET_PAGE_SIZE);
+ } else if (wrapped && addr >= start) {
+ return (abi_ulong)-1;
+ }
+ }
+}
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
- int flags, int fd, abi_ulong offset)
+ int flags, int fd, off_t offset)
{
abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
- unsigned long host_start;
mmap_lock();
#ifdef DEBUG_MMAP
@@ -337,21 +357,30 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
printf("MAP_FIXED ");
if (flags & MAP_ANON)
printf("MAP_ANON ");
- switch(flags & TARGET_BSD_MAP_FLAGMASK) {
- case MAP_PRIVATE:
- printf("MAP_PRIVATE ");
- break;
- case MAP_SHARED:
- printf("MAP_SHARED ");
- break;
- default:
- printf("[MAP_FLAGMASK=0x%x] ", flags & TARGET_BSD_MAP_FLAGMASK);
- break;
- }
- printf("fd=%d offset=" TARGET_FMT_lx "\n", fd, offset);
+ if (flags & MAP_PRIVATE)
+ printf("MAP_PRIVATE ");
+ if (flags & MAP_SHARED)
+ printf("MAP_SHARED ");
+ if (flags & MAP_NOCORE)
+ printf("MAP_NOCORE ");
+#ifdef MAP_STACK
+ if (flags & MAP_STACK)
+ printf("MAP_STACK ");
+#endif
+ printf("fd=%d offset=0x%llx\n", fd, offset);
}
#endif
+#ifdef MAP_STACK
+ if (flags & MAP_STACK) {
+ if ((fd != -1) || ((prot & (PROT_READ | PROT_WRITE)) !=
+ (PROT_READ | PROT_WRITE))) {
+ errno = EINVAL;
+ goto fail;
+ }
+ }
+#endif /* MAP_STACK */
+
if (offset & ~TARGET_PAGE_MASK) {
errno = EINVAL;
goto fail;
@@ -361,34 +390,78 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
if (len == 0)
goto the_end;
real_start = start & qemu_host_page_mask;
+ host_offset = offset & qemu_host_page_mask;
+ /* If the user is asking for the kernel to find a location, do that
+ before we truncate the length for mapping files below. */
if (!(flags & MAP_FIXED)) {
- abi_ulong mmap_start;
- void *p;
- host_offset = offset & qemu_host_page_mask;
host_len = len + offset - host_offset;
host_len = HOST_PAGE_ALIGN(host_len);
- mmap_start = mmap_find_vma(real_start, host_len);
- if (mmap_start == (abi_ulong)-1) {
+ start = mmap_find_vma(real_start, host_len);
+ if (start == (abi_ulong)-1) {
errno = ENOMEM;
goto fail;
}
+ }
+
+ /* When mapping files into a memory area larger than the file, accesses
+ to pages beyond the file size will cause a SIGBUS.
+
+ For example, if mmaping a file of 100 bytes on a host with 4K pages
+ emulating a target with 8K pages, the target expects to be able to
+ access the first 8K. But the host will trap us on any access beyond
+ 4K.
+
+ When emulating a target with a larger page-size than the hosts, we
+ may need to truncate file maps at EOF and add extra anonymous pages
+ up to the targets page boundary. */
+
+ if ((qemu_real_host_page_size < TARGET_PAGE_SIZE)
+ && !(flags & MAP_ANONYMOUS)) {
+ struct stat sb;
+
+ if (fstat (fd, &sb) == -1)
+ goto fail;
+
+ /* Are we trying to create a map beyond EOF?. */
+ if (offset + len > sb.st_size) {
+ /* If so, truncate the file map at eof aligned with
+ the hosts real pagesize. Additional anonymous maps
+ will be created beyond EOF. */
+ len = (sb.st_size - offset);
+ len += qemu_real_host_page_size - 1;
+ len &= ~(qemu_real_host_page_size - 1);
+ }
+ }
+
+ if (!(flags & MAP_FIXED)) {
+ unsigned long host_start;
+ void *p;
+
+ host_len = len + offset - host_offset;
+ host_len = HOST_PAGE_ALIGN(host_len);
+
/* Note: we prefer to control the mapping address. It is
especially important if qemu_host_page_size >
qemu_real_host_page_size */
- p = mmap(g2h(mmap_start),
- host_len, prot, flags | MAP_FIXED, fd, host_offset);
- if (p == MAP_FAILED)
+ p = mmap(g2h(start), host_len, prot,
+ flags | MAP_FIXED | MAP_ANON, -1, 0);
+ if (p == MAP_FAILED) {
goto fail;
+ }
/* update start so that it points to the file position at 'offset' */
host_start = (unsigned long)p;
- if (!(flags & MAP_ANON))
+ if (!(flags & MAP_ANON)) {
+ p = mmap(g2h(start), len, prot,
+ flags | MAP_FIXED, fd, host_offset);
+ if (p == MAP_FAILED) {
+ munmap(g2h(start), host_len);
+ goto fail;
+ }
host_start += offset - host_offset;
+ }
start = h2g(host_start);
} else {
- int flg;
- target_ulong addr;
-
if (start & ~TARGET_PAGE_MASK) {
errno = EINVAL;
goto fail;
@@ -396,12 +469,14 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
end = start + len;
real_end = HOST_PAGE_ALIGN(end);
- for(addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
- flg = page_get_flags(addr);
- if (flg & PAGE_RESERVED) {
- errno = ENXIO;
- goto fail;
- }
+ /*
+ * Test if requested memory area fits target address space
+ * It can fail only on 64-bit host with 32-bit target.
+ * On any other target/host host mmap() handles this error correctly.
+ */
+ if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
+ errno = EINVAL;
+ goto fail;
}
/* worst case: we cannot map the file because the offset is not
@@ -410,8 +485,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
(offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask))
{
/* msync() won't work here, so we return an error if write is
possible while it is a shared mapping */
- if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
- (prot & PROT_WRITE)) {
+ if ((flags & MAP_SHARED) && (prot & PROT_WRITE)) {
errno = EINVAL;
goto fail;
}
@@ -420,7 +494,8 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
-1, 0);
if (retaddr == -1)
goto fail;
- pread(fd, g2h(start), len, offset);
+ if (pread(fd, g2h(start), len, offset) == -1)
+ goto fail;
if (!(prot & PROT_WRITE)) {
ret = target_mprotect(start, len, prot);
if (ret != 0) {
@@ -480,6 +555,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
page_dump(stdout);
printf("\n");
#endif
+ tb_invalidate_phys_range(start, start + len, 0);
mmap_unlock();
return start;
fail:
@@ -487,13 +563,56 @@ fail:
return -1;
}
+static void mmap_reserve(abi_ulong start, abi_ulong size)
+{
+ abi_ulong real_start;
+ abi_ulong real_end;
+ abi_ulong addr;
+ abi_ulong end;
+ int prot;
+
+ real_start = start & qemu_host_page_mask;
+ real_end = HOST_PAGE_ALIGN(start + size);
+ end = start + size;
+ if (start > real_start) {
+ /* handle host page containing start */
+ prot = 0;
+ for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
+ prot |= page_get_flags(addr);
+ }
+ if (real_end == real_start + qemu_host_page_size) {
+ for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
+ prot |= page_get_flags(addr);
+ }
+ end = real_end;
+ }
+ if (prot != 0)
+ real_start += qemu_host_page_size;
+ }
+ if (end < real_end) {
+ prot = 0;
+ for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
+ prot |= page_get_flags(addr);
+ }
+ if (prot != 0)
+ real_end -= qemu_host_page_size;
+ }
+ if (real_start != real_end) {
+ mmap(g2h(real_start), real_end - real_start, PROT_NONE,
+ MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
+ -1, 0);
+ }
+}
+
int target_munmap(abi_ulong start, abi_ulong len)
{
abi_ulong end, real_start, real_end, addr;
int prot, ret;
#ifdef DEBUG_MMAP
- printf("munmap: start=0x%lx len=0x%lx\n", start, len);
+ printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
+ TARGET_ABI_FMT_lx "\n",
+ start, len);
#endif
if (start & ~TARGET_PAGE_MASK)
return -EINVAL;
@@ -508,11 +627,11 @@ int target_munmap(abi_ulong start, abi_ulong len)
if (start > real_start) {
/* handle host page containing start */
prot = 0;
- for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
+ for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
prot |= page_get_flags(addr);
}
if (real_end == real_start + qemu_host_page_size) {
- for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
+ for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
prot |= page_get_flags(addr);
}
end = real_end;
@@ -522,7 +641,7 @@ int target_munmap(abi_ulong start, abi_ulong len)
}
if (end < real_end) {
prot = 0;
- for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
+ for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
prot |= page_get_flags(addr);
}
if (prot != 0)
@@ -532,11 +651,17 @@ int target_munmap(abi_ulong start, abi_ulong len)
ret = 0;
/* unmap what we can */
if (real_start < real_end) {
- ret = munmap(g2h(real_start), real_end - real_start);
+ if (RESERVED_VA) {
+ mmap_reserve(real_start, real_end - real_start);
+ } else {
+ ret = munmap(g2h(real_start), real_end - real_start);
+ }
}
- if (ret == 0)
+ if (ret == 0) {
page_set_flags(start, start + len, 0);
+ tb_invalidate_phys_range(start, start + len, 0);
+ }
mmap_unlock();
return ret;
}
diff --git a/bsd-user/qemu-bsd.h b/bsd-user/qemu-bsd.h
index 590c36b..f562aad 100644
--- a/bsd-user/qemu-bsd.h
+++ b/bsd-user/qemu-bsd.h
@@ -32,6 +32,16 @@
#include <sys/wait.h>
#include <netinet/in.h>
+/* bsd-mem.c */
+abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
+ abi_ulong target_addr);
+abi_long host_to_target_ipc_perm(abi_ulong target_addr,
+ struct ipc_perm *host_ip);
+abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
+ abi_ulong target_addr);
+abi_long host_to_target_shmid_ds(abi_ulong target_addr,
+ struct shmid_ds *host_sd);
+
/* bsd-proc.c */
int target_to_host_resource(int code);
rlim_t target_to_host_rlim(abi_ulong target_rlim);
diff --git a/bsd-user/qemu.h b/bsd-user/qemu.h
index e668a67..613a89e 100644
--- a/bsd-user/qemu.h
+++ b/bsd-user/qemu.h
@@ -219,7 +219,7 @@ void QEMU_NORETURN force_sig(int target_sig);
/* mmap.c */
int target_mprotect(abi_ulong start, abi_ulong len, int prot);
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
- int flags, int fd, abi_ulong offset);
+ int flags, int fd, off_t offset);
int target_munmap(abi_ulong start, abi_ulong len);
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
abi_ulong new_size, unsigned long flags,
@@ -228,6 +228,7 @@ int target_msync(abi_ulong start, abi_ulong len, int flags);
extern unsigned long last_brk;
void mmap_lock(void);
void mmap_unlock(void);
+abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size);
void cpu_list_lock(void);
void cpu_list_unlock(void);
#if defined(CONFIG_USE_NPTL)
diff --git a/bsd-user/syscall.c b/bsd-user/syscall.c
index 633d638..e3967fa 100644
--- a/bsd-user/syscall.c
+++ b/bsd-user/syscall.c
@@ -17,22 +17,16 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
-#include <unistd.h>
-#include <fcntl.h>
#include <time.h>
-#include <limits.h>
#include <sys/types.h>
-#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/param.h>
#include <sys/sysctl.h>
-#include <utime.h>
#include "qemu.h"
#include "qemu-common.h"
@@ -42,6 +36,7 @@ static int host_to_target_errno(int err);
/* BSD independent syscall shims */
#include "bsd-file.h"
+#include "bsd-mem.h"
#include "bsd-proc.h"
#include "bsd-signal.h"
@@ -53,19 +48,18 @@ static int host_to_target_errno(int err);
/* #define DEBUG */
-static abi_ulong target_brk;
-static abi_ulong target_original_brk;
-
/*
* errno conversion.
*/
abi_long get_errno(abi_long ret)
{
- if (ret == -1)
+
+ if (ret == -1) {
/* XXX need to translate host -> target errnos here */
return -(errno);
- else
+ } else {
return ret;
+ }
}
static int host_to_target_errno(int err)
@@ -76,46 +70,8 @@ static int host_to_target_errno(int err)
int is_error(abi_long ret)
{
- return (abi_ulong)ret >= (abi_ulong)(-4096);
-}
-void target_set_brk(abi_ulong new_brk)
-{
- target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
-}
-
-/* do_obreak() must return target errnos. */
-static abi_long do_obreak(abi_ulong new_brk)
-{
- abi_ulong brk_page;
- abi_long mapped_addr;
- int new_alloc_size;
-
- if (!new_brk)
- return 0;
- if (new_brk < target_original_brk)
- return -TARGET_EINVAL;
-
- brk_page = HOST_PAGE_ALIGN(target_brk);
-
- /* If the new brk is less than this, set it and we're done... */
- if (new_brk < brk_page) {
- target_brk = new_brk;
- return 0;
- }
-
- /* We need to allocate more memory after the brk... */
- new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
- mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
- PROT_READ|PROT_WRITE,
- MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0));
-
- if (!is_error(mapped_addr))
- target_brk = new_brk;
- else
- return mapped_addr;
-
- return 0;
+ return (abi_ulong)ret >= (abi_ulong)(-4096);
}
/* FIXME
@@ -169,6 +125,13 @@ static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
return 0;
}
+
+/* stub for arm semihosting support */
+abi_long do_brk(abi_ulong new_brk)
+{
+ return do_obreak(new_brk);
+}
+
/* do_syscall() should always have a single exit point at the end so
that actions, such as logging of syscall results, can be performed.
All errnos that do_syscall() returns must be -TARGET_<errcode>. */
@@ -340,6 +303,7 @@ abi_long do_freebsd_syscall(void *cpu_env, int num, abi_long arg1,
case TARGET_FREEBSD_NR_getloginclass: /* getloginclass(2) */
ret = do_freebsd_getloginclass(arg1, arg2);
break;
+
#if 0
case TARGET_FREEBSD_NR_pdwait4: /* pdwait4(2) */
ret = do_freebsd_pdwait4(arg1, arg2, arg3, arg4);
@@ -417,6 +381,7 @@ abi_long do_freebsd_syscall(void *cpu_env, int num, abi_long arg1,
case TARGET_FREEBSD_NR_auditctl: /* auditctl(2) */
ret = do_freebsd_auditctl(arg1);
break;
+
case TARGET_FREEBSD_NR_utrace: /* utrace(2) */
ret = do_bsd_utrace(arg1, arg2);
break;
@@ -434,6 +399,7 @@ abi_long do_freebsd_syscall(void *cpu_env, int num, abi_long arg1,
break;
+
/*
* File system calls.
*/
@@ -807,20 +773,95 @@ abi_long do_freebsd_syscall(void *cpu_env, int num, abi_long arg1,
break;
+ /*
+ * Memory management system calls.
+ */
+ case TARGET_FREEBSD_NR_mmap: /* mmap(2) */
+ ret = do_bsd_mmap(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6, arg7,
+ arg8);
+ break;
+
+ case TARGET_FREEBSD_NR_munmap: /* munmap(2) */
+ ret = do_bsd_munmap(arg1, arg2);
+ break;
+
+ case TARGET_FREEBSD_NR_mprotect: /* mprotect(2) */
+ ret = do_bsd_mprotect(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_msync: /* msync(2) */
+ ret = do_bsd_msync(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_mlock: /* mlock(2) */
+ ret = do_bsd_mlock(arg1, arg2);
+ break;
- case TARGET_FREEBSD_NR_mmap:
- ret = get_errno(target_mmap(arg1, arg2, arg3,
- target_to_host_bitmask(arg4, mmap_flags_tbl),
- arg5,
- arg6));
+ case TARGET_FREEBSD_NR_munlock: /* munlock(2) */
+ ret = do_bsd_munlock(arg1, arg2);
break;
- case TARGET_FREEBSD_NR_mprotect:
- ret = get_errno(target_mprotect(arg1, arg2, arg3));
+
+ case TARGET_FREEBSD_NR_mlockall: /* mlockall(2) */
+ ret = do_bsd_mlockall(arg1);
break;
- case TARGET_FREEBSD_NR_break:
- ret = do_obreak(arg1);
+
+ case TARGET_FREEBSD_NR_munlockall: /* munlockall(2) */
+ ret = do_bsd_munlockall();
+ break;
+
+ case TARGET_FREEBSD_NR_madvise: /* madvise(2) */
+ ret = do_bsd_madvise(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_minherit: /* minherit(2) */
+ ret = do_bsd_minherit(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_mincore: /* mincore(2) */
+ ret = do_bsd_mincore(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_shm_open: /* shm_open(2) */
+ ret = do_bsd_shm_open(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_shm_unlink: /* shm_unlink(2) */
+ ret = do_bsd_shm_unlink(arg1);
+ break;
+
+ case TARGET_FREEBSD_NR_shmget: /* shmget(2) */
+ ret = do_bsd_shmget(arg1, arg2, arg3);
break;
+ case TARGET_FREEBSD_NR_shmctl: /* shmctl(2) */
+ ret = do_bsd_shmctl(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_shmat: /* shmat(2) */
+ ret = do_bsd_shmat(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_shmdt: /* shmdt(2) */
+ ret = do_bsd_shmdt(arg1);
+ break;
+
+ case TARGET_FREEBSD_NR_vadvise:
+ ret = do_bsd_vadvise();
+ break;
+
+ case TARGET_FREEBSD_NR_sbrk:
+ ret = do_bsd_sbrk();
+ break;
+
+ case TARGET_FREEBSD_NR_sstk:
+ ret = do_bsd_sstk();
+ break;
+
+ case TARGET_FREEBSD_NR_freebsd6_mmap: /* undocumented */
+ ret = do_bsd_freebsd6_mmap(arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+ break;
+
+
/*
* time related system calls.
*/
@@ -997,6 +1038,7 @@ abi_long do_freebsd_syscall(void *cpu_env, int num, abi_long arg1,
arg8));
break;
}
+
#ifdef DEBUG
gemu_log(" = %ld\n", ret);
#endif
@@ -1033,13 +1075,10 @@ abi_long do_netbsd_syscall(void *cpu_env, int num, abi_long arg1,
break;
case TARGET_NETBSD_NR_mmap:
- ret = get_errno(target_mmap(arg1, arg2, arg3,
- target_to_host_bitmask(arg4, mmap_flags_tbl),
- arg5,
- arg6));
+ ret = do_bsd_mmap(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6, 0, 0);
break;
case TARGET_NETBSD_NR_mprotect:
- ret = get_errno(target_mprotect(arg1, arg2, arg3));
+ ret = do_bsd_mprotect(arg1, arg2, arg3);
break;
case TARGET_NETBSD_NR_syscall:
@@ -1086,13 +1125,10 @@ abi_long do_openbsd_syscall(void *cpu_env, int num, abi_long arg1,
break;
case TARGET_OPENBSD_NR_mmap:
- ret = get_errno(target_mmap(arg1, arg2, arg3,
- target_to_host_bitmask(arg4, mmap_flags_tbl),
- arg5,
- arg6));
+ ret = do_bsd_mmap(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6, 0, 0);
break;
case TARGET_OPENBSD_NR_mprotect:
- ret = get_errno(target_mprotect(arg1, arg2, arg3));
+ ret = do_bsd_mprotect(arg1, arg2, arg3);
break;
case TARGET_OPENBSD_NR_syscall:
--
1.7.8
- Re: [Qemu-devel] [PATCH v2 02/19] bsd-user: add HOST_ABI_DIR for the various *BSD dependent code., (continued)
- Message not available
- Message not available
- Message not available
- Message not available
- Message not available
- Message not available
- Message not available
- Message not available
- Message not available
- Message not available
- [Qemu-devel] [PATCH v2 12/19] bsd-user: add support for memory management related system calls,
Stacey Son <=
- Message not available
- Message not available
Message not available
Message not available
Message not available
Message not available
Message not available
Message not available