[PATCH 41/60] linux-user: Split out mmap_h_eq_g
From: Richard Henderson
Subject: [PATCH 41/60] linux-user: Split out mmap_h_eq_g
Date: Fri, 1 Mar 2024 13:06:00 -1000
Move the MAP_FIXED_NOREPLACE check for reserved_va earlier.
Move the computation of host_prot earlier.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Acked-by: Helge Deller <deller@gmx.de>
Message-Id: <20240102015808.132373-22-richard.henderson@linaro.org>
---
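As a standalone illustration of the fast path the new helper takes when the
host and guest page sizes match: a minimal sketch that uses plain host
pointers instead of guest addresses, omits QEMU's page-flag bookkeeping
(mmap_end, do_munmap), and uses a hypothetical map_noreplace() wrapper name.

    #include <errno.h>
    #include <sys/mman.h>

    #ifndef MAP_FIXED_NOREPLACE
    #define MAP_FIXED_NOREPLACE 0x100000
    #endif

    /*
     * Map len bytes at want_p, emulating MAP_FIXED_NOREPLACE on hosts
     * whose kernel does not know the flag and falls back to treating
     * the address as a plain hint.
     */
    static void *map_noreplace(void *want_p, size_t len, int prot,
                               int flags, int fd, off_t offset)
    {
        void *p = mmap(want_p, len, prot, flags, fd, offset);

        if (p == MAP_FAILED) {
            return MAP_FAILED;
        }
        if ((flags & MAP_FIXED_NOREPLACE) && p != want_p) {
            /* The kernel placed the mapping elsewhere: undo it and
             * report the collision the flag is supposed to detect. */
            munmap(p, len);
            errno = EEXIST;
            return MAP_FAILED;
        }
        return p;
    }

The comparison against want_p matters because kernels that predate
MAP_FIXED_NOREPLACE silently ignore it, so the collision has to be detected
and reported as EEXIST by hand.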
linux-user/mmap.c | 68 ++++++++++++++++++++++++++++++++++++++---------
1 file changed, 55 insertions(+), 13 deletions(-)
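The reserved_va rework reduces to the shape below: since the emulator owns
the reserved address space, MAP_FIXED_NOREPLACE can be validated against the
guest page tables and downgraded to MAP_FIXED before the host mmap. Sketch
only, with a caller-supplied predicate standing in for
page_check_range_empty() and a hypothetical resolve_fixed_noreplace() name;
the real code also handles the non-MAP_FIXED search via mmap_find_vma() and
the host-page alignment of start and offset.

    #include <errno.h>
    #include <stdbool.h>
    #include <sys/mman.h>

    #ifndef MAP_FIXED_NOREPLACE
    #define MAP_FIXED_NOREPLACE 0x100000
    #endif

    /*
     * Check the requested range against the emulator's own page
     * tracking and let the host kernel see only plain MAP_FIXED.
     */
    static int resolve_fixed_noreplace(unsigned long start, unsigned long len,
                                       int *flags,
                                       bool (*range_is_empty)(unsigned long,
                                                              unsigned long))
    {
        if (*flags & MAP_FIXED_NOREPLACE) {
            if (!range_is_empty(start, start + len - 1)) {
                errno = EEXIST;
                return -1;
            }
            *flags = (*flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED;
        }
        return 0;
    }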
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index cbcd31e941..d3556bcc14 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -542,6 +542,33 @@ static abi_long mmap_end(abi_ulong start, abi_ulong last,
return start;
}
+/*
+ * Special case host page size == target page size,
+ * where there are no edge conditions.
+ */
+static abi_long mmap_h_eq_g(abi_ulong start, abi_ulong len,
+ int host_prot, int flags, int page_flags,
+ int fd, off_t offset)
+{
+ void *p, *want_p = g2h_untagged(start);
+ abi_ulong last;
+
+ p = mmap(want_p, len, host_prot, flags, fd, offset);
+ if (p == MAP_FAILED) {
+ return -1;
+ }
+ /* If the host kernel does not support MAP_FIXED_NOREPLACE, emulate. */
+ if ((flags & MAP_FIXED_NOREPLACE) && p != want_p) {
+ do_munmap(p, len);
+ errno = EEXIST;
+ return -1;
+ }
+
+ start = h2g(p);
+ last = start + len - 1;
+ return mmap_end(start, last, start, last, flags, page_flags);
+}
+
static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
int target_prot, int flags, int page_flags,
int fd, off_t offset)
@@ -550,6 +577,7 @@ static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
abi_ulong ret, last, real_start, real_last, retaddr, host_len;
abi_ulong passthrough_start = -1, passthrough_last = 0;
off_t host_offset;
+ int host_prot;
real_start = start & -host_page_size;
host_offset = offset & -host_page_size;
@@ -558,16 +586,33 @@ static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
* For reserved_va, we are in full control of the allocation.
* Find a suitable hole and convert to MAP_FIXED.
*/
- if (reserved_va && !(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
- host_len = len + offset - host_offset;
- start = mmap_find_vma(real_start, host_len,
- MAX(host_page_size, TARGET_PAGE_SIZE));
- if (start == (abi_ulong)-1) {
- errno = ENOMEM;
- return -1;
+ if (reserved_va) {
+ if (flags & MAP_FIXED_NOREPLACE) {
+ /* Validate that the chosen range is empty. */
+ if (!page_check_range_empty(start, start + len - 1)) {
+ errno = EEXIST;
+ return -1;
+ }
+ flags = (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED;
+ } else if (!(flags & MAP_FIXED)) {
+ size_t real_len = len + offset - host_offset;
+ abi_ulong align = MAX(host_page_size, TARGET_PAGE_SIZE);
+
+ start = mmap_find_vma(real_start, real_len, align);
+ if (start == (abi_ulong)-1) {
+ errno = ENOMEM;
+ return -1;
+ }
+ start += offset - host_offset;
+ flags |= MAP_FIXED;
}
- start += offset - host_offset;
- flags |= MAP_FIXED;
+ }
+
+ host_prot = target_to_host_prot(target_prot);
+
+ if (host_page_size == TARGET_PAGE_SIZE) {
+ return mmap_h_eq_g(start, len, host_prot, flags,
+ page_flags, fd, offset);
}
/*
@@ -603,12 +648,10 @@ static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
uintptr_t host_start;
- int host_prot;
void *p;
host_len = len + offset - host_offset;
host_len = ROUND_UP(host_len, host_page_size);
- host_prot = target_to_host_prot(target_prot);
/* Note: we prefer to control the mapping address. */
p = mmap(g2h_untagged(start), host_len, host_prot,
@@ -731,8 +774,7 @@ static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
len1 = real_last - real_start + 1;
want_p = g2h_untagged(real_start);
- p = mmap(want_p, len1, target_to_host_prot(target_prot),
- flags, fd, offset1);
+ p = mmap(want_p, len1, host_prot, flags, fd, offset1);
if (p != want_p) {
if (p != MAP_FAILED) {
do_munmap(p, len1);
--
2.34.1