[Qemu-devel] [PATCH-for-4.2 v1 3/9] s390x/mmu: DAT translation rewrite
From: David Hildenbrand
Subject: [Qemu-devel] [PATCH-for-4.2 v1 3/9] s390x/mmu: DAT translation rewrite
Date: Mon, 5 Aug 2019 17:29:41 +0200
Let's rewrite the DAT translation in a non-recursive way, similar to
arch/s390/kvm/gaccess.c:guest_translate() in KVM. This makes the
code much easier to read, compare and maintain.
Use better names for the region/segment/page table entries and for the
macros that extract the relevant parts of a virtual address (a worked
example follows below). Introduce defines for all defined bits; these
will come in handy soon.
All access exceptions now go through trigger_access_exception() at a single,
central point, and DAT protection checks are likewise performed in one place
(see the sketch below).
Also, we now catch and report invalid table addresses. All table entries are
accessed via read_table_entry().
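To make the central error handling easier to follow, here is a heavily
simplified, self-contained sketch of the resulting flow (all names, values and
stubs are stand-ins for this example only, not QEMU definitions): the table
walker returns 0 on success or a program-interruption code, and the single
caller either triggers the exception or merely fails the translation:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { PGM_PROTECTION = 0x04 };     /* stand-in interruption code */
#define PAGE_WRITE 0x2              /* stand-in page flag */

/* Stand-in for the rewritten mmu_translate_asce(): this fake walk always
 * succeeds but finds the page write-protected, so it drops the write flag.
 * The real walk returns a PGM_* code on failure instead. */
static int walk_tables(uint64_t vaddr, uint64_t *raddr, int *flags)
{
    *raddr = vaddr & ~0xfffULL;
    *flags &= ~PAGE_WRITE;
    return 0;
}

/* Stand-in for trigger_access_exception(). */
static void report(int code, uint64_t tec)
{
    printf("program interruption 0x%x, tec=0x%" PRIx64 "\n", code, tec);
}

int main(void)
{
    const bool is_store = true;
    uint64_t vaddr = 0x12345678ULL, raddr;
    uint64_t tec = vaddr & ~0xfffULL;      /* exception address, page-aligned */
    int flags = PAGE_WRITE;
    int r = walk_tables(vaddr, &raddr, &flags);

    if (r) {                               /* walk failed: r is the PGM_* code */
        report(r, tec);
    } else if (is_store && !(flags & PAGE_WRITE)) {
        report(PGM_PROTECTION, tec | 0x4); /* DAT protection sets bit 61 */
    } else {
        printf("translated to raddr=0x%" PRIx64 "\n", raddr);
    }
    return 0;
}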
Signed-off-by: David Hildenbrand <address@hidden>
---
target/s390x/cpu.h | 77 +++++---
target/s390x/mem_helper.c | 13 +-
target/s390x/mmu_helper.c | 360 +++++++++++++++++---------------------
3 files changed, 229 insertions(+), 221 deletions(-)
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index c34992bb2e..1ff14250bd 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -554,26 +554,63 @@ QEMU_BUILD_BUG_ON(sizeof(SysIB) != 4096);
#define ASCE_TYPE_SEGMENT 0x00 /* segment table type */
#define ASCE_TABLE_LENGTH 0x03 /* region table length */
-#define REGION_ENTRY_ORIGIN (~0xfffULL) /* region/segment table origin */
-#define REGION_ENTRY_RO 0x200 /* region/segment protection bit */
-#define REGION_ENTRY_TF 0xc0 /* region/segment table offset */
-#define REGION_ENTRY_INV 0x20 /* invalid region table entry */
-#define REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
-#define REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
-#define REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
-#define REGION_ENTRY_TYPE_R3 0x04 /* region third table type */
-#define REGION_ENTRY_LENGTH 0x03 /* region third length */
-
-#define SEGMENT_ENTRY_ORIGIN (~0x7ffULL) /* segment table origin */
-#define SEGMENT_ENTRY_FC 0x400 /* format control */
-#define SEGMENT_ENTRY_RO 0x200 /* page protection bit */
-#define SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
-
-#define VADDR_PX 0xff000 /* page index bits */
-
-#define PAGE_RO 0x200 /* HW read-only bit */
-#define PAGE_INVALID 0x400 /* HW invalid bit */
-#define PAGE_RES0 0x800 /* bit must be zero */
+#define REGION_ENTRY_ORIGIN 0xfffffffffffff000ULL
+#define REGION_ENTRY_P 0x0000000000000200ULL
+#define REGION_ENTRY_TF 0x00000000000000c0ULL
+#define REGION_ENTRY_I 0x0000000000000020ULL
+#define REGION_ENTRY_TT 0x000000000000000cULL
+#define REGION_ENTRY_TL 0x0000000000000003ULL
+
+#define REGION_ENTRY_TT_REGION1 0x000000000000000cULL
+#define REGION_ENTRY_TT_REGION2 0x0000000000000008ULL
+#define REGION_ENTRY_TT_REGION3 0x0000000000000004ULL
+
+#define REGION3_ENTRY_RFAA 0xffffffff80000000ULL
+#define REGION3_ENTRY_AV 0x0000000000010000ULL
+#define REGION3_ENTRY_ACC 0x000000000000f000ULL
+#define REGION3_ENTRY_F 0x0000000000000800ULL
+#define REGION3_ENTRY_FC 0x0000000000000400ULL
+#define REGION3_ENTRY_IEP 0x0000000000000100ULL
+#define REGION3_ENTRY_CR 0x0000000000000010ULL
+
+#define SEGMENT_ENTRY_ORIGIN 0xfffffffffffff800ULL
+#define SEGMENT_ENTRY_SFAA 0xfffffffffff80000ULL
+#define SEGMENT_ENTRY_AV 0x0000000000010000ULL
+#define SEGMENT_ENTRY_ACC 0x000000000000f000ULL
+#define SEGMENT_ENTRY_F 0x0000000000000800ULL
+#define SEGMENT_ENTRY_FC 0x0000000000000400ULL
+#define SEGMENT_ENTRY_P 0x0000000000000200ULL
+#define SEGMENT_ENTRY_IEP 0x0000000000000100ULL
+#define SEGMENT_ENTRY_I 0x0000000000000020ULL
+#define SEGMENT_ENTRY_CS 0x0000000000000010ULL
+#define SEGMENT_ENTRY_TT 0x000000000000000cULL
+
+#define SEGMENT_ENTRY_TT_REGION1 0x000000000000000cULL
+#define SEGMENT_ENTRY_TT_REGION2 0x0000000000000008ULL
+#define SEGMENT_ENTRY_TT_REGION3 0x0000000000000004ULL
+#define SEGMENT_ENTRY_TT_SEGMENT 0x0000000000000000ULL
+
+#define PAGE_ENTRY_0 0x0000000000000800ULL
+#define PAGE_ENTRY_I 0x0000000000000400ULL
+#define PAGE_ENTRY_P 0x0000000000000200ULL
+#define PAGE_ENTRY_IEP 0x0000000000000100ULL
+
+#define VADDR_REGION1_TX_MASK 0xffe0000000000000ULL
+#define VADDR_REGION2_TX_MASK 0x001ffc0000000000ULL
+#define VADDR_REGION3_TX_MASK 0x000003ff80000000ULL
+#define VADDR_SEGMENT_TX_MASK 0x000000007ff00000ULL
+#define VADDR_PAGE_TX_MASK 0x00000000000ff000ULL
+
+#define VADDR_REGION1_TX(vaddr) (((vaddr) & VADDR_REGION1_TX_MASK) >> 53)
+#define VADDR_REGION2_TX(vaddr) (((vaddr) & VADDR_REGION2_TX_MASK) >> 42)
+#define VADDR_REGION3_TX(vaddr) (((vaddr) & VADDR_REGION3_TX_MASK) >> 31)
+#define VADDR_SEGMENT_TX(vaddr) (((vaddr) & VADDR_SEGMENT_TX_MASK) >> 20)
+#define VADDR_PAGE_TX(vaddr) (((vaddr) & VADDR_PAGE_TX_MASK) >> 12)
+
+#define VADDR_REGION1_TL(vaddr) (((vaddr) & 0xc000000000000000ULL) >> 62)
+#define VADDR_REGION2_TL(vaddr) (((vaddr) & 0x0018000000000000ULL) >> 51)
+#define VADDR_REGION3_TL(vaddr) (((vaddr) & 0x0000030000000000ULL) >> 40)
+#define VADDR_SEGMENT_TL(vaddr) (((vaddr) & 0x0000000060000000ULL) >> 29)
#define SK_C (0x1 << 1)
#define SK_R (0x1 << 2)
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index 29d9eaa5b7..46cc0d66f7 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -1936,9 +1936,9 @@ void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
/* addresses are not wrapped in 24/31bit mode but table index is */
raddr = table + ((index + i) & 0x7ff) * sizeof(entry);
entry = cpu_ldq_real_ra(env, raddr, ra);
- if (!(entry & REGION_ENTRY_INV)) {
+ if (!(entry & REGION_ENTRY_I)) {
/* we are allowed to not store if already invalid */
- entry |= REGION_ENTRY_INV;
+ entry |= REGION_ENTRY_I;
cpu_stq_real_ra(env, raddr, entry, ra);
}
}
@@ -1963,17 +1963,18 @@ void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
/* Compute the page table entry address */
pte_addr = (pto & SEGMENT_ENTRY_ORIGIN);
- pte_addr += (vaddr & VADDR_PX) >> 9;
+ pte_addr += VADDR_PAGE_TX(vaddr) * 8;
/* Mark the page table entry as invalid */
pte = cpu_ldq_real_ra(env, pte_addr, ra);
- pte |= PAGE_INVALID;
+ pte |= PAGE_ENTRY_I;
+
cpu_stq_real_ra(env, pte_addr, pte, ra);
/* XXX we exploit the fact that Linux passes the exact virtual
address here - it's not obliged to! */
if (m4 & 1) {
- if (vaddr & ~VADDR_PX) {
+ if (vaddr & ~VADDR_PAGE_TX_MASK) {
tlb_flush_page(cs, page);
/* XXX 31-bit hack */
tlb_flush_page(cs, page ^ 0x80000000);
@@ -1982,7 +1983,7 @@ void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
tlb_flush(cs);
}
} else {
- if (vaddr & ~VADDR_PX) {
+ if (vaddr & ~VADDR_PAGE_TX_MASK) {
tlb_flush_page_all_cpus_synced(cs, page);
/* XXX 31-bit hack */
tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000);
diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
index 9c0d9b5c5f..de7798284d 100644
--- a/target/s390x/mmu_helper.c
+++ b/target/s390x/mmu_helper.c
@@ -72,44 +72,6 @@ static void trigger_access_exception(CPUS390XState *env, uint32_t type,
}
}
-static void trigger_prot_fault(CPUS390XState *env, target_ulong vaddr,
- uint64_t asc, int rw, bool exc)
-{
- uint64_t tec;
-
- tec = vaddr | (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ) | 4 | asc >> 46;
-
- DPRINTF("%s: trans_exc_code=%016" PRIx64 "\n", __func__, tec);
-
- if (!exc) {
- return;
- }
-
- trigger_access_exception(env, PGM_PROTECTION, ILEN_AUTO, tec);
-}
-
-static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
- uint32_t type, uint64_t asc, int rw, bool exc)
-{
- int ilen = ILEN_AUTO;
- uint64_t tec;
-
- tec = vaddr | (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ) | asc >> 46;
-
- DPRINTF("%s: trans_exc_code=%016" PRIx64 "\n", __func__, tec);
-
- if (!exc) {
- return;
- }
-
- /* Code accesses have an undefined ilc. */
- if (rw == MMU_INST_FETCH) {
- ilen = 2;
- }
-
- trigger_access_exception(env, type, ilen, tec);
-}
-
/* check whether the address would be proteted by Low-Address Protection */
static bool is_low_address(uint64_t addr)
{
@@ -155,183 +117,171 @@ target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr)
return raddr;
}
-/* Decode page table entry (normal 4KB page) */
-static int mmu_translate_pte(CPUS390XState *env, target_ulong vaddr,
- uint64_t asc, uint64_t pt_entry,
- target_ulong *raddr, int *flags, int rw, bool exc)
+static inline int read_table_entry(hwaddr gaddr, uint64_t *entry)
{
- if (pt_entry & PAGE_INVALID) {
- DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __func__, pt_entry);
- trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw, exc);
- return -1;
+ /*
+ * According to the PoP, these table addresses are "unpredictably real
+ * or absolute". Also, "it is unpredictable whether the address wraps
+ * or an addressing exception is recognized".
+ *
+ * We treat them as absolute addresses and don't wrap them.
+ */
+ if (address_space_read(&address_space_memory, gaddr, MEMTXATTRS_UNSPECIFIED,
+ (uint8_t *)entry, sizeof(*entry)) != MEMTX_OK) {
+ return -EFAULT;
}
- if (pt_entry & PAGE_RES0) {
- trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw, exc);
- return -1;
- }
- if (pt_entry & PAGE_RO) {
- *flags &= ~PAGE_WRITE;
- }
-
- *raddr = pt_entry & ASCE_ORIGIN;
-
- PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __func__, pt_entry);
-
+ *entry = be64_to_cpu(*entry);
return 0;
}
-/* Decode segment table entry */
-static int mmu_translate_segment(CPUS390XState *env, target_ulong vaddr,
- uint64_t asc, uint64_t st_entry,
- target_ulong *raddr, int *flags, int rw,
- bool exc)
-{
- CPUState *cs = env_cpu(env);
- uint64_t origin, offs, pt_entry;
-
- if (st_entry & SEGMENT_ENTRY_RO) {
- *flags &= ~PAGE_WRITE;
- }
-
- if ((st_entry & SEGMENT_ENTRY_FC) && (env->cregs[0] & CR0_EDAT)) {
- /* Decode EDAT1 segment frame absolute address (1MB page) */
- *raddr = (st_entry & 0xfffffffffff00000ULL) | (vaddr & 0xfffff);
- PTE_DPRINTF("%s: SEG=0x%" PRIx64 "\n", __func__, st_entry);
- return 0;
- }
-
- /* Look up 4KB page entry */
- origin = st_entry & SEGMENT_ENTRY_ORIGIN;
- offs = (vaddr & VADDR_PX) >> 9;
- pt_entry = ldq_phys(cs->as, origin + offs);
- PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
- __func__, origin, offs, pt_entry);
- return mmu_translate_pte(env, vaddr, asc, pt_entry, raddr, flags, rw, exc);
-}
-
-/* Decode region table entries */
-static int mmu_translate_region(CPUS390XState *env, target_ulong vaddr,
- uint64_t asc, uint64_t entry, int level,
- target_ulong *raddr, int *flags, int rw,
- bool exc)
-{
- CPUState *cs = env_cpu(env);
- uint64_t origin, offs, new_entry;
- const int pchks[4] = {
- PGM_SEGMENT_TRANS, PGM_REG_THIRD_TRANS,
- PGM_REG_SEC_TRANS, PGM_REG_FIRST_TRANS
- };
-
- PTE_DPRINTF("%s: 0x%" PRIx64 "\n", __func__, entry);
-
- origin = entry & REGION_ENTRY_ORIGIN;
- offs = (vaddr >> (17 + 11 * level / 4)) & 0x3ff8;
-
- new_entry = ldq_phys(cs->as, origin + offs);
- PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
- __func__, origin, offs, new_entry);
-
- if ((new_entry & REGION_ENTRY_INV) != 0) {
- DPRINTF("%s: invalid region\n", __func__);
- trigger_page_fault(env, vaddr, pchks[level / 4], asc, rw, exc);
- return -1;
- }
-
- if ((new_entry & REGION_ENTRY_TYPE_MASK) != level) {
- trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw, exc);
- return -1;
- }
-
- if (level == ASCE_TYPE_SEGMENT) {
- return mmu_translate_segment(env, vaddr, asc, new_entry, raddr, flags,
- rw, exc);
- }
-
- /* Check region table offset and length */
- offs = (vaddr >> (28 + 11 * (level - 4) / 4)) & 3;
- if (offs < ((new_entry & REGION_ENTRY_TF) >> 6)
- || offs > (new_entry & REGION_ENTRY_LENGTH)) {
- DPRINTF("%s: invalid offset or len (%lx)\n", __func__, new_entry);
- trigger_page_fault(env, vaddr, pchks[level / 4 - 1], asc, rw, exc);
- return -1;
- }
-
- if ((env->cregs[0] & CR0_EDAT) && (new_entry & REGION_ENTRY_RO)) {
- *flags &= ~PAGE_WRITE;
- }
-
- /* yet another region */
- return mmu_translate_region(env, vaddr, asc, new_entry, level - 4,
- raddr, flags, rw, exc);
-}
-
static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
- uint64_t asc, uint64_t asce, target_ulong *raddr,
- int *flags, int rw, bool exc)
+ uint64_t asce, target_ulong *raddr, int *flags)
{
- int level;
- int r;
+ const bool edat1 = (env->cregs[0] & CR0_EDAT) &&
+ s390_has_feat(S390_FEAT_EDAT);
+ const int asce_tl = asce & ASCE_TABLE_LENGTH;
+ const int asce_p = asce & ASCE_PRIVATE_SPACE;
+ hwaddr gaddr = asce & ASCE_ORIGIN;
+ uint64_t entry;
if (asce & ASCE_REAL_SPACE) {
- /* direct mapping */
*raddr = vaddr;
return 0;
}
- level = asce & ASCE_TYPE_MASK;
- switch (level) {
+ switch (asce & ASCE_TYPE_MASK) {
case ASCE_TYPE_REGION1:
- if ((vaddr >> 62) > (asce & ASCE_TABLE_LENGTH)) {
- trigger_page_fault(env, vaddr, PGM_REG_FIRST_TRANS, asc, rw, exc);
- return -1;
+ if (VADDR_REGION1_TL(vaddr) > asce_tl) {
+ return PGM_REG_FIRST_TRANS;
}
+ gaddr += VADDR_REGION1_TX(vaddr) * 8;
break;
case ASCE_TYPE_REGION2:
- if (vaddr & 0xffe0000000000000ULL) {
- DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
- " 0xffe0000000000000ULL\n", __func__, vaddr);
- trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
- return -1;
+ if (VADDR_REGION1_TX(vaddr)) {
+ return PGM_ASCE_TYPE;
}
- if ((vaddr >> 51 & 3) > (asce & ASCE_TABLE_LENGTH)) {
- trigger_page_fault(env, vaddr, PGM_REG_SEC_TRANS, asc, rw, exc);
- return -1;
+ if (VADDR_REGION2_TL(vaddr) > asce_tl) {
+ return PGM_REG_SEC_TRANS;
}
+ gaddr += VADDR_REGION2_TX(vaddr) * 8;
break;
case ASCE_TYPE_REGION3:
- if (vaddr & 0xfffffc0000000000ULL) {
- DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
- " 0xfffffc0000000000ULL\n", __func__, vaddr);
- trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
- return -1;
+ if (VADDR_REGION1_TX(vaddr) || VADDR_REGION2_TX(vaddr)) {
+ return PGM_ASCE_TYPE;
}
- if ((vaddr >> 40 & 3) > (asce & ASCE_TABLE_LENGTH)) {
- trigger_page_fault(env, vaddr, PGM_REG_THIRD_TRANS, asc, rw, exc);
- return -1;
+ if (VADDR_REGION3_TL(vaddr) > asce_tl) {
+ return PGM_REG_THIRD_TRANS;
}
+ gaddr += VADDR_REGION3_TX(vaddr) * 8;
break;
case ASCE_TYPE_SEGMENT:
- if (vaddr & 0xffffffff80000000ULL) {
- DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
- " 0xffffffff80000000ULL\n", __func__, vaddr);
- trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
- return -1;
+ if (VADDR_REGION1_TX(vaddr) || VADDR_REGION2_TX(vaddr) ||
+ VADDR_REGION3_TX(vaddr)) {
+ return PGM_ASCE_TYPE;
}
- if ((vaddr >> 29 & 3) > (asce & ASCE_TABLE_LENGTH)) {
- trigger_page_fault(env, vaddr, PGM_SEGMENT_TRANS, asc, rw, exc);
- return -1;
+ if (VADDR_SEGMENT_TL(vaddr) > asce_tl) {
+ return PGM_SEGMENT_TRANS;
}
+ gaddr += VADDR_SEGMENT_TX(vaddr) * 8;
+ break;
+ }
+ switch (asce & ASCE_TYPE_MASK) {
+ case ASCE_TYPE_REGION1:
+ if (read_table_entry(gaddr, &entry)) {
+ return PGM_ADDRESSING;
+ }
+ if (entry & REGION_ENTRY_I) {
+ return PGM_REG_FIRST_TRANS;
+ }
+ if ((entry & REGION_ENTRY_TT) != REGION_ENTRY_TT_REGION1) {
+ return PGM_TRANS_SPEC;
+ }
+ if (VADDR_REGION2_TL(vaddr) < (entry & REGION_ENTRY_TF) >> 6 ||
+ VADDR_REGION2_TL(vaddr) > (entry & REGION_ENTRY_TL)) {
+ return PGM_REG_SEC_TRANS;
+ }
+ if (edat1 && (entry & REGION_ENTRY_P)) {
+ *flags &= ~PAGE_WRITE;
+ }
+ gaddr = (entry & REGION_ENTRY_ORIGIN) + VADDR_REGION2_TX(vaddr) * 8;
+ /* FALL THROUGH */
+ case ASCE_TYPE_REGION2:
+ if (read_table_entry(gaddr, &entry)) {
+ return PGM_ADDRESSING;
+ }
+ if (entry & REGION_ENTRY_I) {
+ return PGM_REG_SEC_TRANS;
+ }
+ if ((entry & REGION_ENTRY_TT) != REGION_ENTRY_TT_REGION2) {
+ return PGM_TRANS_SPEC;
+ }
+ if (VADDR_REGION3_TL(vaddr) < (entry & REGION_ENTRY_TF) >> 6 ||
+ VADDR_REGION3_TL(vaddr) > (entry & REGION_ENTRY_TL)) {
+ return PGM_REG_THIRD_TRANS;
+ }
+ if (edat1 && (entry & REGION_ENTRY_P)) {
+ *flags &= ~PAGE_WRITE;
+ }
+ gaddr = (entry & REGION_ENTRY_ORIGIN) + VADDR_REGION3_TX(vaddr) * 8;
+ /* FALL THROUGH */
+ case ASCE_TYPE_REGION3:
+ if (read_table_entry(gaddr, &entry)) {
+ return PGM_ADDRESSING;
+ }
+ if (entry & REGION_ENTRY_I) {
+ return PGM_REG_THIRD_TRANS;
+ }
+ if ((entry & REGION_ENTRY_TT) != REGION_ENTRY_TT_REGION3) {
+ return PGM_TRANS_SPEC;
+ }
+ if (edat1 && (entry & REGION_ENTRY_P)) {
+ *flags &= ~PAGE_WRITE;
+ }
+ if (VADDR_SEGMENT_TL(vaddr) < (entry & REGION_ENTRY_TF) >> 6 ||
+ VADDR_SEGMENT_TL(vaddr) > (entry & REGION_ENTRY_TL)) {
+ return PGM_SEGMENT_TRANS;
+ }
+ gaddr = (entry & REGION_ENTRY_ORIGIN) + VADDR_SEGMENT_TX(vaddr) * 8;
+ /* FALL THROUGH */
+ case ASCE_TYPE_SEGMENT:
+ if (read_table_entry(gaddr, &entry)) {
+ return PGM_ADDRESSING;
+ }
+ if (entry & SEGMENT_ENTRY_I) {
+ return PGM_SEGMENT_TRANS;
+ }
+ if ((entry & SEGMENT_ENTRY_TT) != SEGMENT_ENTRY_TT_SEGMENT) {
+ return PGM_TRANS_SPEC;
+ }
+ if ((entry & SEGMENT_ENTRY_CS) && asce_p) {
+ return PGM_TRANS_SPEC;
+ }
+ if (entry & SEGMENT_ENTRY_P) {
+ *flags &= ~PAGE_WRITE;
+ }
+ if (edat1 && (entry & SEGMENT_ENTRY_FC)) {
+ *raddr = entry & SEGMENT_ENTRY_SFAA;
+ return 0;
+ }
+ gaddr = (entry & SEGMENT_ENTRY_ORIGIN) + VADDR_PAGE_TX(vaddr) * 8;
break;
}
- r = mmu_translate_region(env, vaddr, asc, asce, level, raddr, flags, rw,
- exc);
- if (!r && rw == MMU_DATA_STORE && !(*flags & PAGE_WRITE)) {
- trigger_prot_fault(env, vaddr, asc, rw, exc);
- return -1;
+ if (read_table_entry(gaddr, &entry)) {
+ return PGM_ADDRESSING;
+ }
+ if (entry & PAGE_ENTRY_I) {
+ return PGM_PAGE_TRANS;
+ }
+ if (entry & PAGE_ENTRY_0) {
+ return PGM_TRANS_SPEC;
+ }
+ if (entry & PAGE_ENTRY_P) {
+ *flags &= ~PAGE_WRITE;
}
- return r;
+ *raddr = entry & TARGET_PAGE_MASK;
+ return 0;
}
/**
@@ -347,9 +297,14 @@ static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
target_ulong *raddr, int *flags, bool exc)
{
+ /* Code accesses have an undefined ilc, let's use 2 bytes. */
+ const int ilen = (rw == MMU_INST_FETCH) ? 2 : ILEN_AUTO;
+ uint64_t tec = (vaddr & TARGET_PAGE_MASK) | (asc >> 46) |
+ (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ);
+ uint64_t asce;
static S390SKeysState *ss;
static S390SKeysClass *skeyclass;
- int r = -1;
+ int r;
uint8_t key;
if (unlikely(!ss)) {
@@ -380,25 +335,21 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
if (!(env->psw.mask & PSW_MASK_DAT)) {
*raddr = vaddr;
- r = 0;
- goto out;
+ goto nodat;
}
switch (asc) {
case PSW_ASC_PRIMARY:
PTE_DPRINTF("%s: asc=primary\n", __func__);
- r = mmu_translate_asce(env, vaddr, asc, env->cregs[1], raddr, flags,
- rw, exc);
+ asce = env->cregs[1];
break;
case PSW_ASC_HOME:
PTE_DPRINTF("%s: asc=home\n", __func__);
- r = mmu_translate_asce(env, vaddr, asc, env->cregs[13], raddr, flags,
- rw, exc);
+ asce = env->cregs[13];
break;
case PSW_ASC_SECONDARY:
PTE_DPRINTF("%s: asc=secondary\n", __func__);
- r = mmu_translate_asce(env, vaddr, PSW_ASC_SECONDARY, env->cregs[7],
- raddr, flags, rw, exc);
+ asce = env->cregs[7];
break;
case PSW_ASC_ACCREG:
default:
@@ -406,11 +357,30 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
break;
}
- out:
+ /* perform the translation */
+ r = mmu_translate_asce(env, vaddr, asce, raddr, flags);
+ if (r) {
+ if (exc) {
+ trigger_access_exception(env, r, ilen, tec);
+ }
+ return -1;
+ }
+
+ /* check for DAT protection */
+ if (rw == MMU_DATA_STORE && !(*flags & PAGE_WRITE)) {
+ if (exc) {
+ /* DAT sets bit 61 only */
+ tec |= 0x4;
+ trigger_access_exception(env, PGM_PROTECTION, ilen, tec);
+ }
+ return -1;
+ }
+
+nodat:
/* Convert real address -> absolute address */
*raddr = mmu_real2abs(env, *raddr);
- if (r == 0 && *raddr < ram_size) {
+ if (*raddr < ram_size) {
if (skeyclass->get_skeys(ss, *raddr / TARGET_PAGE_SIZE, 1, &key)) {
trace_get_skeys_nonzero(r);
return 0;
@@ -430,7 +400,7 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
}
}
- return r;
+ return 0;
}
/**
--
2.21.0