From: Peter Maydell
Subject: [Qemu-arm] [PATCH 36/42] target/arm: Convert VFP round insns to decodetree
Date: Thu, 6 Jun 2019 18:46:03 +0100
Convert the VFP round-to-integer instructions VRINTR, VRINTZ and
VRINTX to decodetree.
These instructions were only introduced as part of the "VFP misc"
additions in v8A, so we check for this. The old decoder incorrectly
provided them even on v7A CPUs.
Signed-off-by: Peter Maydell <address@hidden>
---
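(Below-the-line note for reviewers, not part of the commit message: as a
reminder of how these decodetree conversions work, the generated decoder
fills in an argument struct for each matched pattern and then calls the
corresponding trans_ function. The hand-written sketch below only
illustrates the shape of that generated code for the VRINTR_sp pattern;
the _sketch name is made up here, and the real generator output routes
the %vd_sp/%vm_sp field mungers through its own extract helpers.)

    /*
     * Illustrative only: roughly what decodetree emits for
     *   VRINTR_sp ---- 1110 1.11 0110 .... 1010 01.0 .... vd=%vd_sp vm=%vm_sp
     */
    static bool decode_VRINTR_sp_sketch(DisasContext *s, uint32_t insn)
    {
        arg_VRINTR_sp a;

        /* %vd_sp: Sd is Vd:D, i.e. insn[15:12] with insn[22] as the low bit */
        a.vd = (extract32(insn, 12, 4) << 1) | extract32(insn, 22, 1);
        /* %vm_sp: Sm is Vm:M, i.e. insn[3:0] with insn[5] as the low bit */
        a.vm = (extract32(insn, 0, 4) << 1) | extract32(insn, 5, 1);
        return trans_VRINTR_sp(s, &a);
    }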
 target/arm/translate-vfp.inc.c | 163 +++++++++++++++++++++++++++++++++
 target/arm/translate.c         |  45 +--------
 target/arm/vfp.decode          |  15 +++
 3 files changed, 179 insertions(+), 44 deletions(-)
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index d2ae148ca69..5768be40c3e 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -2149,3 +2149,166 @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
     tcg_temp_free_i32(tmp);
     return true;
 }
+
+static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
+{
+    TCGv_ptr fpst;
+    TCGv_i32 tmp;
+
+    if (!dc_isar_feature(aa32_vrint, s)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    tmp = tcg_temp_new_i32();
+    neon_load_reg32(tmp, a->vm);
+    fpst = get_fpstatus_ptr(false);
+    gen_helper_rints(tmp, tmp, fpst);
+    neon_store_reg32(tmp, a->vd);
+    tcg_temp_free_ptr(fpst);
+    tcg_temp_free_i32(tmp);
+    return true;
+}
+
+static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
+{
+    TCGv_ptr fpst;
+    TCGv_i64 tmp;
+
+    if (!dc_isar_feature(aa32_vrint, s)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    tmp = tcg_temp_new_i64();
+    neon_load_reg64(tmp, a->vm);
+    fpst = get_fpstatus_ptr(false);
+    gen_helper_rintd(tmp, tmp, fpst);
+    neon_store_reg64(tmp, a->vd);
+    tcg_temp_free_ptr(fpst);
+    tcg_temp_free_i64(tmp);
+    return true;
+}
+
+static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
+{
+    TCGv_ptr fpst;
+    TCGv_i32 tmp;
+    TCGv_i32 tcg_rmode;
+
+    if (!dc_isar_feature(aa32_vrint, s)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    tmp = tcg_temp_new_i32();
+    neon_load_reg32(tmp, a->vm);
+    fpst = get_fpstatus_ptr(false);
+    tcg_rmode = tcg_const_i32(float_round_to_zero);
+    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+    gen_helper_rints(tmp, tmp, fpst);
+    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+    neon_store_reg32(tmp, a->vd);
+    tcg_temp_free_ptr(fpst);
+    tcg_temp_free_i32(tcg_rmode);
+    tcg_temp_free_i32(tmp);
+    return true;
+}
+
+static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
+{
+    TCGv_ptr fpst;
+    TCGv_i64 tmp;
+    TCGv_i32 tcg_rmode;
+
+    if (!dc_isar_feature(aa32_vrint, s)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    tmp = tcg_temp_new_i64();
+    neon_load_reg64(tmp, a->vm);
+    fpst = get_fpstatus_ptr(false);
+    tcg_rmode = tcg_const_i32(float_round_to_zero);
+    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+    gen_helper_rintd(tmp, tmp, fpst);
+    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+    neon_store_reg64(tmp, a->vd);
+    tcg_temp_free_ptr(fpst);
+    tcg_temp_free_i64(tmp);
+    tcg_temp_free_i32(tcg_rmode);
+    return true;
+}
+
+static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
+{
+    TCGv_ptr fpst;
+    TCGv_i32 tmp;
+
+    if (!dc_isar_feature(aa32_vrint, s)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    tmp = tcg_temp_new_i32();
+    neon_load_reg32(tmp, a->vm);
+    fpst = get_fpstatus_ptr(false);
+    gen_helper_rints_exact(tmp, tmp, fpst);
+    neon_store_reg32(tmp, a->vd);
+    tcg_temp_free_ptr(fpst);
+    tcg_temp_free_i32(tmp);
+    return true;
+}
+
+static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
+{
+    TCGv_ptr fpst;
+    TCGv_i64 tmp;
+
+    if (!dc_isar_feature(aa32_vrint, s)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    tmp = tcg_temp_new_i64();
+    neon_load_reg64(tmp, a->vm);
+    fpst = get_fpstatus_ptr(false);
+    gen_helper_rintd_exact(tmp, tmp, fpst);
+    neon_store_reg64(tmp, a->vd);
+    tcg_temp_free_ptr(fpst);
+    tcg_temp_free_i64(tmp);
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 3edcd7beff3..e7831bf8abb 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -3050,7 +3050,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
                 return 1;
             case 15:
                 switch (rn) {
-                case 0 ... 11:
+                case 0 ... 14:
                     /* Already handled by decodetree */
                     return 1;
                 default:
@@ -3063,11 +3063,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
             if (op == 15) {
                 /* rn is opcode, encoded as per VFP_SREG_N. */
                 switch (rn) {
-                case 0x0c: /* vrintr */
-                case 0x0d: /* vrintz */
-                case 0x0e: /* vrintx */
-                    break;
-
                 case 0x0f: /* vcvt double<->single */
                     rd_is_dp = !dp;
                     break;
@@ -3190,44 +3185,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
             switch (op) {
             case 15: /* extension space */
                 switch (rn) {
-                case 12: /* vrintr */
-                {
-                    TCGv_ptr fpst = get_fpstatus_ptr(0);
-                    if (dp) {
-                        gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
-                    } else {
-                        gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
-                    }
-                    tcg_temp_free_ptr(fpst);
-                    break;
-                }
-                case 13: /* vrintz */
-                {
-                    TCGv_ptr fpst = get_fpstatus_ptr(0);
-                    TCGv_i32 tcg_rmode;
-                    tcg_rmode = tcg_const_i32(float_round_to_zero);
-                    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
-                    if (dp) {
-                        gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
-                    } else {
-                        gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
-                    }
-                    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
-                    tcg_temp_free_i32(tcg_rmode);
-                    tcg_temp_free_ptr(fpst);
-                    break;
-                }
-                case 14: /* vrintx */
-                {
-                    TCGv_ptr fpst = get_fpstatus_ptr(0);
-                    if (dp) {
-                        gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
-                    } else {
-                        gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
-                    }
-                    tcg_temp_free_ptr(fpst);
-                    break;
-                }
                 case 15: /* single<->double conversion */
                     if (dp) {
                         gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index b88d1d06f02..9942d2ae7ad 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -193,3 +193,18 @@ VCVT_f16_f32 ---- 1110 1.11 0011 .... 1010 t:1 1.0 .... \
              vd=%vd_sp vm=%vm_sp
 VCVT_f16_f64 ---- 1110 1.11 0011 .... 1011 t:1 1.0 .... \
              vd=%vd_sp vm=%vm_dp
+
+VRINTR_sp    ---- 1110 1.11 0110 .... 1010 01.0 .... \
+             vd=%vd_sp vm=%vm_sp
+VRINTR_dp    ---- 1110 1.11 0110 .... 1011 01.0 .... \
+             vd=%vd_dp vm=%vm_dp
+
+VRINTZ_sp    ---- 1110 1.11 0110 .... 1010 11.0 .... \
+             vd=%vd_sp vm=%vm_sp
+VRINTZ_dp    ---- 1110 1.11 0110 .... 1011 11.0 .... \
+             vd=%vd_dp vm=%vm_dp
+
+VRINTX_sp    ---- 1110 1.11 0111 .... 1010 01.0 .... \
+             vd=%vd_sp vm=%vm_sp
+VRINTX_dp    ---- 1110 1.11 0111 .... 1011 01.0 .... \
+             vd=%vd_dp vm=%vm_dp
--
2.20.1