[Qemu-ppc] [PATCH 03/20] ppc: avoid AREG0 for FPU and SPE helpers
From: Blue Swirl
Subject: [Qemu-ppc] [PATCH 03/20] ppc: avoid AREG0 for FPU and SPE helpers
Date: Sat, 31 Mar 2012 16:26:16 +0000
Add an explicit CPUPPCState parameter instead of relying on AREG0.
Signed-off-by: Blue Swirl <address@hidden>
---
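A minimal standalone sketch of the conversion pattern, for readers new to
the series (not part of the patch; every name below is invented purely for
illustration): the CPU state stops being reached through the implicit AREG0
global register and becomes an explicit first parameter passed by the caller.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for CPUPPCState, only here to keep the sketch self-contained. */
typedef struct {
    uint32_t fpscr;
} ToyCPUState;

/* Old shape: the helper reaches the CPU state through a global pointer,
 * which is what the AREG0/dyngen-exec.h machinery provided. */
static ToyCPUState *global_env;

static uint32_t helper_old_style(uint32_t arg)
{
    return arg ^ global_env->fpscr;   /* implicit state access */
}

/* New shape: the state is an explicit first argument, so the helper no
 * longer depends on a reserved global register. */
static uint32_t helper_new_style(ToyCPUState *env, uint32_t arg)
{
    return arg ^ env->fpscr;          /* same computation, state explicit */
}

int main(void)
{
    ToyCPUState cpu = { .fpscr = 0x55 };

    global_env = &cpu;
    printf("old=%u new=%u\n", helper_old_style(0xff),
           helper_new_style(&cpu, 0xff));
    return 0;
}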
Makefile.target | 3 -
target-ppc/fpu_helper.c | 350 ++++++++++++++++++++++++-----------------------
target-ppc/helper.h | 204 ++++++++++++++--------------
target-ppc/translate.c | 106 ++++++++-------
4 files changed, 339 insertions(+), 324 deletions(-)
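The conversion touches three coordinated places per helper. Taking fadd as a
reference (these lines are gathered from the hunks below; the translate.c call
is the two-operand generator macro from the hunks, written out for fadd):

/* helper.h: the DEF_HELPER_n count grows by one slot for env */
DEF_HELPER_3(fadd, i64, env, i64, i64)

/* fpu_helper.c: the helper takes the CPU state explicitly */
uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2);

/* translate.c: call sites pass cpu_env as the new first argument */
gen_helper_fadd(cpu_fpr[rD(ctx->opcode)], cpu_env,
                cpu_fpr[rA(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);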
diff --git a/Makefile.target b/Makefile.target
index 34ac9d1..898cdd4 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -117,9 +117,6 @@ $(libobj-y): $(GENERATED_HEADERS)
ifneq ($(TARGET_BASE_ARCH), sparc)
op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
endif
-ifeq ($(TARGET_BASE_ARCH), ppc)
-fpu_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
-endif
user-exec.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
# Note: this is a workaround. The real fix is to avoid compiling
diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c
index 33f6830..a0302a2 100644
--- a/target-ppc/fpu_helper.c
+++ b/target-ppc/fpu_helper.c
@@ -17,12 +17,11 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "cpu.h"
-#include "dyngen-exec.h"
#include "helper.h"
/*****************************************************************************/
/* Floating point operations helpers */
-uint64_t helper_float32_to_float64(uint32_t arg)
+uint64_t helper_float32_to_float64(CPUPPCState *env, uint32_t arg)
{
CPU_FloatU f;
CPU_DoubleU d;
@@ -32,7 +31,7 @@ uint64_t helper_float32_to_float64(uint32_t arg)
return d.ll;
}
-uint32_t helper_float64_to_float32(uint64_t arg)
+uint32_t helper_float64_to_float32(CPUPPCState *env, uint64_t arg)
{
CPU_FloatU f;
CPU_DoubleU d;
@@ -51,7 +50,7 @@ static inline int isden(float64 d)
return ((u.ll >> 52) & 0x7FF) == 0;
}
-uint32_t helper_compute_fprf(uint64_t arg, uint32_t set_fprf)
+uint32_t helper_compute_fprf(CPUPPCState *env, uint64_t arg, uint32_t set_fprf)
{
CPU_DoubleU farg;
int isneg;
@@ -107,7 +106,7 @@ uint32_t helper_compute_fprf(uint64_t arg, uint32_t set_fprf)
}
/* Floating-point invalid operations exception */
-static inline uint64_t fload_invalid_op_excp(int op)
+static inline uint64_t fload_invalid_op_excp(CPUPPCState *env, int op)
{
uint64_t ret = 0;
int ve;
@@ -190,7 +189,7 @@ static inline uint64_t fload_invalid_op_excp(int op)
return ret;
}
-static inline void float_zero_divide_excp(void)
+static inline void float_zero_divide_excp(CPUPPCState *env)
{
env->fpscr |= 1 << FPSCR_ZX;
env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
@@ -206,7 +205,7 @@ static inline void float_zero_divide_excp(void)
}
}
-static inline void float_overflow_excp(void)
+static inline void float_overflow_excp(CPUPPCState *env)
{
env->fpscr |= 1 << FPSCR_OX;
/* Update the floating-point exception summary */
@@ -224,7 +223,7 @@ static inline void float_overflow_excp(void)
}
}
-static inline void float_underflow_excp(void)
+static inline void float_underflow_excp(CPUPPCState *env)
{
env->fpscr |= 1 << FPSCR_UX;
/* Update the floating-point exception summary */
@@ -239,7 +238,7 @@ static inline void float_underflow_excp(void)
}
}
-static inline void float_inexact_excp(void)
+static inline void float_inexact_excp(CPUPPCState *env)
{
env->fpscr |= 1 << FPSCR_XX;
/* Update the floating-point exception summary */
@@ -253,7 +252,7 @@ static inline void float_inexact_excp(void)
}
}
-static inline void fpscr_set_rounding_mode(void)
+static inline void fpscr_set_rounding_mode(CPUPPCState *env)
{
int rnd_type;
@@ -280,7 +279,7 @@ static inline void fpscr_set_rounding_mode(void)
set_float_rounding_mode(rnd_type, &env->fp_status);
}
-void helper_fpscr_clrbit(uint32_t bit)
+void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
int prev;
@@ -290,7 +289,7 @@ void helper_fpscr_clrbit(uint32_t bit)
switch (bit) {
case FPSCR_RN1:
case FPSCR_RN:
- fpscr_set_rounding_mode();
+ fpscr_set_rounding_mode(env);
break;
default:
break;
@@ -298,7 +297,7 @@ void helper_fpscr_clrbit(uint32_t bit)
}
}
-void helper_fpscr_setbit(uint32_t bit)
+void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
int prev;
@@ -414,7 +413,7 @@ void helper_fpscr_setbit(uint32_t bit)
break;
case FPSCR_RN1:
case FPSCR_RN:
- fpscr_set_rounding_mode();
+ fpscr_set_rounding_mode(env);
break;
default:
break;
@@ -428,7 +427,7 @@ void helper_fpscr_setbit(uint32_t bit)
}
}
-void helper_store_fpscr(uint64_t arg, uint32_t mask)
+void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
/*
* We use only the 32 LSB of the incoming fpr
@@ -460,10 +459,10 @@ void helper_store_fpscr(uint64_t arg, uint32_t mask)
} else {
env->fpscr &= ~(1 << FPSCR_FEX);
}
- fpscr_set_rounding_mode();
+ fpscr_set_rounding_mode(env);
}
-void helper_float_check_status(void)
+void helper_float_check_status(CPUPPCState *env)
{
if (env->exception_index == POWERPC_EXCP_PROGRAM &&
(env->error_code & POWERPC_EXCP_FP)) {
@@ -474,24 +473,24 @@ void helper_float_check_status(void)
} else {
int status = get_float_exception_flags(&env->fp_status);
if (status & float_flag_divbyzero) {
- float_zero_divide_excp();
+ float_zero_divide_excp(env);
} else if (status & float_flag_overflow) {
- float_overflow_excp();
+ float_overflow_excp(env);
} else if (status & float_flag_underflow) {
- float_underflow_excp();
+ float_underflow_excp(env);
} else if (status & float_flag_inexact) {
- float_inexact_excp();
+ float_inexact_excp(env);
}
}
}
-void helper_reset_fpstatus(void)
+void helper_reset_fpstatus(CPUPPCState *env)
{
set_float_exception_flags(0, &env->fp_status);
}
/* fadd - fadd. */
-uint64_t helper_fadd(uint64_t arg1, uint64_t arg2)
+uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
CPU_DoubleU farg1, farg2;
@@ -501,12 +500,12 @@ uint64_t helper_fadd(uint64_t arg1, uint64_t arg2)
if (unlikely(float64_is_infinity(farg1.d) &&
float64_is_infinity(farg2.d) &&
float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
/* Magnitude subtraction of infinities */
- farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
+ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI);
} else {
if (unlikely(float64_is_signaling_nan(farg1.d) ||
float64_is_signaling_nan(farg2.d))) {
/* sNaN addition */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
}
farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
}
@@ -515,7 +514,7 @@ uint64_t helper_fadd(uint64_t arg1, uint64_t arg2)
}
/* fsub - fsub. */
-uint64_t helper_fsub(uint64_t arg1, uint64_t arg2)
+uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
CPU_DoubleU farg1, farg2;
@@ -525,12 +524,12 @@ uint64_t helper_fsub(uint64_t arg1, uint64_t arg2)
if (unlikely(float64_is_infinity(farg1.d) &&
float64_is_infinity(farg2.d) &&
float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
/* Magnitude subtraction of infinities */
- farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
+ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI);
} else {
if (unlikely(float64_is_signaling_nan(farg1.d) ||
float64_is_signaling_nan(farg2.d))) {
/* sNaN subtraction */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
}
farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
}
@@ -539,7 +538,7 @@ uint64_t helper_fsub(uint64_t arg1, uint64_t arg2)
}
/* fmul - fmul. */
-uint64_t helper_fmul(uint64_t arg1, uint64_t arg2)
+uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
CPU_DoubleU farg1, farg2;
@@ -549,12 +548,12 @@ uint64_t helper_fmul(uint64_t arg1, uint64_t arg2)
if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
(float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
/* Multiplication of zero by infinity */
- farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
+ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ);
} else {
if (unlikely(float64_is_signaling_nan(farg1.d) ||
float64_is_signaling_nan(farg2.d))) {
/* sNaN multiplication */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
}
farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
}
@@ -563,7 +562,7 @@ uint64_t helper_fmul(uint64_t arg1, uint64_t arg2)
}
/* fdiv - fdiv. */
-uint64_t helper_fdiv(uint64_t arg1, uint64_t arg2)
+uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
CPU_DoubleU farg1, farg2;
@@ -573,15 +572,15 @@ uint64_t helper_fdiv(uint64_t arg1, uint64_t arg2)
if (unlikely(float64_is_infinity(farg1.d) &&
float64_is_infinity(farg2.d))) {
/* Division of infinity by infinity */
- farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
+ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI);
} else if (unlikely(float64_is_zero(farg1.d) &&
float64_is_zero(farg2.d))) {
/* Division of zero by zero */
- farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
+ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ);
} else {
if (unlikely(float64_is_signaling_nan(farg1.d) ||
float64_is_signaling_nan(farg2.d))) {
/* sNaN division */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
}
farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
}
@@ -590,7 +589,7 @@ uint64_t helper_fdiv(uint64_t arg1, uint64_t arg2)
}
/* fabs */
-uint64_t helper_fabs(uint64_t arg)
+uint64_t helper_fabs(CPUPPCState *env, uint64_t arg)
{
CPU_DoubleU farg;
@@ -600,7 +599,7 @@ uint64_t helper_fabs(uint64_t arg)
}
/* fnabs */
-uint64_t helper_fnabs(uint64_t arg)
+uint64_t helper_fnabs(CPUPPCState *env, uint64_t arg)
{
CPU_DoubleU farg;
@@ -611,7 +610,7 @@ uint64_t helper_fnabs(uint64_t arg)
}
/* fneg */
-uint64_t helper_fneg(uint64_t arg)
+uint64_t helper_fneg(CPUPPCState *env, uint64_t arg)
{
CPU_DoubleU farg;
@@ -621,7 +620,7 @@ uint64_t helper_fneg(uint64_t arg)
}
/* fctiw - fctiw. */
-uint64_t helper_fctiw(uint64_t arg)
+uint64_t helper_fctiw(CPUPPCState *env, uint64_t arg)
{
CPU_DoubleU farg;
@@ -629,12 +628,12 @@ uint64_t helper_fctiw(uint64_t arg)
if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN conversion */
- farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
+ farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
POWERPC_EXCP_FP_VXCVI);
} else if (unlikely(float64_is_quiet_nan(farg.d) ||
float64_is_infinity(farg.d))) {
/* qNan / infinity conversion */
- farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
+ farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI);
} else {
farg.ll = float64_to_int32(farg.d, &env->fp_status);
/* XXX: higher bits are not supposed to be significant.
@@ -646,7 +645,7 @@ uint64_t helper_fctiw(uint64_t arg)
}
/* fctiwz - fctiwz. */
-uint64_t helper_fctiwz(uint64_t arg)
+uint64_t helper_fctiwz(CPUPPCState *env, uint64_t arg)
{
CPU_DoubleU farg;
@@ -654,12 +653,12 @@ uint64_t helper_fctiwz(uint64_t arg)
if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN conversion */
- farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
+ farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
POWERPC_EXCP_FP_VXCVI);
} else if (unlikely(float64_is_quiet_nan(farg.d) ||
float64_is_infinity(farg.d))) {
/* qNan / infinity conversion */
- farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
+ farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI);
} else {
farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
/* XXX: higher bits are not supposed to be significant.
@@ -672,7 +671,7 @@ uint64_t helper_fctiwz(uint64_t arg)
#if defined(TARGET_PPC64)
/* fcfid - fcfid. */
-uint64_t helper_fcfid(uint64_t arg)
+uint64_t helper_fcfid(CPUPPCState *env, uint64_t arg)
{
CPU_DoubleU farg;
@@ -681,7 +680,7 @@ uint64_t helper_fcfid(uint64_t arg)
}
/* fctid - fctid. */
-uint64_t helper_fctid(uint64_t arg)
+uint64_t helper_fctid(CPUPPCState *env, uint64_t arg)
{
CPU_DoubleU farg;
@@ -689,12 +688,12 @@ uint64_t helper_fctid(uint64_t arg)
if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN conversion */
- farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
+ farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
POWERPC_EXCP_FP_VXCVI);
} else if (unlikely(float64_is_quiet_nan(farg.d) ||
float64_is_infinity(farg.d))) {
/* qNan / infinity conversion */
- farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
+ farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI);
} else {
farg.ll = float64_to_int64(farg.d, &env->fp_status);
}
@@ -702,7 +701,7 @@ uint64_t helper_fctid(uint64_t arg)
}
/* fctidz - fctidz. */
-uint64_t helper_fctidz(uint64_t arg)
+uint64_t helper_fctidz(CPUPPCState *env, uint64_t arg)
{
CPU_DoubleU farg;
@@ -710,12 +709,12 @@ uint64_t helper_fctidz(uint64_t arg)
if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN conversion */
- farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
+ farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
POWERPC_EXCP_FP_VXCVI);
} else if (unlikely(float64_is_quiet_nan(farg.d) ||
float64_is_infinity(farg.d))) {
/* qNan / infinity conversion */
- farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
+ farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI);
} else {
farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
}
@@ -724,7 +723,8 @@ uint64_t helper_fctidz(uint64_t arg)
#endif
-static inline uint64_t do_fri(uint64_t arg, int rounding_mode)
+static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
+ int rounding_mode)
{
CPU_DoubleU farg;
@@ -732,43 +732,44 @@ static inline uint64_t do_fri(uint64_t arg, int rounding_mode)
if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN round */
- farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
+ farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
POWERPC_EXCP_FP_VXCVI);
} else if (unlikely(float64_is_quiet_nan(farg.d) ||
float64_is_infinity(farg.d))) {
/* qNan / infinity round */
- farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
+ farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI);
} else {
set_float_rounding_mode(rounding_mode, &env->fp_status);
farg.ll = float64_round_to_int(farg.d, &env->fp_status);
/* Restore rounding mode from FPSCR */
- fpscr_set_rounding_mode();
+ fpscr_set_rounding_mode(env);
}
return farg.ll;
}
-uint64_t helper_frin(uint64_t arg)
+uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
- return do_fri(arg, float_round_nearest_even);
+ return do_fri(env, arg, float_round_nearest_even);
}
-uint64_t helper_friz(uint64_t arg)
+uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
- return do_fri(arg, float_round_to_zero);
+ return do_fri(env, arg, float_round_to_zero);
}
-uint64_t helper_frip(uint64_t arg)
+uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
- return do_fri(arg, float_round_up);
+ return do_fri(env, arg, float_round_up);
}
-uint64_t helper_frim(uint64_t arg)
+uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
- return do_fri(arg, float_round_down);
+ return do_fri(env, arg, float_round_down);
}
/* fmadd - fmadd. */
-uint64_t helper_fmadd(uint64_t arg1, uint64_t arg2, uint64_t arg3)
+uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
+ uint64_t arg3)
{
CPU_DoubleU farg1, farg2, farg3;
@@ -779,13 +780,13 @@ uint64_t helper_fmadd(uint64_t arg1, uint64_t arg2, uint64_t arg3)
if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
(float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
/* Multiplication of zero by infinity */
- farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
+ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ);
} else {
if (unlikely(float64_is_signaling_nan(farg1.d) ||
float64_is_signaling_nan(farg2.d) ||
float64_is_signaling_nan(farg3.d))) {
/* sNaN operation */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
}
/* This is the way the PowerPC specification defines it */
float128 ft0_128, ft1_128;
@@ -797,7 +798,7 @@ uint64_t helper_fmadd(uint64_t arg1, uint64_t arg2, uint64_t arg3)
float64_is_infinity(farg3.d) &&
float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
/* Magnitude subtraction of infinities */
- farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
+ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI);
} else {
ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
@@ -809,7 +810,8 @@ uint64_t helper_fmadd(uint64_t arg1, uint64_t arg2, uint64_t arg3)
}
/* fmsub - fmsub. */
-uint64_t helper_fmsub(uint64_t arg1, uint64_t arg2, uint64_t arg3)
+uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
+ uint64_t arg3)
{
CPU_DoubleU farg1, farg2, farg3;
@@ -821,13 +823,13 @@ uint64_t helper_fmsub(uint64_t arg1, uint64_t arg2, uint64_t arg3)
(float64_is_zero(farg1.d) &&
float64_is_infinity(farg2.d)))) {
/* Multiplication of zero by infinity */
- farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
+ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ);
} else {
if (unlikely(float64_is_signaling_nan(farg1.d) ||
float64_is_signaling_nan(farg2.d) ||
float64_is_signaling_nan(farg3.d))) {
/* sNaN operation */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
}
/* This is the way the PowerPC specification defines it */
float128 ft0_128, ft1_128;
@@ -839,7 +841,7 @@ uint64_t helper_fmsub(uint64_t arg1, uint64_t arg2, uint64_t arg3)
float64_is_infinity(farg3.d) &&
float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
/* Magnitude subtraction of infinities */
- farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
+ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI);
} else {
ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
@@ -850,7 +852,8 @@ uint64_t helper_fmsub(uint64_t arg1, uint64_t arg2, uint64_t arg3)
}
/* fnmadd - fnmadd. */
-uint64_t helper_fnmadd(uint64_t arg1, uint64_t arg2, uint64_t arg3)
+uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
+ uint64_t arg3)
{
CPU_DoubleU farg1, farg2, farg3;
@@ -861,13 +864,13 @@ uint64_t helper_fnmadd(uint64_t arg1, uint64_t arg2, uint64_t arg3)
if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
(float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
/* Multiplication of zero by infinity */
- farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
+ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ);
} else {
if (unlikely(float64_is_signaling_nan(farg1.d) ||
float64_is_signaling_nan(farg2.d) ||
float64_is_signaling_nan(farg3.d))) {
/* sNaN operation */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
}
/* This is the way the PowerPC specification defines it */
float128 ft0_128, ft1_128;
@@ -879,7 +882,7 @@ uint64_t helper_fnmadd(uint64_t arg1, uint64_t arg2, uint64_t arg3)
float64_is_infinity(farg3.d) &&
float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
/* Magnitude subtraction of infinities */
- farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
+ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI);
} else {
ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
@@ -893,7 +896,8 @@ uint64_t helper_fnmadd(uint64_t arg1, uint64_t arg2, uint64_t arg3)
}
/* fnmsub - fnmsub. */
-uint64_t helper_fnmsub(uint64_t arg1, uint64_t arg2, uint64_t arg3)
+uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
+ uint64_t arg3)
{
CPU_DoubleU farg1, farg2, farg3;
@@ -905,13 +909,13 @@ uint64_t helper_fnmsub(uint64_t arg1, uint64_t arg2, uint64_t arg3)
(float64_is_zero(farg1.d) &&
float64_is_infinity(farg2.d)))) {
/* Multiplication of zero by infinity */
- farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
+ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ);
} else {
if (unlikely(float64_is_signaling_nan(farg1.d) ||
float64_is_signaling_nan(farg2.d) ||
float64_is_signaling_nan(farg3.d))) {
/* sNaN operation */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
}
/* This is the way the PowerPC specification defines it */
float128 ft0_128, ft1_128;
@@ -923,7 +927,7 @@ uint64_t helper_fnmsub(uint64_t arg1, uint64_t arg2, uint64_t arg3)
float64_is_infinity(farg3.d) &&
float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
/* Magnitude subtraction of infinities */
- farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
+ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI);
} else {
ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
@@ -937,7 +941,7 @@ uint64_t helper_fnmsub(uint64_t arg1, uint64_t arg2, uint64_t arg3)
}
/* frsp - frsp. */
-uint64_t helper_frsp(uint64_t arg)
+uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
CPU_DoubleU farg;
float32 f32;
@@ -946,7 +950,7 @@ uint64_t helper_frsp(uint64_t arg)
if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN square root */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
}
f32 = float64_to_float32(farg.d, &env->fp_status);
farg.d = float32_to_float64(f32, &env->fp_status);
@@ -955,7 +959,7 @@ uint64_t helper_frsp(uint64_t arg)
}
/* fsqrt - fsqrt. */
-uint64_t helper_fsqrt(uint64_t arg)
+uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
{
CPU_DoubleU farg;
@@ -963,11 +967,11 @@ uint64_t helper_fsqrt(uint64_t arg)
if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
/* Square root of a negative nonzero number */
- farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
+ farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT);
} else {
if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN square root */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
}
farg.d = float64_sqrt(farg.d, &env->fp_status);
}
@@ -975,7 +979,7 @@ uint64_t helper_fsqrt(uint64_t arg)
}
/* fre - fre. */
-uint64_t helper_fre(uint64_t arg)
+uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
{
CPU_DoubleU farg;
@@ -983,14 +987,14 @@ uint64_t helper_fre(uint64_t arg)
if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN reciprocal */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
}
farg.d = float64_div(float64_one, farg.d, &env->fp_status);
return farg.d;
}
/* fres - fres. */
-uint64_t helper_fres(uint64_t arg)
+uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
CPU_DoubleU farg;
float32 f32;
@@ -999,7 +1003,7 @@ uint64_t helper_fres(uint64_t arg)
if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN reciprocal */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
}
farg.d = float64_div(float64_one, farg.d, &env->fp_status);
f32 = float64_to_float32(farg.d, &env->fp_status);
@@ -1009,7 +1013,7 @@ uint64_t helper_fres(uint64_t arg)
}
/* frsqrte - frsqrte. */
-uint64_t helper_frsqrte(uint64_t arg)
+uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
{
CPU_DoubleU farg;
float32 f32;
@@ -1018,11 +1022,11 @@ uint64_t helper_frsqrte(uint64_t arg)
if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
/* Reciprocal square root of a negative nonzero number */
- farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
+ farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT);
} else {
if (unlikely(float64_is_signaling_nan(farg.d))) {
/* sNaN reciprocal square root */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
}
farg.d = float64_sqrt(farg.d, &env->fp_status);
farg.d = float64_div(float64_one, farg.d, &env->fp_status);
@@ -1033,7 +1037,8 @@ uint64_t helper_frsqrte(uint64_t arg)
}
/* fsel - fsel. */
-uint64_t helper_fsel(uint64_t arg1, uint64_t arg2, uint64_t arg3)
+uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
+ uint64_t arg3)
{
CPU_DoubleU farg1;
@@ -1047,7 +1052,8 @@ uint64_t helper_fsel(uint64_t arg1, uint64_t arg2, uint64_t arg3)
}
}
-void helper_fcmpu(uint64_t arg1, uint64_t arg2, uint32_t crfD)
+void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
+ uint32_t crfD)
{
CPU_DoubleU farg1, farg2;
uint32_t ret = 0;
@@ -1073,11 +1079,12 @@ void helper_fcmpu(uint64_t arg1, uint64_t arg2, uint32_t crfD)
&& (float64_is_signaling_nan(farg1.d) ||
float64_is_signaling_nan(farg2.d)))) {
/* sNaN comparison */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
}
}
-void helper_fcmpo(uint64_t arg1, uint64_t arg2, uint32_t crfD)
+void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
+ uint32_t crfD)
{
CPU_DoubleU farg1, farg2;
uint32_t ret = 0;
@@ -1103,17 +1110,17 @@ void helper_fcmpo(uint64_t arg1, uint64_t arg2, uint32_t crfD)
if (float64_is_signaling_nan(farg1.d) ||
float64_is_signaling_nan(farg2.d)) {
/* sNaN comparison */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
POWERPC_EXCP_FP_VXVC);
} else {
/* qNaN comparison */
- fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
+ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC);
}
}
}
/* Single-precision floating-point conversions */
-static inline uint32_t efscfsi(uint32_t val)
+static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
CPU_FloatU u;
@@ -1122,7 +1129,7 @@ static inline uint32_t efscfsi(uint32_t val)
return u.l;
}
-static inline uint32_t efscfui(uint32_t val)
+static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
CPU_FloatU u;
@@ -1131,7 +1138,7 @@ static inline uint32_t efscfui(uint32_t val)
return u.l;
}
-static inline int32_t efsctsi(uint32_t val)
+static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
CPU_FloatU u;
@@ -1144,7 +1151,7 @@ static inline int32_t efsctsi(uint32_t val)
return float32_to_int32(u.f, &env->vec_status);
}
-static inline uint32_t efsctui(uint32_t val)
+static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
CPU_FloatU u;
@@ -1157,7 +1164,7 @@ static inline uint32_t efsctui(uint32_t val)
return float32_to_uint32(u.f, &env->vec_status);
}
-static inline uint32_t efsctsiz(uint32_t val)
+static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
CPU_FloatU u;
@@ -1170,7 +1177,7 @@ static inline uint32_t efsctsiz(uint32_t val)
return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}
-static inline uint32_t efsctuiz(uint32_t val)
+static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
CPU_FloatU u;
@@ -1183,7 +1190,7 @@ static inline uint32_t efsctuiz(uint32_t val)
return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}
-static inline uint32_t efscfsf(uint32_t val)
+static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
CPU_FloatU u;
float32 tmp;
@@ -1195,7 +1202,7 @@ static inline uint32_t efscfsf(uint32_t val)
return u.l;
}
-static inline uint32_t efscfuf(uint32_t val)
+static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
CPU_FloatU u;
float32 tmp;
@@ -1207,7 +1214,7 @@ static inline uint32_t efscfuf(uint32_t val)
return u.l;
}
-static inline uint32_t efsctsf(uint32_t val)
+static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
CPU_FloatU u;
float32 tmp;
@@ -1223,7 +1230,7 @@ static inline uint32_t efsctsf(uint32_t val)
return float32_to_int32(u.f, &env->vec_status);
}
-static inline uint32_t efsctuf(uint32_t val)
+static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
CPU_FloatU u;
float32 tmp;
@@ -1239,10 +1246,10 @@ static inline uint32_t efsctuf(uint32_t val)
return float32_to_uint32(u.f, &env->vec_status);
}
-#define HELPER_SPE_SINGLE_CONV(name) \
- uint32_t helper_e##name(uint32_t val) \
- { \
- return e##name(val); \
+#define HELPER_SPE_SINGLE_CONV(name) \
+ uint32_t helper_e##name(CPUPPCState *env, uint32_t val) \
+ { \
+ return e##name(env, val); \
}
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
@@ -1265,11 +1272,11 @@ HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
-#define HELPER_SPE_VECTOR_CONV(name) \
- uint64_t helper_ev##name(uint64_t val) \
- { \
- return ((uint64_t)e##name(val >> 32) << 32) | \
- (uint64_t)e##name(val); \
+#define HELPER_SPE_VECTOR_CONV(name) \
+ uint64_t helper_ev##name(CPUPPCState *env, uint64_t val) \
+ { \
+ return ((uint64_t)e##name(env, val >> 32) << 32) | \
+ (uint64_t)e##name(env, val); \
}
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
@@ -1293,7 +1300,7 @@ HELPER_SPE_VECTOR_CONV(fsctsf);
HELPER_SPE_VECTOR_CONV(fsctuf);
/* Single-precision floating-point arithmetic */
-static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
+static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
CPU_FloatU u1, u2;
@@ -1303,7 +1310,7 @@ static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
return u1.l;
}
-static inline uint32_t efssub(uint32_t op1, uint32_t op2)
+static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
CPU_FloatU u1, u2;
@@ -1313,7 +1320,7 @@ static inline uint32_t efssub(uint32_t op1, uint32_t op2)
return u1.l;
}
-static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
+static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
CPU_FloatU u1, u2;
@@ -1323,7 +1330,7 @@ static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
return u1.l;
}
-static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
+static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
CPU_FloatU u1, u2;
@@ -1333,10 +1340,10 @@ static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
return u1.l;
}
-#define HELPER_SPE_SINGLE_ARITH(name) \
- uint32_t helper_e##name(uint32_t op1, uint32_t op2) \
- { \
- return e##name(op1, op2); \
+#define HELPER_SPE_SINGLE_ARITH(name) \
+ uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
+ { \
+ return e##name(env, op1, op2); \
}
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
@@ -1348,10 +1355,10 @@ HELPER_SPE_SINGLE_ARITH(fsmul);
HELPER_SPE_SINGLE_ARITH(fsdiv);
#define HELPER_SPE_VECTOR_ARITH(name) \
- uint64_t helper_ev##name(uint64_t op1, uint64_t op2) \
+ uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
{ \
- return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \
- (uint64_t)e##name(op1, op2); \
+ return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) | \
+ (uint64_t)e##name(env, op1, op2); \
}
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
@@ -1363,7 +1370,7 @@ HELPER_SPE_VECTOR_ARITH(fsmul);
HELPER_SPE_VECTOR_ARITH(fsdiv);
/* Single-precision floating-point comparisons */
-static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
+static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
CPU_FloatU u1, u2;
@@ -1372,7 +1379,7 @@ static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}
-static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
+static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
CPU_FloatU u1, u2;
@@ -1381,7 +1388,7 @@ static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}
-static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
+static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
CPU_FloatU u1, u2;
@@ -1390,28 +1397,28 @@ static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}
-static inline uint32_t efststlt(uint32_t op1, uint32_t op2)
+static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
/* XXX: TODO: ignore special values (NaN, infinites, ...) */
- return efscmplt(op1, op2);
+ return efscmplt(env, op1, op2);
}
-static inline uint32_t efststgt(uint32_t op1, uint32_t op2)
+static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
/* XXX: TODO: ignore special values (NaN, infinites, ...) */
- return efscmpgt(op1, op2);
+ return efscmpgt(env, op1, op2);
}
-static inline uint32_t efststeq(uint32_t op1, uint32_t op2)
+static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
/* XXX: TODO: ignore special values (NaN, infinites, ...) */
- return efscmpeq(op1, op2);
+ return efscmpeq(env, op1, op2);
}
-#define HELPER_SINGLE_SPE_CMP(name) \
- uint32_t helper_e##name(uint32_t op1, uint32_t op2) \
- { \
- return e##name(op1, op2) << 2; \
+#define HELPER_SINGLE_SPE_CMP(name) \
+ uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
+ { \
+ return e##name(env, op1, op2) << 2; \
}
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
@@ -1432,9 +1439,10 @@ static inline uint32_t evcmp_merge(int t0, int t1)
}
#define HELPER_VECTOR_SPE_CMP(name) \
- uint32_t helper_ev##name(uint64_t op1, uint64_t op2) \
+ uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
{ \
- return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
+ return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32), \
+ e##name(env, op1, op2)); \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
@@ -1450,7 +1458,7 @@ HELPER_VECTOR_SPE_CMP(fscmpgt);
HELPER_VECTOR_SPE_CMP(fscmpeq);
/* Double-precision floating-point conversion */
-uint64_t helper_efdcfsi(uint32_t val)
+uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
CPU_DoubleU u;
@@ -1459,7 +1467,7 @@ uint64_t helper_efdcfsi(uint32_t val)
return u.ll;
}
-uint64_t helper_efdcfsid(uint64_t val)
+uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
CPU_DoubleU u;
@@ -1468,7 +1476,7 @@ uint64_t helper_efdcfsid(uint64_t val)
return u.ll;
}
-uint64_t helper_efdcfui(uint32_t val)
+uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
CPU_DoubleU u;
@@ -1477,7 +1485,7 @@ uint64_t helper_efdcfui(uint32_t val)
return u.ll;
}
-uint64_t helper_efdcfuid(uint64_t val)
+uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
CPU_DoubleU u;
@@ -1486,7 +1494,7 @@ uint64_t helper_efdcfuid(uint64_t val)
return u.ll;
}
-uint32_t helper_efdctsi(uint64_t val)
+uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
CPU_DoubleU u;
@@ -1499,7 +1507,7 @@ uint32_t helper_efdctsi(uint64_t val)
return float64_to_int32(u.d, &env->vec_status);
}
-uint32_t helper_efdctui(uint64_t val)
+uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
CPU_DoubleU u;
@@ -1512,7 +1520,7 @@ uint32_t helper_efdctui(uint64_t val)
return float64_to_uint32(u.d, &env->vec_status);
}
-uint32_t helper_efdctsiz(uint64_t val)
+uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
CPU_DoubleU u;
@@ -1525,7 +1533,7 @@ uint32_t helper_efdctsiz(uint64_t val)
return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}
-uint64_t helper_efdctsidz(uint64_t val)
+uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
CPU_DoubleU u;
@@ -1538,7 +1546,7 @@ uint64_t helper_efdctsidz(uint64_t val)
return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}
-uint32_t helper_efdctuiz(uint64_t val)
+uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
CPU_DoubleU u;
@@ -1551,7 +1559,7 @@ uint32_t helper_efdctuiz(uint64_t val)
return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}
-uint64_t helper_efdctuidz(uint64_t val)
+uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
CPU_DoubleU u;
@@ -1564,7 +1572,7 @@ uint64_t helper_efdctuidz(uint64_t val)
return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}
-uint64_t helper_efdcfsf(uint32_t val)
+uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
CPU_DoubleU u;
float64 tmp;
@@ -1576,7 +1584,7 @@ uint64_t helper_efdcfsf(uint32_t val)
return u.ll;
}
-uint64_t helper_efdcfuf(uint32_t val)
+uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
{
CPU_DoubleU u;
float64 tmp;
@@ -1588,7 +1596,7 @@ uint64_t helper_efdcfuf(uint32_t val)
return u.ll;
}
-uint32_t helper_efdctsf(uint64_t val)
+uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
CPU_DoubleU u;
float64 tmp;
@@ -1604,7 +1612,7 @@ uint32_t helper_efdctsf(uint64_t val)
return float64_to_int32(u.d, &env->vec_status);
}
-uint32_t helper_efdctuf(uint64_t val)
+uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
CPU_DoubleU u;
float64 tmp;
@@ -1620,7 +1628,7 @@ uint32_t helper_efdctuf(uint64_t val)
return float64_to_uint32(u.d, &env->vec_status);
}
-uint32_t helper_efscfd(uint64_t val)
+uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
CPU_DoubleU u1;
CPU_FloatU u2;
@@ -1631,7 +1639,7 @@ uint32_t helper_efscfd(uint64_t val)
return u2.l;
}
-uint64_t helper_efdcfs(uint32_t val)
+uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
CPU_DoubleU u2;
CPU_FloatU u1;
@@ -1643,7 +1651,7 @@ uint64_t helper_efdcfs(uint32_t val)
}
/* Double precision fixed-point arithmetic */
-uint64_t helper_efdadd(uint64_t op1, uint64_t op2)
+uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
CPU_DoubleU u1, u2;
@@ -1653,7 +1661,7 @@ uint64_t helper_efdadd(uint64_t op1, uint64_t op2)
return u1.ll;
}
-uint64_t helper_efdsub(uint64_t op1, uint64_t op2)
+uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
CPU_DoubleU u1, u2;
@@ -1663,7 +1671,7 @@ uint64_t helper_efdsub(uint64_t op1, uint64_t op2)
return u1.ll;
}
-uint64_t helper_efdmul(uint64_t op1, uint64_t op2)
+uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
CPU_DoubleU u1, u2;
@@ -1673,7 +1681,7 @@ uint64_t helper_efdmul(uint64_t op1, uint64_t op2)
return u1.ll;
}
-uint64_t helper_efddiv(uint64_t op1, uint64_t op2)
+uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
CPU_DoubleU u1, u2;
@@ -1684,7 +1692,7 @@ uint64_t helper_efddiv(uint64_t op1, uint64_t op2)
}
/* Double precision floating point helpers */
-uint32_t helper_efdtstlt(uint64_t op1, uint64_t op2)
+uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
CPU_DoubleU u1, u2;
@@ -1693,7 +1701,7 @@ uint32_t helper_efdtstlt(uint64_t op1, uint64_t op2)
return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}
-uint32_t helper_efdtstgt(uint64_t op1, uint64_t op2)
+uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
CPU_DoubleU u1, u2;
@@ -1702,7 +1710,7 @@ uint32_t helper_efdtstgt(uint64_t op1, uint64_t op2)
return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}
-uint32_t helper_efdtsteq(uint64_t op1, uint64_t op2)
+uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
CPU_DoubleU u1, u2;
@@ -1711,20 +1719,20 @@ uint32_t helper_efdtsteq(uint64_t op1, uint64_t op2)
return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}
-uint32_t helper_efdcmplt(uint64_t op1, uint64_t op2)
+uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
/* XXX: TODO: test special values (NaN, infinites, ...) */
- return helper_efdtstlt(op1, op2);
+ return helper_efdtstlt(env, op1, op2);
}
-uint32_t helper_efdcmpgt(uint64_t op1, uint64_t op2)
+uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
/* XXX: TODO: test special values (NaN, infinites, ...) */
- return helper_efdtstgt(op1, op2);
+ return helper_efdtstgt(env, op1, op2);
}
-uint32_t helper_efdcmpeq(uint64_t op1, uint64_t op2)
+uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
/* XXX: TODO: test special values (NaN, infinites, ...) */
- return helper_efdtsteq(op1, op2);
+ return helper_efdtsteq(env, op1, op2);
}
diff --git a/target-ppc/helper.h b/target-ppc/helper.h
index 148543a..2be1069 100644
--- a/target-ppc/helper.h
+++ b/target-ppc/helper.h
@@ -50,47 +50,47 @@ DEF_HELPER_FLAGS_1(cntlsw32, TCG_CALL_CONST | TCG_CALL_PURE, i32, i32)
DEF_HELPER_FLAGS_1(cntlzw32, TCG_CALL_CONST | TCG_CALL_PURE, i32, i32)
DEF_HELPER_FLAGS_2(brinc, TCG_CALL_CONST | TCG_CALL_PURE, tl, tl, tl)
-DEF_HELPER_0(float_check_status, void)
-DEF_HELPER_0(reset_fpstatus, void)
-DEF_HELPER_2(compute_fprf, i32, i64, i32)
-DEF_HELPER_2(store_fpscr, void, i64, i32)
-DEF_HELPER_1(fpscr_clrbit, void, i32)
-DEF_HELPER_1(fpscr_setbit, void, i32)
-DEF_HELPER_1(float64_to_float32, i32, i64)
-DEF_HELPER_1(float32_to_float64, i64, i32)
+DEF_HELPER_1(float_check_status, void, env)
+DEF_HELPER_1(reset_fpstatus, void, env)
+DEF_HELPER_3(compute_fprf, i32, env, i64, i32)
+DEF_HELPER_3(store_fpscr, void, env, i64, i32)
+DEF_HELPER_2(fpscr_clrbit, void, env, i32)
+DEF_HELPER_2(fpscr_setbit, void, env, i32)
+DEF_HELPER_2(float64_to_float32, i32, env, i64)
+DEF_HELPER_2(float32_to_float64, i64, env, i32)
-DEF_HELPER_3(fcmpo, void, i64, i64, i32)
-DEF_HELPER_3(fcmpu, void, i64, i64, i32)
+DEF_HELPER_4(fcmpo, void, env, i64, i64, i32)
+DEF_HELPER_4(fcmpu, void, env, i64, i64, i32)
-DEF_HELPER_1(fctiw, i64, i64)
-DEF_HELPER_1(fctiwz, i64, i64)
+DEF_HELPER_2(fctiw, i64, env, i64)
+DEF_HELPER_2(fctiwz, i64, env, i64)
#if defined(TARGET_PPC64)
-DEF_HELPER_1(fcfid, i64, i64)
-DEF_HELPER_1(fctid, i64, i64)
-DEF_HELPER_1(fctidz, i64, i64)
+DEF_HELPER_2(fcfid, i64, env, i64)
+DEF_HELPER_2(fctid, i64, env, i64)
+DEF_HELPER_2(fctidz, i64, env, i64)
#endif
-DEF_HELPER_1(frsp, i64, i64)
-DEF_HELPER_1(frin, i64, i64)
-DEF_HELPER_1(friz, i64, i64)
-DEF_HELPER_1(frip, i64, i64)
-DEF_HELPER_1(frim, i64, i64)
+DEF_HELPER_2(frsp, i64, env, i64)
+DEF_HELPER_2(frin, i64, env, i64)
+DEF_HELPER_2(friz, i64, env, i64)
+DEF_HELPER_2(frip, i64, env, i64)
+DEF_HELPER_2(frim, i64, env, i64)
-DEF_HELPER_2(fadd, i64, i64, i64)
-DEF_HELPER_2(fsub, i64, i64, i64)
-DEF_HELPER_2(fmul, i64, i64, i64)
-DEF_HELPER_2(fdiv, i64, i64, i64)
-DEF_HELPER_3(fmadd, i64, i64, i64, i64)
-DEF_HELPER_3(fmsub, i64, i64, i64, i64)
-DEF_HELPER_3(fnmadd, i64, i64, i64, i64)
-DEF_HELPER_3(fnmsub, i64, i64, i64, i64)
-DEF_HELPER_1(fabs, i64, i64)
-DEF_HELPER_1(fnabs, i64, i64)
-DEF_HELPER_1(fneg, i64, i64)
-DEF_HELPER_1(fsqrt, i64, i64)
-DEF_HELPER_1(fre, i64, i64)
-DEF_HELPER_1(fres, i64, i64)
-DEF_HELPER_1(frsqrte, i64, i64)
-DEF_HELPER_3(fsel, i64, i64, i64, i64)
+DEF_HELPER_3(fadd, i64, env, i64, i64)
+DEF_HELPER_3(fsub, i64, env, i64, i64)
+DEF_HELPER_3(fmul, i64, env, i64, i64)
+DEF_HELPER_3(fdiv, i64, env, i64, i64)
+DEF_HELPER_4(fmadd, i64, env, i64, i64, i64)
+DEF_HELPER_4(fmsub, i64, env, i64, i64, i64)
+DEF_HELPER_4(fnmadd, i64, env, i64, i64, i64)
+DEF_HELPER_4(fnmsub, i64, env, i64, i64, i64)
+DEF_HELPER_2(fabs, i64, env, i64)
+DEF_HELPER_2(fnabs, i64, env, i64)
+DEF_HELPER_2(fneg, i64, env, i64)
+DEF_HELPER_2(fsqrt, i64, env, i64)
+DEF_HELPER_2(fre, i64, env, i64)
+DEF_HELPER_2(fres, i64, env, i64)
+DEF_HELPER_2(frsqrte, i64, env, i64)
+DEF_HELPER_4(fsel, i64, env, i64, i64, i64)
#define dh_alias_avr ptr
#define dh_ctype_avr ppc_avr_t *
@@ -256,72 +256,72 @@ DEF_HELPER_3(vcfsx, void, avr, avr, i32)
DEF_HELPER_3(vctuxs, void, avr, avr, i32)
DEF_HELPER_3(vctsxs, void, avr, avr, i32)
-DEF_HELPER_1(efscfsi, i32, i32)
-DEF_HELPER_1(efscfui, i32, i32)
-DEF_HELPER_1(efscfuf, i32, i32)
-DEF_HELPER_1(efscfsf, i32, i32)
-DEF_HELPER_1(efsctsi, i32, i32)
-DEF_HELPER_1(efsctui, i32, i32)
-DEF_HELPER_1(efsctsiz, i32, i32)
-DEF_HELPER_1(efsctuiz, i32, i32)
-DEF_HELPER_1(efsctsf, i32, i32)
-DEF_HELPER_1(efsctuf, i32, i32)
-DEF_HELPER_1(evfscfsi, i64, i64)
-DEF_HELPER_1(evfscfui, i64, i64)
-DEF_HELPER_1(evfscfuf, i64, i64)
-DEF_HELPER_1(evfscfsf, i64, i64)
-DEF_HELPER_1(evfsctsi, i64, i64)
-DEF_HELPER_1(evfsctui, i64, i64)
-DEF_HELPER_1(evfsctsiz, i64, i64)
-DEF_HELPER_1(evfsctuiz, i64, i64)
-DEF_HELPER_1(evfsctsf, i64, i64)
-DEF_HELPER_1(evfsctuf, i64, i64)
-DEF_HELPER_2(efsadd, i32, i32, i32)
-DEF_HELPER_2(efssub, i32, i32, i32)
-DEF_HELPER_2(efsmul, i32, i32, i32)
-DEF_HELPER_2(efsdiv, i32, i32, i32)
-DEF_HELPER_2(evfsadd, i64, i64, i64)
-DEF_HELPER_2(evfssub, i64, i64, i64)
-DEF_HELPER_2(evfsmul, i64, i64, i64)
-DEF_HELPER_2(evfsdiv, i64, i64, i64)
-DEF_HELPER_2(efststlt, i32, i32, i32)
-DEF_HELPER_2(efststgt, i32, i32, i32)
-DEF_HELPER_2(efststeq, i32, i32, i32)
-DEF_HELPER_2(efscmplt, i32, i32, i32)
-DEF_HELPER_2(efscmpgt, i32, i32, i32)
-DEF_HELPER_2(efscmpeq, i32, i32, i32)
-DEF_HELPER_2(evfststlt, i32, i64, i64)
-DEF_HELPER_2(evfststgt, i32, i64, i64)
-DEF_HELPER_2(evfststeq, i32, i64, i64)
-DEF_HELPER_2(evfscmplt, i32, i64, i64)
-DEF_HELPER_2(evfscmpgt, i32, i64, i64)
-DEF_HELPER_2(evfscmpeq, i32, i64, i64)
-DEF_HELPER_1(efdcfsi, i64, i32)
-DEF_HELPER_1(efdcfsid, i64, i64)
-DEF_HELPER_1(efdcfui, i64, i32)
-DEF_HELPER_1(efdcfuid, i64, i64)
-DEF_HELPER_1(efdctsi, i32, i64)
-DEF_HELPER_1(efdctui, i32, i64)
-DEF_HELPER_1(efdctsiz, i32, i64)
-DEF_HELPER_1(efdctsidz, i64, i64)
-DEF_HELPER_1(efdctuiz, i32, i64)
-DEF_HELPER_1(efdctuidz, i64, i64)
-DEF_HELPER_1(efdcfsf, i64, i32)
-DEF_HELPER_1(efdcfuf, i64, i32)
-DEF_HELPER_1(efdctsf, i32, i64)
-DEF_HELPER_1(efdctuf, i32, i64)
-DEF_HELPER_1(efscfd, i32, i64)
-DEF_HELPER_1(efdcfs, i64, i32)
-DEF_HELPER_2(efdadd, i64, i64, i64)
-DEF_HELPER_2(efdsub, i64, i64, i64)
-DEF_HELPER_2(efdmul, i64, i64, i64)
-DEF_HELPER_2(efddiv, i64, i64, i64)
-DEF_HELPER_2(efdtstlt, i32, i64, i64)
-DEF_HELPER_2(efdtstgt, i32, i64, i64)
-DEF_HELPER_2(efdtsteq, i32, i64, i64)
-DEF_HELPER_2(efdcmplt, i32, i64, i64)
-DEF_HELPER_2(efdcmpgt, i32, i64, i64)
-DEF_HELPER_2(efdcmpeq, i32, i64, i64)
+DEF_HELPER_2(efscfsi, i32, env, i32)
+DEF_HELPER_2(efscfui, i32, env, i32)
+DEF_HELPER_2(efscfuf, i32, env, i32)
+DEF_HELPER_2(efscfsf, i32, env, i32)
+DEF_HELPER_2(efsctsi, i32, env, i32)
+DEF_HELPER_2(efsctui, i32, env, i32)
+DEF_HELPER_2(efsctsiz, i32, env, i32)
+DEF_HELPER_2(efsctuiz, i32, env, i32)
+DEF_HELPER_2(efsctsf, i32, env, i32)
+DEF_HELPER_2(efsctuf, i32, env, i32)
+DEF_HELPER_2(evfscfsi, i64, env, i64)
+DEF_HELPER_2(evfscfui, i64, env, i64)
+DEF_HELPER_2(evfscfuf, i64, env, i64)
+DEF_HELPER_2(evfscfsf, i64, env, i64)
+DEF_HELPER_2(evfsctsi, i64, env, i64)
+DEF_HELPER_2(evfsctui, i64, env, i64)
+DEF_HELPER_2(evfsctsiz, i64, env, i64)
+DEF_HELPER_2(evfsctuiz, i64, env, i64)
+DEF_HELPER_2(evfsctsf, i64, env, i64)
+DEF_HELPER_2(evfsctuf, i64, env, i64)
+DEF_HELPER_3(efsadd, i32, env, i32, i32)
+DEF_HELPER_3(efssub, i32, env, i32, i32)
+DEF_HELPER_3(efsmul, i32, env, i32, i32)
+DEF_HELPER_3(efsdiv, i32, env, i32, i32)
+DEF_HELPER_3(evfsadd, i64, env, i64, i64)
+DEF_HELPER_3(evfssub, i64, env, i64, i64)
+DEF_HELPER_3(evfsmul, i64, env, i64, i64)
+DEF_HELPER_3(evfsdiv, i64, env, i64, i64)
+DEF_HELPER_3(efststlt, i32, env, i32, i32)
+DEF_HELPER_3(efststgt, i32, env, i32, i32)
+DEF_HELPER_3(efststeq, i32, env, i32, i32)
+DEF_HELPER_3(efscmplt, i32, env, i32, i32)
+DEF_HELPER_3(efscmpgt, i32, env, i32, i32)
+DEF_HELPER_3(efscmpeq, i32, env, i32, i32)
+DEF_HELPER_3(evfststlt, i32, env, i64, i64)
+DEF_HELPER_3(evfststgt, i32, env, i64, i64)
+DEF_HELPER_3(evfststeq, i32, env, i64, i64)
+DEF_HELPER_3(evfscmplt, i32, env, i64, i64)
+DEF_HELPER_3(evfscmpgt, i32, env, i64, i64)
+DEF_HELPER_3(evfscmpeq, i32, env, i64, i64)
+DEF_HELPER_2(efdcfsi, i64, env, i32)
+DEF_HELPER_2(efdcfsid, i64, env, i64)
+DEF_HELPER_2(efdcfui, i64, env, i32)
+DEF_HELPER_2(efdcfuid, i64, env, i64)
+DEF_HELPER_2(efdctsi, i32, env, i64)
+DEF_HELPER_2(efdctui, i32, env, i64)
+DEF_HELPER_2(efdctsiz, i32, env, i64)
+DEF_HELPER_2(efdctsidz, i64, env, i64)
+DEF_HELPER_2(efdctuiz, i32, env, i64)
+DEF_HELPER_2(efdctuidz, i64, env, i64)
+DEF_HELPER_2(efdcfsf, i64, env, i32)
+DEF_HELPER_2(efdcfuf, i64, env, i32)
+DEF_HELPER_2(efdctsf, i32, env, i64)
+DEF_HELPER_2(efdctuf, i32, env, i64)
+DEF_HELPER_2(efscfd, i32, env, i64)
+DEF_HELPER_2(efdcfs, i64, env, i32)
+DEF_HELPER_3(efdadd, i64, env, i64, i64)
+DEF_HELPER_3(efdsub, i64, env, i64, i64)
+DEF_HELPER_3(efdmul, i64, env, i64, i64)
+DEF_HELPER_3(efddiv, i64, env, i64, i64)
+DEF_HELPER_3(efdtstlt, i32, env, i64, i64)
+DEF_HELPER_3(efdtstgt, i32, env, i64, i64)
+DEF_HELPER_3(efdtsteq, i32, env, i64, i64)
+DEF_HELPER_3(efdcmplt, i32, env, i64, i64)
+DEF_HELPER_3(efdcmpgt, i32, env, i64, i64)
+DEF_HELPER_3(efdcmpeq, i32, env, i64, i64)
#if !defined(CONFIG_USER_ONLY)
DEF_HELPER_1(4xx_tlbre_hi, tl, tl)
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index c9a503a..3edef34 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -219,7 +219,7 @@ struct opc_handler_t {
static inline void gen_reset_fpstatus(void)
{
- gen_helper_reset_fpstatus();
+ gen_helper_reset_fpstatus(cpu_env);
}
static inline void gen_compute_fprf(TCGv_i64 arg, int set_fprf, int set_rc)
@@ -229,15 +229,15 @@ static inline void gen_compute_fprf(TCGv_i64 arg, int set_fprf, int set_rc)
if (set_fprf != 0) {
/* This case might be optimized later */
tcg_gen_movi_i32(t0, 1);
- gen_helper_compute_fprf(t0, arg, t0);
+ gen_helper_compute_fprf(t0, cpu_env, arg, t0);
if (unlikely(set_rc)) {
tcg_gen_mov_i32(cpu_crf[1], t0);
}
- gen_helper_float_check_status();
+ gen_helper_float_check_status(cpu_env);
} else if (unlikely(set_rc)) {
/* We always need to compute fpcc */
tcg_gen_movi_i32(t0, 0);
- gen_helper_compute_fprf(t0, arg, t0);
+ gen_helper_compute_fprf(t0, cpu_env, arg, t0);
tcg_gen_mov_i32(cpu_crf[1], t0);
}
@@ -2027,10 +2027,12 @@ static void gen_f##name(DisasContext *ctx) \
/* NIP cannot be restored if the memory exception comes from an helper */ \
gen_update_nip(ctx, ctx->nip - 4); \
gen_reset_fpstatus(); \
- gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)], \
+ gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rA(ctx->opcode)], \
cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
if (isfloat) { \
- gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); \
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rD(ctx->opcode)]); \
} \
gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], set_fprf, \
Rc(ctx->opcode) != 0); \
@@ -2050,10 +2052,12 @@ static void gen_f##name(DisasContext *ctx) \
/* NIP cannot be restored if the memory exception comes from an helper */ \
gen_update_nip(ctx, ctx->nip - 4); \
gen_reset_fpstatus(); \
- gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)], \
+ gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rA(ctx->opcode)], \
cpu_fpr[rB(ctx->opcode)]); \
if (isfloat) { \
- gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); \
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rD(ctx->opcode)]); \
} \
gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
set_fprf, Rc(ctx->opcode) != 0); \
@@ -2072,10 +2076,12 @@ static void gen_f##name(DisasContext *ctx) \
/* NIP cannot be restored if the memory exception comes from an helper */ \
gen_update_nip(ctx, ctx->nip - 4); \
gen_reset_fpstatus(); \
- gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)], \
- cpu_fpr[rC(ctx->opcode)]); \
+ gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rA(ctx->opcode)], \
+ cpu_fpr[rC(ctx->opcode)]); \
if (isfloat) { \
- gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]); \
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rD(ctx->opcode)]); \
} \
gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
set_fprf, Rc(ctx->opcode) != 0); \
@@ -2094,7 +2100,8 @@ static void gen_f##name(DisasContext *ctx) \
/* NIP cannot be restored if the memory exception comes from an helper */ \
gen_update_nip(ctx, ctx->nip - 4); \
gen_reset_fpstatus(); \
- gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
+ gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rB(ctx->opcode)]); \
gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
set_fprf, Rc(ctx->opcode) != 0); \
}
@@ -2109,7 +2116,8 @@ static void gen_f##name(DisasContext *ctx) \
/* NIP cannot be restored if the memory exception comes from an helper */ \
gen_update_nip(ctx, ctx->nip - 4); \
gen_reset_fpstatus(); \
- gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
+ gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rB(ctx->opcode)]); \
gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], \
set_fprf, Rc(ctx->opcode) != 0); \
}
@@ -2140,8 +2148,8 @@ static void gen_frsqrtes(DisasContext *ctx)
/* NIP cannot be restored if the memory exception comes from an helper */
gen_update_nip(ctx, ctx->nip - 4);
gen_reset_fpstatus();
- gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
- gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]);
+ gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_env, cpu_fpr[rB(ctx->opcode)]);
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, cpu_fpr[rD(ctx->opcode)]);
gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
}
@@ -2161,7 +2169,7 @@ static void gen_fsqrt(DisasContext *ctx)
/* NIP cannot be restored if the memory exception comes from an helper */
gen_update_nip(ctx, ctx->nip - 4);
gen_reset_fpstatus();
- gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
+ gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env, cpu_fpr[rB(ctx->opcode)]);
gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
}
@@ -2174,8 +2182,8 @@ static void gen_fsqrts(DisasContext *ctx)
/* NIP cannot be restored if the memory exception comes from an helper */
gen_update_nip(ctx, ctx->nip - 4);
gen_reset_fpstatus();
- gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
- gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rD(ctx->opcode)]);
+ gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env, cpu_fpr[rB(ctx->opcode)]);
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, cpu_fpr[rD(ctx->opcode)]);
gen_compute_fprf(cpu_fpr[rD(ctx->opcode)], 1, Rc(ctx->opcode) != 0);
}
@@ -2228,9 +2236,9 @@ static void gen_fcmpo(DisasContext *ctx)
gen_update_nip(ctx, ctx->nip - 4);
gen_reset_fpstatus();
crf = tcg_const_i32(crfD(ctx->opcode));
- gen_helper_fcmpo(cpu_fpr[rA(ctx->opcode)], cpu_fpr[rB(ctx->opcode)], crf);
+ gen_helper_fcmpo(cpu_env, cpu_fpr[rA(ctx->opcode)], cpu_fpr[rB(ctx->opcode)], crf);
tcg_temp_free_i32(crf);
- gen_helper_float_check_status();
+ gen_helper_float_check_status(cpu_env);
}
/* fcmpu */
@@ -2245,9 +2253,9 @@ static void gen_fcmpu(DisasContext *ctx)
gen_update_nip(ctx, ctx->nip - 4);
gen_reset_fpstatus();
crf = tcg_const_i32(crfD(ctx->opcode));
- gen_helper_fcmpu(cpu_fpr[rA(ctx->opcode)], cpu_fpr[rB(ctx->opcode)], crf);
+ gen_helper_fcmpu(cpu_env, cpu_fpr[rA(ctx->opcode)], cpu_fpr[rB(ctx->opcode)], crf);
tcg_temp_free_i32(crf);
- gen_helper_float_check_status();
+ gen_helper_float_check_status(cpu_env);
}
/*** Floating-point move ***/
@@ -2319,7 +2327,7 @@ static void gen_mtfsb0(DisasContext *ctx)
/* NIP cannot be restored if the memory exception comes from
an helper */
gen_update_nip(ctx, ctx->nip - 4);
t0 = tcg_const_i32(crb);
- gen_helper_fpscr_clrbit(t0);
+ gen_helper_fpscr_clrbit(cpu_env, t0);
tcg_temp_free_i32(t0);
}
if (unlikely(Rc(ctx->opcode) != 0)) {
@@ -2344,14 +2352,14 @@ static void gen_mtfsb1(DisasContext *ctx)
/* NIP cannot be restored if the memory exception comes from
an helper */
gen_update_nip(ctx, ctx->nip - 4);
t0 = tcg_const_i32(crb);
- gen_helper_fpscr_setbit(t0);
+ gen_helper_fpscr_setbit(cpu_env, t0);
tcg_temp_free_i32(t0);
}
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
}
/* We can raise a differed exception */
- gen_helper_float_check_status();
+ gen_helper_float_check_status(cpu_env);
}
/* mtfsf */
@@ -2371,13 +2379,13 @@ static void gen_mtfsf(DisasContext *ctx)
t0 = tcg_const_i32(0xff);
else
t0 = tcg_const_i32(FM(ctx->opcode));
- gen_helper_store_fpscr(cpu_fpr[rB(ctx->opcode)], t0);
+ gen_helper_store_fpscr(cpu_env, cpu_fpr[rB(ctx->opcode)], t0);
tcg_temp_free_i32(t0);
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
}
/* We can raise a differed exception */
- gen_helper_float_check_status();
+ gen_helper_float_check_status(cpu_env);
}
/* mtfsfi */
@@ -2398,14 +2406,14 @@ static void gen_mtfsfi(DisasContext *ctx)
gen_reset_fpstatus();
t0 = tcg_const_i64(FPIMM(ctx->opcode) << (4 * sh));
t1 = tcg_const_i32(1 << sh);
- gen_helper_store_fpscr(t0, t1);
+ gen_helper_store_fpscr(cpu_env, t0, t1);
tcg_temp_free_i64(t0);
tcg_temp_free_i32(t1);
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_shri_i32(cpu_crf[1], cpu_fpscr, FPSCR_OX);
}
/* We can raise a differed exception */
- gen_helper_float_check_status();
+ gen_helper_float_check_status(cpu_env);
}
/*** Addressing modes ***/
@@ -3303,7 +3311,7 @@ static inline void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
gen_qemu_ld32u(ctx, t0, arg2);
tcg_gen_trunc_tl_i32(t1, t0);
tcg_temp_free(t0);
- gen_helper_float32_to_float64(arg1, t1);
+ gen_helper_float32_to_float64(arg1, cpu_env, t1);
tcg_temp_free_i32(t1);
}
@@ -3393,7 +3401,7 @@ static inline void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv t1 = tcg_temp_new();
- gen_helper_float64_to_float32(t0, arg1);
+ gen_helper_float64_to_float32(t0, cpu_env, arg1);
tcg_gen_extu_i32_tl(t1, t0);
tcg_temp_free_i32(t0);
gen_qemu_st32(ctx, t1, arg2);
@@ -8008,7 +8016,7 @@ static inline void gen_##name(DisasContext *ctx) \
TCGv t1; \
t0 = tcg_temp_new_i32(); \
tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \
- gen_helper_##name(t0, t0); \
+ gen_helper_##name(t0, cpu_env, t0); \
t1 = tcg_temp_new(); \
tcg_gen_extu_i32_tl(t1, t0); \
tcg_temp_free_i32(t0); \
@@ -8023,7 +8031,7 @@ static inline void gen_##name(DisasContext *ctx) \
TCGv_i32 t0; \
TCGv t1; \
t0 = tcg_temp_new_i32(); \
- gen_helper_##name(t0, cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(t0, cpu_env, cpu_gpr[rB(ctx->opcode)]); \
t1 = tcg_temp_new(); \
tcg_gen_extu_i32_tl(t1, t0); \
tcg_temp_free_i32(t0); \
@@ -8037,13 +8045,14 @@ static inline void gen_##name(DisasContext *ctx) \
{ \
TCGv_i32 t0 = tcg_temp_new_i32(); \
tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \
- gen_helper_##name(cpu_gpr[rD(ctx->opcode)], t0); \
+ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); \
tcg_temp_free_i32(t0); \
}
#define GEN_SPEFPUOP_CONV_64_64(name) \
static inline void gen_##name(DisasContext *ctx) \
{ \
- gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_env, \
+ cpu_gpr[rB(ctx->opcode)]); \
}
#define GEN_SPEFPUOP_ARITH2_32_32(name) \
static inline void gen_##name(DisasContext *ctx) \
@@ -8058,7 +8067,7 @@ static inline void gen_##name(DisasContext *ctx) \
t1 = tcg_temp_new_i32(); \
tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
- gen_helper_##name(t0, t0, t1); \
+ gen_helper_##name(t0, cpu_env, t0, t1); \
tcg_temp_free_i32(t1); \
t2 = tcg_temp_new(); \
tcg_gen_extu_i32_tl(t2, t0); \
@@ -8075,8 +8084,8 @@ static inline void gen_##name(DisasContext *ctx) \
gen_exception(ctx, POWERPC_EXCP_SPEU); \
return; \
} \
- gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], \
- cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_env, \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \
}
#define GEN_SPEFPUOP_COMP_32(name) \
static inline void gen_##name(DisasContext *ctx) \
@@ -8090,7 +8099,7 @@ static inline void gen_##name(DisasContext *ctx) \
t1 = tcg_temp_new_i32(); \
tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
- gen_helper_##name(cpu_crf[crfD(ctx->opcode)], t0, t1); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \
tcg_temp_free_i32(t0); \
tcg_temp_free_i32(t1); \
}
@@ -8101,28 +8110,29 @@ static inline void gen_##name(DisasContext *ctx) \
gen_exception(ctx, POWERPC_EXCP_SPEU); \
return; \
} \
- gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, \
cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \
}
#else
#define GEN_SPEFPUOP_CONV_32_32(name) \
static inline void gen_##name(DisasContext *ctx) \
{ \
- gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_env, \
+ cpu_gpr[rB(ctx->opcode)]); \
}
#define GEN_SPEFPUOP_CONV_32_64(name) \
static inline void gen_##name(DisasContext *ctx) \
{ \
TCGv_i64 t0 = tcg_temp_new_i64(); \
gen_load_gpr64(t0, rB(ctx->opcode)); \
- gen_helper_##name(cpu_gpr[rD(ctx->opcode)], t0); \
+ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); \
tcg_temp_free_i64(t0); \
}
#define GEN_SPEFPUOP_CONV_64_32(name) \
static inline void gen_##name(DisasContext *ctx) \
{ \
TCGv_i64 t0 = tcg_temp_new_i64(); \
- gen_helper_##name(t0, cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(t0, cpu_env, cpu_gpr[rB(ctx->opcode)]); \
gen_store_gpr64(rD(ctx->opcode), t0); \
tcg_temp_free_i64(t0); \
}
@@ -8131,7 +8141,7 @@ static inline void gen_##name(DisasContext *ctx) \
{ \
TCGv_i64 t0 = tcg_temp_new_i64(); \
gen_load_gpr64(t0, rB(ctx->opcode)); \
- gen_helper_##name(t0, t0); \
+ gen_helper_##name(t0, cpu_env, t0); \
gen_store_gpr64(rD(ctx->opcode), t0); \
tcg_temp_free_i64(t0); \
}
@@ -8142,7 +8152,7 @@ static inline void gen_##name(DisasContext *ctx) \
gen_exception(ctx, POWERPC_EXCP_SPEU); \
return; \
} \
- gen_helper_##name(cpu_gpr[rD(ctx->opcode)], \
+ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_env, \
cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \
}
#define GEN_SPEFPUOP_ARITH2_64_64(name) \
@@ -8157,7 +8167,7 @@ static inline void gen_##name(DisasContext *ctx) \
t1 = tcg_temp_new_i64(); \
gen_load_gpr64(t0, rA(ctx->opcode)); \
gen_load_gpr64(t1, rB(ctx->opcode)); \
- gen_helper_##name(t0, t0, t1); \
+ gen_helper_##name(t0, cpu_env, t0, t1); \
gen_store_gpr64(rD(ctx->opcode), t0); \
tcg_temp_free_i64(t0); \
tcg_temp_free_i64(t1); \
@@ -8169,7 +8179,7 @@ static inline void gen_##name(DisasContext *ctx) \
gen_exception(ctx, POWERPC_EXCP_SPEU); \
return; \
} \
- gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, \
cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \
}
#define GEN_SPEFPUOP_COMP_64(name) \
@@ -8184,7 +8194,7 @@ static inline void gen_##name(DisasContext *ctx) \
t1 = tcg_temp_new_i64(); \
gen_load_gpr64(t0, rA(ctx->opcode)); \
gen_load_gpr64(t1, rB(ctx->opcode)); \
- gen_helper_##name(cpu_crf[crfD(ctx->opcode)], t0, t1); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \
tcg_temp_free_i64(t0); \
tcg_temp_free_i64(t1); \
}
--
1.7.9