qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH v6 11/13] hardfloat: implement float32/64 fused multiply-add


From: Alex Bennée
Subject: Re: [Qemu-devel] [PATCH v6 11/13] hardfloat: implement float32/64 fused multiply-add
Date: Wed, 05 Dec 2018 12:25:33 +0000
User-agent: mu4e 1.1.0; emacs 26.1.90

Emilio G. Cota <address@hidden> writes:

> Performance results for fp-bench:
>
> 1. Intel(R) Core(TM) i7-6700K CPU @ 4.00GHz
> - before:
> fma-single: 74.73 MFlops
> fma-double: 74.54 MFlops
> - after:
> fma-single: 203.37 MFlops
> fma-double: 169.37 MFlops
>
> 2. ARM Aarch64 A57 @ 2.4GHz
> - before:
> fma-single: 23.24 MFlops
> fma-double: 23.70 MFlops
> - after:
> fma-single: 66.14 MFlops
> fma-double: 63.10 MFlops
>
> 3. IBM POWER8E @ 2.1 GHz
> - before:
> fma-single: 37.26 MFlops
> fma-double: 37.29 MFlops
> - after:
> fma-single: 48.90 MFlops
> fma-double: 59.51 MFlops
>
> Here having 3FP64 set to 1 pays off for x86_64:
> [1] 170.15 vs [0] 153.12 MFlops
>
> Signed-off-by: Emilio G. Cota <address@hidden>

Reviewed-by: Alex Bennée <address@hidden>


> ---
>  fpu/softfloat.c | 132 ++++++++++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 128 insertions(+), 4 deletions(-)
>
> diff --git a/fpu/softfloat.c b/fpu/softfloat.c
> index e35ebfaae7..e03feafb6f 100644
> --- a/fpu/softfloat.c
> +++ b/fpu/softfloat.c
> @@ -1514,8 +1514,9 @@ float16 QEMU_FLATTEN float16_muladd(float16 a, float16 
> b, float16 c,
>      return float16_round_pack_canonical(pr, status);
>  }
>
> -float32 QEMU_FLATTEN float32_muladd(float32 a, float32 b, float32 c,
> -                                                int flags, float_status 
> *status)
> +static float32 QEMU_SOFTFLOAT_ATTR
> +soft_f32_muladd(float32 a, float32 b, float32 c, int flags,
> +                float_status *status)
>  {
>      FloatParts pa = float32_unpack_canonical(a, status);
>      FloatParts pb = float32_unpack_canonical(b, status);
> @@ -1525,8 +1526,9 @@ float32 QEMU_FLATTEN float32_muladd(float32 a, float32 
> b, float32 c,
>      return float32_round_pack_canonical(pr, status);
>  }
>
> -float64 QEMU_FLATTEN float64_muladd(float64 a, float64 b, float64 c,
> -                                                int flags, float_status 
> *status)
> +static float64 QEMU_SOFTFLOAT_ATTR
> +soft_f64_muladd(float64 a, float64 b, float64 c, int flags,
> +                float_status *status)
>  {
>      FloatParts pa = float64_unpack_canonical(a, status);
>      FloatParts pb = float64_unpack_canonical(b, status);
> @@ -1536,6 +1538,128 @@ float64 QEMU_FLATTEN float64_muladd(float64 a, 
> float64 b, float64 c,
>      return float64_round_pack_canonical(pr, status);
>  }
>
> +float32 QEMU_FLATTEN
> +float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status 
> *s)
> +{
> +    union_float32 ua, ub, uc, ur;
> +
> +    ua.s = xa;
> +    ub.s = xb;
> +    uc.s = xc;
> +
> +    if (unlikely(!can_use_fpu(s))) {
> +        goto soft;
> +    }
> +    if (unlikely(flags & float_muladd_halve_result)) {
> +        goto soft;
> +    }
> +
> +    float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
> +    if (unlikely(!f32_is_zon3(ua, ub, uc))) {
> +        goto soft;
> +    }
> +    /*
> +     * When (a || b) == 0, there's no need to check for under/over flow,
> +     * since we know the addend is (normal || 0) and the product is 0.
> +     */
> +    if (float32_is_zero(ua.s) || float32_is_zero(ub.s)) {
> +        union_float32 up;
> +        bool prod_sign;
> +
> +        prod_sign = float32_is_neg(ua.s) ^ float32_is_neg(ub.s);
> +        prod_sign ^= !!(flags & float_muladd_negate_product);
> +        up.s = float32_set_sign(float32_zero, prod_sign);
> +
> +        if (flags & float_muladd_negate_c) {
> +            uc.h = -uc.h;
> +        }
> +        ur.h = up.h + uc.h;
> +    } else {
> +        if (flags & float_muladd_negate_product) {
> +            ua.h = -ua.h;
> +        }
> +        if (flags & float_muladd_negate_c) {
> +            uc.h = -uc.h;
> +        }
> +
> +        ur.h = fmaf(ua.h, ub.h, uc.h);
> +
> +        if (unlikely(f32_is_inf(ur))) {
> +            s->float_exception_flags |= float_flag_overflow;
> +        } else if (unlikely(fabsf(ur.h) <= FLT_MIN)) {
> +            goto soft;
> +        }
> +    }
> +    if (flags & float_muladd_negate_result) {
> +        return float32_chs(ur.s);
> +    }
> +    return ur.s;
> +
> + soft:
> +    return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s);
> +}
> +
> +float64 QEMU_FLATTEN
> +float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status 
> *s)
> +{
> +    union_float64 ua, ub, uc, ur;
> +
> +    ua.s = xa;
> +    ub.s = xb;
> +    uc.s = xc;
> +
> +    if (unlikely(!can_use_fpu(s))) {
> +        goto soft;
> +    }
> +    if (unlikely(flags & float_muladd_halve_result)) {
> +        goto soft;
> +    }
> +
> +    float64_input_flush3(&ua.s, &ub.s, &uc.s, s);
> +    if (unlikely(!f64_is_zon3(ua, ub, uc))) {
> +        goto soft;
> +    }
> +    /*
> +     * When (a || b) == 0, there's no need to check for under/over flow,
> +     * since we know the addend is (normal || 0) and the product is 0.
> +     */
> +    if (float64_is_zero(ua.s) || float64_is_zero(ub.s)) {
> +        union_float64 up;
> +        bool prod_sign;
> +
> +        prod_sign = float64_is_neg(ua.s) ^ float64_is_neg(ub.s);
> +        prod_sign ^= !!(flags & float_muladd_negate_product);
> +        up.s = float64_set_sign(float64_zero, prod_sign);
> +
> +        if (flags & float_muladd_negate_c) {
> +            uc.h = -uc.h;
> +        }
> +        ur.h = up.h + uc.h;
> +    } else {
> +        if (flags & float_muladd_negate_product) {
> +            ua.h = -ua.h;
> +        }
> +        if (flags & float_muladd_negate_c) {
> +            uc.h = -uc.h;
> +        }
> +
> +        ur.h = fma(ua.h, ub.h, uc.h);
> +
> +        if (unlikely(f64_is_inf(ur))) {
> +            s->float_exception_flags |= float_flag_overflow;
> +        } else if (unlikely(fabs(ur.h) <= FLT_MIN)) {
> +            goto soft;
> +        }
> +    }
> +    if (flags & float_muladd_negate_result) {
> +        return float64_chs(ur.s);
> +    }
> +    return ur.s;
> +
> + soft:
> +    return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s);
> +}
> +
>  /*
>   * Returns the result of dividing the floating-point value `a' by the
>   * corresponding value `b'. The operation is performed according to


--
Alex Bennée



reply via email to

[Prev in Thread] Current Thread [Next in Thread]