Index: gcc/dwarf2out.c
===================================================================
--- gcc/dwarf2out.c (revision 143507)
+++ gcc/dwarf2out.c (working copy)
@@ -9344,6 +9344,12 @@
add_AT_unsigned (base_type_result, DW_AT_byte_size,
int_size_in_bytes (type));
+
+  /* DWARF version 3 specifies that for fixed-point types DW_AT_binary_scale
+     gives the binary scale factor (the position of the binary point).  */
+ if (TREE_CODE (type) == FIXED_POINT_TYPE)
+ add_AT_int (base_type_result, DW_AT_binary_scale, -TYPE_FBIT (type));
+
add_AT_unsigned (base_type_result, DW_AT_encoding, encoding);
return base_type_result;
@@ -11227,6 +11233,13 @@
}
break;
+ case CONST_FIXED:
+ {
+ add_AT_long_long (die, DW_AT_const_value,
+ CONST_FIXED_VALUE_HIGH (rtl), CONST_FIXED_VALUE_LOW (rtl));
+ }
+ break;
+
case CONST_VECTOR:
{
enum machine_mode mode = GET_MODE (rtl);
Index: gcc/varasm.c
===================================================================
--- gcc/varasm.c (revision 143507)
+++ gcc/varasm.c (working copy)
@@ -2638,7 +2638,7 @@
else
mclass = MODE_INT;
- omode = mode_for_size (subsize * BITS_PER_UNIT, mclass, 0);
+ omode = mode_for_size (subsize * BITS_PER_UNIT, MODE_INT, 0);
imode = mode_for_size (size * BITS_PER_UNIT, mclass, 0);
for (i = 0; i < size; i += subsize)
Index: gcc/config/avr/libgcc-fixed.S
===================================================================
--- gcc/config/avr/libgcc-fixed.S (revision 0)
+++ gcc/config/avr/libgcc-fixed.S (revision 0)
@@ -0,0 +1,1122 @@
+/* -*- Mode: Asm -*- */
+/* Copyright (C) 2009
+ Free Software Foundation, Inc.
+ Contributed by Sean D'Epagnier
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Fixed point library routines for avr. */
+
+#define __zero_reg__ r1
+#define __tmp_reg__ r0
+#define __SREG__ 0x3f
+#define __SP_H__ 0x3e
+#define __SP_L__ 0x3d
+#define __RAMPZ__ 0x3B
+
+/* Conversions to float. */
+#if defined (L_fractqqsf)
+ .global __fractqqsf
+ .func __fractqqsf
+__fractqqsf:
+ clr r25
+ sbrc r24, 7 ; if negative
+ ser r25 ; sign extend
+ mov r23, r24 ; move in place
+ mov r24, r25 ; sign extend lower byte
+ lsl r23
+ clr r22
+ rjmp __fractsasf ; call larger conversion
+.endfunc
+#endif /* defined (L_fractqqsf) */
+
+#if defined (L_fractuqqsf)
+ .global __fractuqqsf
+ .func __fractuqqsf
+__fractuqqsf:
+ clr r22
+ mov r23, r24
+ clr r24
+ clr r25
+ rjmp __fractsasf ; call larger conversion
+.endfunc
+#endif /* defined (L_fractuqqsf) */
+
+#if defined (L_fracthqsf)
+ .global __fracthqsf
+ .func __fracthqsf
+__fracthqsf:
+ mov_l r22, r24 ; put fractional part in place
+ mov_h r23, r25
+ clr r25
+ sbrc r23, 7 ; if negative
+ ser r25 ; sign extend
+ mov r24, r25 ; sign extend lower byte
+ lsl r22
+ rol r23
+ rjmp __fractsasf ; call larger conversion
+.endfunc
+#endif /* defined (L_fracthqsf) */
+
+#if defined (L_fractuhqsf)
+ .global __fractuhqsf
+ .func __fractuhqsf
+__fractuhqsf:
+ mov_l r22, r24 ; put fractional part in place
+ mov_h r23, r25
+ clr r24
+ clr r25
+ rjmp __fractsasf ; call larger conversion
+.endfunc
+#endif /* defined (L_fractuhqsf) */
+
+#if defined (L_fracthasf)
+ .global __fracthasf
+ .func __fracthasf
+__fracthasf:
+ clr r22
+ mov r23, r24 ; move into place
+ mov r24, r25
+ clr r25
+ sbrc r24, 7 ; if negative
+ ser r25 ; sign extend
+ rjmp __fractsasf ; call larger conversion
+.endfunc
+#endif /* defined (L_fracthasf) */
+#if defined (L_fractuhasf)
+ .global __fractuhasf
+ .func __fractuhasf
+__fractuhasf:
+ clr r22
+ mov r23, r24 ; move into place (NOTE(review): unlike __fracthasf, the integral byte is never moved r25->r24 and r24/r25 are left stale -- the integral part looks dropped; confirm)
+ rjmp __fractsasf ; call larger conversion
+.endfunc
+#endif /* defined (L_fractuhasf) */
+
+#if defined (L_fractsasf)
+ .global __fractsasf
+ .func __fractsasf
+__fractsasf:
+ rcall __floatsisf
+ tst r25
+ breq __fractsasf_exit ; skip if zero
+ subi r25, 0x08 ; adjust exponent
+__fractsasf_exit:
+ ret
+.endfunc
+#endif /* defined (L_fractsasf) */
+
+#if defined (L_fractusasf)
+ .global __fractusasf
+ .func __fractusasf
+__fractusasf:
+ rcall __floatunsisf
+ tst r25
+ breq __fractusasf_exit ; skip if zero
+ subi r25, 0x08 ; adjust exponent
+__fractusasf_exit:
+ ret
+.endfunc
+#endif /* defined (L_fractusasf) */
+
+#if defined (L_fractsfqq) /* Conversions from float. */
+ .global __fractsfqq
+ .func __fractsfqq
+__fractsfqq:
+ subi r25, -11 ; adjust exponent
+ subi r24, 128
+ rjmp __fixsfsi
+.endfunc
+#endif /* defined (L_fractsfqq) */
+
+#if defined (L_fractsfuqq)
+ .global __fractsfuqq
+ .func __fractsfuqq
+__fractsfuqq:
+ subi r25, -12 ; adjust exponent
+ rjmp __fixsfsi
+.endfunc
+#endif /* defined (L_fractsfuqq) */
+
+#if defined (L_fractsfhq)
+ .global __fractsfhq
+ .func __fractsfhq
+__fractsfhq:
+ subi r25, -15 ; adjust exponent
+ subi r24, 128
+ rjmp __fixsfsi
+.endfunc
+#endif /* defined (L_fractsfhq) */
+
+#if defined (L_fractsfuhq)
+ .global __fractsfuhq
+ .func __fractsfuhq
+__fractsfuhq:
+ subi r25, -16 ; adjust exponent
+ rjmp __fixsfsi
+.endfunc
+#endif /* defined (L_fractsfuhq) */
+
+#if defined (L_fractsfha)
+ .global __fractsfha
+ .func __fractsfha
+__fractsfha:
+.endfunc
+ .global __fractsfuha
+ .func __fractsfuha
+__fractsfuha:
+ subi r25, -12 ; adjust exponent (NOTE(review): signed __fractsfha falls through and shares the unsigned adjustment; the QQ/HQ pairs give the signed variant a different adjustment -- confirm this sharing is intended)
+ rjmp __fixsfsi
+.endfunc
+#endif /* defined (L_fractsfha) */
+
+#if defined (L_fractsfsa)
+ .global __fractsfsa
+ .func __fractsfsa
+__fractsfsa:
+.endfunc
+ .global __fractsfusa
+ .func __fractsfusa
+__fractsfusa:
+ subi r25, -8 ; adjust exponent
+ rjmp __fixsfsi
+.endfunc
+#endif /* defined (L_fractsfsa) */
+
+/* For multiplication the functions here are called directly from
+ avr-fixed.md patterns, instead of using the standard libcall mechanisms.
+ This can make better code because GCC knows exactly which
+ of the call-used registers (not all of them) are clobbered. */
+
+/* mulqq and muluqq open coded on the enhanced core */
+#if !defined (__AVR_HAVE_MUL__)
+/*******************************************************
+ Fractional Multiplication 8 x 8
+*******************************************************/
+#define r_arg2 r22 /* multiplicand */
+#define r_arg1 r24 /* multiplier */
+#define r_res __tmp_reg__ /* result */
+
+#if defined (L_mulqq3)
+ .global __mulqq3
+ .func __mulqq3
+__mulqq3:
+ mov r_res, r_arg1
+ eor r_res, r_arg2
+ bst r_res, 7
+ lsl r_arg1
+ lsl r_arg2
+ brcc __mulqq3_skipneg
+ neg r_arg2
+__mulqq3_skipneg:
+ rcall __muluqq3
+ lsr r_arg1
+ brtc __mulqq3_exit
+ neg r_arg1
+__mulqq3_exit:
+ ret
+
+.endfunc
+#endif /* defined (L_mulqq3) */
+
+#if defined (L_muluqq3)
+ .global __muluqq3
+ .func __muluqq3
+__muluqq3:
+ clr r_res ; clear result
+__muluqq3_loop:
+ lsr r_arg2 ; shift multiplicand
+ sbrc r_arg1,7
+ add r_res,r_arg2
+ breq __muluqq3_exit ; while multiplicand != 0
+ lsl r_arg1
+ brne __muluqq3_loop ; exit if multiplier = 0
+__muluqq3_exit:
+ mov r_arg1,r_res ; result to return register
+ ret
+#undef r_arg2
+#undef r_arg1
+#undef r_res
+
+.endfunc
+#endif /* defined (L_muluqq3) */
+#endif /* !defined (__AVR_HAVE_MUL__) */
+
+/*******************************************************
+ Fractional Multiplication 16 x 16
+*******************************************************/
+
+#if defined (__AVR_HAVE_MUL__)
+#define r_arg1L r22 /* multiplier Low */
+#define r_arg1H r23 /* multiplier High */
+#define r_arg2L r20 /* multiplicand Low */
+#define r_arg2H r21 /* multiplicand High */
+#define r_resL r18 /* result Low */
+#define r_resH r19 /* result High */
+
+#if defined (L_mulhq3)
+ .global __mulhq3
+ .func __mulhq3
+__mulhq3:
+ fmuls r_arg1H, r_arg2H
+ movw r_resL, r0
+ fmulsu r_arg2H, r_arg1L
+ clr r_arg1L
+ sbc r_resH, r_arg1L
+ add r_resL, r1
+ adc r_resH, r_arg1L
+ fmulsu r_arg1H, r_arg2L
+ sbc r_resH, r_arg1L
+ add r_resL, r1
+ adc r_resH, r_arg1L
+ clr __zero_reg__
+ ret
+.endfunc
+#endif /* defined (L_mulhq3) */
+
+#if defined (L_muluhq3)
+ .global __muluhq3
+ .func __muluhq3
+__muluhq3:
+ mul r_arg1H, r_arg2H
+ movw r_resL, r0
+ mul r_arg1H, r_arg2L
+ add r_resL, r1
+ clr __zero_reg__
+ adc r_resH, __zero_reg__
+ mul r_arg1L, r_arg2H
+ add r_resL, r1
+ clr __zero_reg__
+ adc r_resH, __zero_reg__
+ ret
+.endfunc
+#endif /* defined (L_muluhq3) */
+
+#else
+#define r_arg1L r24 /* multiplier Low */
+#define r_arg1H r25 /* multiplier High */
+#define r_arg2L r22 /* multiplicand Low */
+#define r_arg2H r23 /* multiplicand High */
+#define r_resL __tmp_reg__ /* result Low */
+#define r_resH __zero_reg__ /* result High */
+
+#if defined (L_mulhq3)
+ .global __mulhq3
+ .func __mulhq3
+__mulhq3:
+ mov r_resL, r_arg1H ; compute sign of result into T
+ eor r_resL, r_arg2H
+ bst r_resL, 7
+ lsl r_arg1L
+ rol r_arg1H
+ lsl r_arg2L
+ rol r_arg2H
+ brcc __mulhq3_skipneg
+ com r_arg2H ; negate multiplicand (16-bit)
+ neg r_arg2L
+ sbci r_arg2H, -1
+__mulhq3_skipneg:
+ rcall __muluhq3
+ lsr r_arg1H
+ ror r_arg1L
+ brtc __mulhq3_exit
+ com r_arg1H ; negate result (16-bit)
+ neg r_arg1L
+ sbci r_arg1H, -1
+__mulhq3_exit:
+ ret
+.endfunc
+#endif /* defined (L_mulhq3) */
+
+#if defined (L_muluhq3)
+ .global __muluhq3
+ .func __muluhq3
+__muluhq3:
+ clr r_resL ; clear result
+__muluhq3_loop:
+ lsr r_arg2H ; shift multiplicand
+ ror r_arg2L
+ sbrs r_arg1H,7
+ rjmp __muluhq3_skip
+ add r_resL,r_arg2L ; result + multiplicand
+ adc r_resH,r_arg2H
+__muluhq3_skip:
+ lsl r_arg1L ; shift multiplier
+ rol r_arg1H
+ brne __muluhq3_loop
+ cpi r_arg1L, 0
+ brne __muluhq3_loop ; exit multiplier = 0
+ mov_l r_arg1L,r_resL
+ mov_h r_arg1H,r_resH ; result to return register
+ clr __zero_reg__ ; zero the zero reg
+ ret
+.endfunc
+#endif /* defined (L_muluhq3) */
+
+#endif /* defined (__AVR_HAVE_MUL__) */
+
+#undef r_arg1L
+#undef r_arg1H
+#undef r_arg2L
+#undef r_arg2H
+#undef r_resL
+#undef r_resH
+
+/*******************************************************
+ Fixed Multiplication 8.8 x 8.8
+*******************************************************/
+
+#if defined (__AVR_HAVE_MUL__)
+#define r_arg1L r22 /* multiplier Low */
+#define r_arg1H r23 /* multiplier High */
+#define r_arg2L r20 /* multiplicand Low */
+#define r_arg2H r21 /* multiplicand High */
+#define r_resL r18 /* result Low */
+#define r_resH r19 /* result High */
+
+#if defined (L_mulha3)
+ .global __mulha3
+ .func __mulha3
+__mulha3:
+ mul r_arg1L, r_arg2L
+ mov r_resL, r1
+ muls r_arg1H, r_arg2H
+ mov r_resH, r0
+ mulsu r_arg1H, r_arg2L
+ add r_resL, r0
+ adc r_resH, r1
+ mulsu r_arg2H, r_arg1L
+ add r_resL, r0
+ adc r_resH, r1
+ clr __zero_reg__
+ ret
+.endfunc
+#endif /* defined (L_mulha3) */
+
+#if defined (L_muluha3)
+ .global __muluha3
+ .func __muluha3
+__muluha3:
+ mul r_arg1L, r_arg2L
+ mov r_resL, r1
+ mul r_arg1H, r_arg2H
+ mov r_resH, r0
+ mul r_arg1H, r_arg2L
+ add r_resL, r0
+ adc r_resH, r1
+ mul r_arg1L, r_arg2H
+ add r_resL, r0
+ adc r_resH, r1
+ clr __zero_reg__
+ ret
+.endfunc
+#endif /* defined (L_muluha3) */
+
+#else
+
+#define r_arg1L r24 /* multiplier Low */
+#define r_arg1H r25 /* multiplier High */
+#define r_arg2L r22 /* multiplicand Low */
+#define r_arg2H r23 /* multiplicand High */
+#define r_resL r18 /* result Low */
+#define r_resH r19 /* result High */
+#define r_scratchL r0 /* scratch Low */
+#define r_scratchH r1
+
+#if defined (L_mulha3)
+ .global __mulha3
+ .func __mulha3
+__mulha3:
+ mov r_resL, r_arg1H
+ eor r_resL, r_arg2H
+ bst r_resL, 7
+ sbrs r_arg1H, 7
+ rjmp __mulha3_arg1pos
+ com r_arg1H
+ neg r_arg1L
+ sbci r_arg1H,-1
+__mulha3_arg1pos:
+ sbrs r_arg2H, 7
+ rjmp __mulha3_arg2pos
+ com r_arg2H
+ neg r_arg2L
+ sbci r_arg2H,-1
+__mulha3_arg2pos:
+ rcall __muluha3
+ brtc __mulha3_exit
+ com r_resH
+ neg r_resL
+ sbci r_resH,-1
+__mulha3_exit:
+ ret
+.endfunc
+#endif /* defined (L_mulha3) */
+
+#if defined (L_muluha3)
+ .global __muluha3
+ .func __muluha3
+__muluha3:
+ clr r_resL ; clear result
+ clr r_resH
+ mov_l r0, r_arg1L ; save multiplicand
+ mov_h r1, r_arg1H
+__muluha3_loop1:
+ sbrs r_arg2H,0
+ rjmp __muluha3_skip1
+ add r_resL,r_arg1L ; result + multiplicand
+ adc r_resH,r_arg1H
+__muluha3_skip1:
+ lsl r_arg1L ; shift multiplicand
+ rol r_arg1H
+ sbiw r_arg1L,0
+ breq __muluha3_loop1_done ; exit multiplicand = 0
+ lsr r_arg2H
+ brne __muluha3_loop1 ; exit multiplier = 0
+__muluha3_loop1_done:
+ mov_l r_arg1L, r_scratchL ; restore multiplicand
+ mov_h r_arg1H, r_scratchH
+__muluha3_loop2:
+ lsr r_arg1H ; shift multiplicand
+ ror r_arg1L
+ sbiw r_arg1L,0
+ breq __muluha3_exit ; exit if multiplicand = 0
+ sbrs r_arg2L,7
+ rjmp __muluha3_skip2
+ add r_resL,r_arg1L ; result + multiplicand
+ adc r_resH,r_arg1H
+__muluha3_skip2:
+ lsl r_arg2L
+ brne __muluha3_loop2 ; exit if multiplier = 0
+__muluha3_exit:
+ clr __zero_reg__ ; got clobbered
+ ret
+.endfunc
+#endif /* defined (L_muluha3) */
+
+#endif /* defined (__AVR_HAVE_MUL__) */
+
+#undef r_arg1L
+#undef r_arg1H
+#undef r_arg2L
+#undef r_arg2H
+#undef r_resL
+#undef r_resH
+
+/*******************************************************
+ Fixed Multiplication 16.16 x 16.16
+*******************************************************/
+
+#if defined (__AVR_HAVE_MUL__)
+/* uses nonstandard registers because mulus only works from 16-23 */
+#define r_clr r30
+#define r_arg1L r16 /* multiplier Low */
+#define r_arg1H r17
+#define r_arg1HL r18
+#define r_arg1HH r19 /* multiplier High */
+
+#define r_arg2L r20 /* multiplicand Low */
+#define r_arg2H r21
+#define r_arg2HL r22
+#define r_arg2HH r23 /* multiplicand High */
+
+#define r_resL r24 /* result Low */
+#define r_resH r25
+#define r_resHL r26
+#define r_resHH r27 /* result High */
+
+#if defined (L_mulsa3)
+ .global __mulsa3
+ .func __mulsa3
+__mulsa3:
+ clr r_clr
+ clr r_resH
+ clr r_resHL
+ clr r_resHH
+ mul r_arg1H, r_arg2L
+ mov r_resL, r1
+ mul r_arg1L, r_arg2H
+ add r_resL, r1
+ adc r_resH, r_clr
+ mul r_arg1L, r_arg2HL
+ add r_resL, r0
+ adc r_resH, r1
+ adc r_resHL, r_clr
+ mul r_arg1H, r_arg2H
+ add r_resL, r0
+ adc r_resH, r1
+ adc r_resHL, r_clr
+ mul r_arg1HL, r_arg2L
+ add r_resL, r0
+ adc r_resH, r1
+ adc r_resHL, r_clr
+ mulsu r_arg2HH, r_arg1L
+ sbc r_resHH, r_clr
+ add r_resH, r0
+ adc r_resHL, r1
+ adc r_resHH, r_clr
+ mul r_arg1H, r_arg2HL
+ add r_resH, r0
+ adc r_resHL, r1
+ adc r_resHH, r_clr
+ mul r_arg1HL, r_arg2H
+ add r_resH, r0
+ adc r_resHL, r1
+ adc r_resHH, r_clr
+ mulsu r_arg1HH, r_arg2L
+ sbc r_resHH, r_clr
+ add r_resH, r0
+ adc r_resHL, r1
+ adc r_resHH, r_clr
+ mulsu r_arg2HH, r_arg1H
+ add r_resHL, r0
+ adc r_resHH, r1
+ mul r_arg1HL, r_arg2HL
+ add r_resHL, r0
+ adc r_resHH, r1
+ mulsu r_arg1HH, r_arg2H
+ add r_resHL, r0
+ adc r_resHH, r1
+ mulsu r_arg2HH, r_arg1HL
+ add r_resHH, r0
+ mulsu r_arg1HH, r_arg2HL
+ add r_resHH, r0
+ clr __zero_reg__
+ ret
+.endfunc
+#endif
+
+#if defined (L_mulusa3)
+ .global __mulusa3
+ .func __mulusa3
+__mulusa3:
+ clr r_clr
+ clr r_resH
+ clr r_resHL
+ clr r_resHH
+ mul r_arg1H, r_arg2L
+ mov r_resL, r1
+ mul r_arg1L, r_arg2H
+ add r_resL, r1
+ adc r_resH, r_clr
+ mul r_arg1L, r_arg2HL
+ add r_resL, r0
+ adc r_resH, r1
+ adc r_resHL, r_clr
+ mul r_arg1H, r_arg2H
+ add r_resL, r0
+ adc r_resH, r1
+ adc r_resHL, r_clr
+ mul r_arg1HL, r_arg2L
+ add r_resL, r0
+ adc r_resH, r1
+ adc r_resHL, r_clr
+ mul r_arg1L, r_arg2HH
+ add r_resH, r0
+ adc r_resHL, r1
+ adc r_resHH, r_clr
+ mul r_arg1H, r_arg2HL
+ add r_resH, r0
+ adc r_resHL, r1
+ adc r_resHH, r_clr
+ mul r_arg1HL, r_arg2H
+ add r_resH, r0
+ adc r_resHL, r1
+ adc r_resHH, r_clr
+ mul r_arg1HH, r_arg2L
+ add r_resH, r0
+ adc r_resHL, r1
+ adc r_resHH, r_clr
+ mul r_arg1H, r_arg2HH
+ add r_resHL, r0
+ adc r_resHH, r1
+ mul r_arg1HL, r_arg2HL
+ add r_resHL, r0
+ adc r_resHH, r1
+ mul r_arg1HH, r_arg2H
+ add r_resHL, r0
+ adc r_resHH, r1
+ mul r_arg1HL, r_arg2HH
+ add r_resHH, r0
+ mul r_arg1HH, r_arg2HL
+ add r_resHH, r0
+ clr __zero_reg__
+ ret
+.endfunc
+#endif
+
+#else
+
+#define r_arg1L r18 /* multiplier Low */
+#define r_arg1H r19
+#define r_arg1HL r20
+#define r_arg1HH r21 /* multiplier High */
+
+/* these registers needed for sbiw */
+#define r_arg2L r24 /* multiplicand Low */
+#define r_arg2H r25
+#define r_arg2HL r26
+#define r_arg2HH r27 /* multiplicand High */
+
+#define r_resL r14 /* result Low */
+#define r_resH r15
+#define r_resHL r16
+#define r_resHH r17 /* result High */
+
+#define r_scratchL r0 /* scratch Low */
+#define r_scratchH r1
+#define r_scratchHL r22
+#define r_scratchHH r23 /* scratch High */
+
+#if defined (L_mulsa3)
+ .global __mulsa3
+ .func __mulsa3
+__mulsa3:
+ mov r_resL, r_arg1HH
+ eor r_resL, r_arg2HH
+ bst r_resL, 7
+ sbrs r_arg1HH, 7
+ rjmp __mulsa3_arg1pos
+ com r_arg1HH
+ com r_arg1HL
+ com r_arg1H
+ neg r_arg1L
+ sbci r_arg1H,-1
+ sbci r_arg1HL,-1
+ sbci r_arg1HH,-1
+__mulsa3_arg1pos:
+ sbrs r_arg2HH, 7
+ rjmp __mulsa3_arg2pos
+ com r_arg2HH
+ com r_arg2HL
+ com r_arg2H
+ neg r_arg2L
+ sbci r_arg2H,-1
+ sbci r_arg2HL,-1
+ sbci r_arg2HH,-1
+__mulsa3_arg2pos:
+ rcall __mulusa3
+ brtc __mulsa3_exit
+ com r_resHH
+ com r_resHL
+ com r_resH
+ com r_resL
+ adc r_resL,__zero_reg__
+ adc r_resH,__zero_reg__
+ adc r_resHL,__zero_reg__
+ adc r_resHH,__zero_reg__
+__mulsa3_exit:
+ ret
+.endfunc
+#endif /* defined (L_mulsa3) */
+
+#if defined (L_mulusa3)
+ .global __mulusa3
+ .func __mulusa3
+__mulusa3:
+ clr r_resL ; clear result
+ clr r_resH
+ mov_l r_resHL, r_resL
+ mov_h r_resHH, r_resH
+ mov_l r_scratchL, r_arg1L ; save multiplicand
+ mov_h r_scratchH, r_arg1H
+ mov_l r_scratchHL, r_arg1HL
+ mov_h r_scratchHH, r_arg1HH
+__mulusa3_loop1:
+ sbrs r_arg2HL,0
+ rjmp __mulusa3_skip1
+ add r_resL,r_arg1L ; result + multiplicand
+ adc r_resH,r_arg1H
+ adc r_resHL,r_arg1HL
+ adc r_resHH,r_arg1HH
+__mulusa3_skip1:
+ lsl r_arg1L ; shift multiplicand
+ rol r_arg1H
+ rol r_arg1HL
+ rol r_arg1HH
+ lsr r_arg2HH
+ ror r_arg2HL
+ sbiw r_arg2HL,0
+ brne __mulusa3_loop1 ; exit multiplier = 0
+__mulusa3_loop1_done:
+ mov_l r_arg1L, r_scratchL ; restore multiplicand
+ mov_h r_arg1H, r_scratchH
+ mov_l r_arg1HL, r_scratchHL
+ mov_h r_arg1HH, r_scratchHH
+__mulusa3_loop2:
+ lsr r_arg1HH ; shift multiplicand
+ ror r_arg1HL
+ ror r_arg1H
+ ror r_arg1L
+ sbrs r_arg2H,7
+ rjmp __mulusa3_skip2
+ add r_resL,r_arg1L ; result + multiplicand
+ adc r_resH,r_arg1H
+ adc r_resHL,r_arg1HL
+ adc r_resHH,r_arg1HH
+__mulusa3_skip2:
+ lsl r_arg2L
+ rol r_arg2H
+ sbiw r_arg2L,0
+ brne __mulusa3_loop2 ; exit if multiplier = 0
+__mulusa3_exit:
+ clr __zero_reg__ ; got clobbered
+ ret
+.endfunc
+#endif /* defined (L_mulusa3) */
+
+#undef r_scratchL
+#undef r_scratchH
+#undef r_scratchHL
+#undef r_scratchHH
+
+#endif
+
+#undef r_arg1L
+#undef r_arg1H
+#undef r_arg1HL
+#undef r_arg1HH
+
+#undef r_arg2L
+#undef r_arg2H
+#undef r_arg2HL
+#undef r_arg2HH
+
+#undef r_resL
+#undef r_resH
+#undef r_resHL
+#undef r_resHH
+
+/*******************************************************
+ Fractional Division 8 / 8
+*******************************************************/
+#define r_divd r25 /* dividend */
+#define r_quo r24 /* quotient */
+#define r_div r22 /* divisor */
+#define r_cnt r23 /* loop count */
+
+#if defined (L_divqq3)
+ .global __divqq3
+ .func __divqq3
+__divqq3:
+ mov r0, r_divd
+ eor r0, r_div
+ sbrc r_div, 7
+ neg r_div
+ sbrc r_divd, 7
+ neg r_divd
+ cp r_divd, r_div
+ breq __divqq3_minus1 ; if equal return -1
+ rcall __udivuqq3
+ lsr r_quo
+ sbrc r0, 7 ; negate result if needed
+ neg r_quo
+ ret
+__divqq3_minus1:
+ ldi r_quo, 0x80
+ ret
+.endfunc
+#endif /* defined (L_divqq3) */
+
+#if defined (L_udivuqq3)
+ .global __udivuqq3
+ .func __udivuqq3
+__udivuqq3:
+ clr r_quo ; clear quotient
+ ldi r_cnt,8 ; init loop counter
+__udivuqq3_loop:
+ lsl r_divd ; shift dividend
+ brcs __udivuqq3_ep ; dividend overflow
+ cp r_divd,r_div ; compare dividend & divisor
+ brcc __udivuqq3_ep ; dividend >= divisor
+ rol r_quo ; shift quotient (with CARRY)
+ rjmp __udivuqq3_cont
+__udivuqq3_ep:
+ sub r_divd,r_div ; restore dividend
+ lsl r_quo ; shift quotient (without CARRY)
+__udivuqq3_cont:
+ dec r_cnt ; decrement loop counter
+ brne __udivuqq3_loop
+ com r_quo ; complement result
+ ; because C flag was complemented in loop
+ ret
+.endfunc
+#endif /* defined (L_udivuqq3) */
+
+#undef r_divd
+#undef r_quo
+#undef r_div
+#undef r_cnt
+
+
+/*******************************************************
+ Fractional Division 16 / 16
+*******************************************************/
+#define r_divdL r26 /* dividend Low */
+#define r_divdH r27 /* dividend High */
+#define r_quoL r24 /* quotient Low */
+#define r_quoH r25 /* quotient High */
+#define r_divL r22 /* divisor Low */
+#define r_divH r23 /* divisor High */
+#define r_cnt r21 /* loop count */
+
+#if defined (L_divhq3)
+ .global __divhq3
+ .func __divhq3
+__divhq3:
+ mov r0, r_divdH
+ eor r0, r_divH
+ sbrs r_divH, 7
+ rjmp __divhq3_divpos
+ com r_divH
+ neg r_divL
+ sbci r_divH,-1
+__divhq3_divpos:
+ sbrs r_divdH, 7
+ rjmp __divhq3_divdpos
+ com r_divdH
+ neg r_divdL
+ sbci r_divdH,-1
+__divhq3_divdpos:
+ cp r_divdL, r_divL
+ cpc r_divdH, r_divH
+ breq __divhq3_minus1 ; if equal return -1
+ rcall __udivuhq3
+ lsr r_quoH
+ ror r_quoL
+ sbrs r0, 7 ; negate result if needed
+ ret
+ com r_quoH
+ neg r_quoL
+ sbci r_quoH,-1
+ ret
+__divhq3_minus1:
+ ldi r_quoH, 0x80
+ clr r_quoL
+ ret
+.endfunc
+#endif /* defined (L_divhq3) */
+
+#if defined (L_udivuhq3)
+ .global __udivuhq3
+ .func __udivuhq3
+__udivuhq3:
+ sub r_quoH,r_quoH ; clear quotient and carry
+ .global __udivuha3_entry
+__udivuha3_entry:
+ clr r_quoL ; clear quotient
+ ldi r_cnt,16 ; init loop counter
+__udivuhq3_loop:
+ rol r_divdL ; shift dividend (with CARRY)
+ rol r_divdH
+ brcs __udivuhq3_ep ; dividend overflow
+ cp r_divdL,r_divL ; compare dividend & divisor
+ cpc r_divdH,r_divH
+ brcc __udivuhq3_ep ; dividend >= divisor
+ rol r_quoL ; shift quotient (with CARRY)
+ rjmp __udivuhq3_cont
+__udivuhq3_ep:
+ sub r_divdL,r_divL ; restore dividend
+ sbc r_divdH,r_divH
+ lsl r_quoL ; shift quotient (without CARRY)
+__udivuhq3_cont:
+ rol r_quoH ; shift quotient
+ dec r_cnt ; decrement loop counter
+ brne __udivuhq3_loop
+ com r_quoL ; complement result
+ com r_quoH ; because C flag was complemented in loop
+ ret
+.endfunc
+#endif /* defined (L_udivuhq3) */
+
+/*******************************************************
+ Fixed Division 8.8 / 8.8
+*******************************************************/
+#if defined (L_divha3)
+ .global __divha3
+ .func __divha3
+__divha3:
+ mov r0, r_divdH
+ eor r0, r_divH
+ sbrs r_divH, 7
+ rjmp __divha3_divpos
+ com r_divH
+ neg r_divL
+ sbci r_divH,-1
+__divha3_divpos:
+ sbrs r_divdH, 7
+ rjmp __divha3_divdpos
+ com r_divdH
+ neg r_divdL
+ sbci r_divdH,-1
+__divha3_divdpos:
+ rcall __udivuha3
+ sbrs r0, 7 ; negate result if needed
+ ret
+ com r_quoH
+ neg r_quoL
+ sbci r_quoH,-1
+ ret
+.endfunc
+#endif /* defined (L_divha3) */
+
+#if defined (L_udivuha3)
+ .global __udivuha3
+ .func __udivuha3
+__udivuha3:
+ mov r_quoH, r_divdL
+ mov r_divdL, r_divdH
+ clr r_divdH
+ lsl r_quoH ; shift quotient into carry
+ rjmp __udivuha3_entry ; same as fractional after rearrange
+.endfunc
+#endif /* defined (L_udivuha3) */
+
+#undef r_divdL
+#undef r_divdH
+#undef r_quoL
+#undef r_quoH
+#undef r_divL
+#undef r_divH
+#undef r_cnt
+
+/*******************************************************
+ Fixed Division 16.16 / 16.16
+*******************************************************/
+#define r_arg1L r24 /* arg1 gets passed already in place */
+#define r_arg1H r25
+#define r_arg1HL r26
+#define r_arg1HH r27
+#define r_divdL r26 /* dividend Low */
+#define r_divdH r27
+#define r_divdHL r30
+#define r_divdHH r31 /* dividend High */
+#define r_quoL r22 /* quotient Low */
+#define r_quoH r23
+#define r_quoHL r24
+#define r_quoHH r25 /* quotient High */
+#define r_divL r18 /* divisor Low */
+#define r_divH r19
+#define r_divHL r20
+#define r_divHH r21 /* divisor High */
+#define r_cnt __zero_reg__ /* loop count (0 after the loop!) */
+
+#if defined (L_divsa3)
+ .global __divsa3
+ .func __divsa3
+__divsa3:
+ mov r0, r27
+ eor r0, r_divHH
+ sbrs r_divHH, 7
+ rjmp __divsa3_divpos
+ com r_divHH
+ com r_divHL
+ com r_divH
+ neg r_divL
+ sbci r_divH,-1
+ sbci r_divHL,-1
+ sbci r_divHH,-1
+__divsa3_divpos:
+ sbrs r_arg1HH, 7
+ rjmp __divsa3_arg1pos
+ com r_arg1HH
+ com r_arg1HL
+ com r_arg1H
+ neg r_arg1L
+ sbci r_arg1H,-1
+ sbci r_arg1HL,-1
+ sbci r_arg1HH,-1
+__divsa3_arg1pos:
+ rcall __udivusa3
+ sbrs r0, 7 ; negate result if needed
+ ret
+ com r_quoHH
+ com r_quoHL
+ com r_quoH
+ neg r_quoL
+ sbci r_quoH,-1
+ sbci r_quoHL,-1
+ sbci r_quoHH,-1
+ ret
+.endfunc
+#endif /* defined (L_divsa3) */
+
+#if defined (L_udivusa3)
+ .global __udivusa3
+ .func __udivusa3
+__udivusa3:
+ ldi r_divdHL, 32 ; init loop counter
+ mov r_cnt, r_divdHL
+ clr r_divdHL
+ clr r_divdHH
+ mov_l r_quoL, r_divdHL
+ mov_h r_quoH, r_divdHH
+ lsl r_quoHL ; shift quotient into carry
+ rol r_quoHH
+__udivusa3_loop:
+ rol r_divdL ; shift dividend (with CARRY)
+ rol r_divdH
+ rol r_divdHL
+ rol r_divdHH
+ brcs __udivusa3_ep ; dividend overflow
+ cp r_divdL,r_divL ; compare dividend & divisor
+ cpc r_divdH,r_divH
+ cpc r_divdHL,r_divHL
+ cpc r_divdHH,r_divHH
+ brcc __udivusa3_ep ; dividend >= divisor
+ rol r_quoL ; shift quotient (with CARRY)
+ rjmp __udivusa3_cont
+__udivusa3_ep:
+ sub r_divdL,r_divL ; restore dividend
+ sbc r_divdH,r_divH
+ sbc r_divdHL,r_divHL
+ sbc r_divdHH,r_divHH
+ lsl r_quoL ; shift quotient (without CARRY)
+__udivusa3_cont:
+ rol r_quoH ; shift quotient
+ rol r_quoHL
+ rol r_quoHH
+ dec r_cnt ; decrement loop counter
+ brne __udivusa3_loop
+ com r_quoL ; complement result
+ com r_quoH ; because C flag was complemented in loop
+ com r_quoHL
+ com r_quoHH
+ ret
+.endfunc
+#endif /* defined (L_udivusa3) */
+
+#undef r_divdL
+#undef r_divdH
+#undef r_divdHL
+#undef r_divdHH
+#undef r_quoL
+#undef r_quoH
+#undef r_quoHL
+#undef r_quoHH
+#undef r_divL
+#undef r_divH
+#undef r_divHL
+#undef r_divHH
+#undef r_cnt
Index: gcc/config/avr/avr-protos.h
===================================================================
--- gcc/config/avr/avr-protos.h (revision 143507)
+++ gcc/config/avr/avr-protos.h (working copy)
@@ -86,6 +86,8 @@
extern const char *lshrhi3_out (rtx insn, rtx operands[], int *len);
extern const char *lshrsi3_out (rtx insn, rtx operands[], int *len);
+extern const char *fract_out (rtx insn, rtx operands[], int intsigned, int *l);
+
extern void expand_prologue (void);
extern void expand_epilogue (void);
extern int avr_epilogue_uses (int regno);
Index: gcc/config/avr/libgcc.S
===================================================================
--- gcc/config/avr/libgcc.S (revision 143507)
+++ gcc/config/avr/libgcc.S (working copy)
@@ -204,7 +204,6 @@
#define r_arg1HL r24
#define r_arg1HH r25 /* multiplier High */
-
#define r_arg2L r18 /* multiplicand Low */
#define r_arg2H r19
#define r_arg2HL r20
@@ -560,6 +559,23 @@
.endfunc
#endif /* defined (L_divmodsi4) */
+#undef r_remHH
+#undef r_remHL
+#undef r_remH
+#undef r_remL
+
+#undef r_arg1HH
+#undef r_arg1HL
+#undef r_arg1H
+#undef r_arg1L
+
+#undef r_arg2HH
+#undef r_arg2HL
+#undef r_arg2H
+#undef r_arg2L
+
+#undef r_cnt
+
/**********************************
* This is a prologue subroutine
**********************************/
@@ -902,3 +918,4 @@
.endfunc
#endif /* defined (L_tablejump_elpm) */
+#include "libgcc-fixed.S"
Index: gcc/config/avr/avr-fixed.md
===================================================================
--- gcc/config/avr/avr-fixed.md (revision 0)
+++ gcc/config/avr/avr-fixed.md (revision 0)
@@ -0,0 +1,367 @@
+;; -*- Mode: Scheme -*-
+;; This file contains instructions that support fixed-point operations
+;; for ATMEL AVR micro controllers.
+;; Copyright (C) 2009
+;; Free Software Foundation, Inc.
+;; Contributed by Sean D'Epagnier (address@hidden)
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_mode_iterator ALLQQ [(QQ "") (UQQ "")])
+(define_mode_iterator ALLHQ [(HQ "") (UHQ "")])
+(define_mode_iterator ALLHA [(HA "") (UHA "")])
+(define_mode_iterator ALLHQHA [(HQ "") (UHQ "") (HA "") (UHA "")])
+(define_mode_iterator ALLSA [(SA "") (USA "")])
+
+;;; Conversions
+
+(define_mode_iterator FIXED1 [(QQ "") (UQQ "") (HQ "") (UHQ "")
+ (SQ "") (USQ "") (DQ "") (UDQ "")
+ (HA "") (UHA "") (SA "") (USA "")
+ (DA "") (UDA "") (TA "") (UTA "")
+ (QI "") (HI "") (SI "") (DI "")])
+(define_mode_iterator FIXED2 [(QQ "") (UQQ "") (HQ "") (UHQ "")
+ (SQ "") (USQ "") (DQ "") (UDQ "")
+ (HA "") (UHA "") (SA "") (USA "")
+ (DA "") (UDA "") (TA "") (UTA "")
+ (QI "") (HI "") (SI "") (DI "")])
+
+(define_insn "fract<FIXED2:mode><FIXED1:mode>2"
+ [(set (match_operand:FIXED1 0 "register_operand" "=r")
+ (fract_convert:FIXED1 (match_operand:FIXED2 1 "register_operand" "r")))]
+ ""
+ "* return fract_out (insn, operands, 1, NULL);"
+ [(set_attr "cc" "clobber")])
+
+(define_insn "fractuns<FIXED2:mode><FIXED1:mode>2"
+ [(set (match_operand:FIXED1 0 "register_operand" "=r")
+ (unsigned_fract_convert:FIXED1 (match_operand:FIXED2 1 "register_operand" "r")))]
+ ""
+ "* return fract_out (insn, operands, 0, NULL);"
+ [(set_attr "cc" "clobber")])
+
+;;; Addition/Subtraction, mostly identical to integer versions
+
+(define_insn "add<mode>3"
+ [(set (match_operand:ALLQQ 0 "register_operand" "=r,d")
+ (plus:ALLQQ (match_operand:ALLQQ 1 "register_operand" "%0,0")
+ (match_operand:ALLQQ 2 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ add %0,%2
+ subi %0,lo8(-(%2))"
+ [(set_attr "length" "1,1")
+ (set_attr "cc" "set_czn,set_czn")])
+
+(define_insn "sub<mode>3"
+ [(set (match_operand:ALLQQ 0 "register_operand" "=r,d")
+ (minus:ALLQQ (match_operand:ALLQQ 1 "register_operand" "0,0")
+ (match_operand:ALLQQ 2 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ sub %0,%2
+ subi %0,lo8(%2)"
+ [(set_attr "length" "1,1")
+ (set_attr "cc" "set_czn,set_czn")])
+
+
+(define_insn "add<mode>3"
+ [(set (match_operand:ALLHQHA 0 "register_operand" "=r,d")
+ (plus:ALLHQHA (match_operand:ALLHQHA 1 "register_operand" "%0,0")
+ (match_operand:ALLHQHA 2 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ add %A0,%A2\;adc %B0,%B2
+ subi %A0,lo8(-(%2))\;sbci %B0,hi8(-(%2))"
+ [(set_attr "length" "2,2")
+ (set_attr "cc" "set_n,set_czn")])
+
+(define_insn "sub<mode>3"
+ [(set (match_operand:ALLHQHA 0 "register_operand" "=r,d")
+ (minus:ALLHQHA (match_operand:ALLHQHA 1 "register_operand" "0,0")
+ (match_operand:ALLHQHA 2 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ sub %A0,%A2\;sbc %B0,%B2
+ subi %A0,lo8(%2)\;sbci %B0,hi8(%2)"
+ [(set_attr "length" "2,2")
+ (set_attr "cc" "set_czn,set_czn")])
+
+(define_insn "add<mode>3"
+ [(set (match_operand:ALLSA 0 "register_operand" "=r,d")
+ (plus:ALLSA (match_operand:ALLSA 1 "register_operand" "%0,0")
+ (match_operand:ALLSA 2 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ add %A0,%A2\;adc %B0,%B2\;adc %C0,%C2\;adc %D0,%D2
+ subi %0,lo8(-(%2))\;sbci %B0,hi8(-(%2))\;sbci %C0,hlo8(-(%2))\;sbci %D0,hhi8(-(%2))"
+ [(set_attr "length" "4,4")
+ (set_attr "cc" "set_n,set_czn")])
+
+(define_insn "sub<mode>3"
+ [(set (match_operand:ALLSA 0 "register_operand" "=r,d")
+ (minus:ALLSA (match_operand:ALLSA 1 "register_operand" "0,0")
+ (match_operand:ALLSA 2 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ sub %0,%2\;sbc %B0,%B2\;sbc %C0,%C2\;sbc %D0,%D2
+ subi %A0,lo8(%2)\;sbci %B0,hi8(%2)\;sbci %C0,hlo8(%2)\;sbci %D0,hhi8(%2)"
+ [(set_attr "length" "4,4")
+ (set_attr "cc" "set_czn,set_czn")])
+
+;;; Movement, needed for specialized function calls
+
+(define_insn "*mov<mode>"
+ [(set (match_operand:ALLQQ 0 "nonimmediate_operand" "=r,d,Qm,r,q,r,*r")
+ (match_operand:ALLQQ 1 "general_operand" "r,i,rL,Qm,r,q,i"))]
+ ""
+ "* return output_movqi (insn, operands, NULL);"
+ [(set_attr "length" "1,1,5,5,1,1,4")
+ (set_attr "cc" "none,none,clobber,clobber,none,none,clobber")])
+
+(define_insn "*mov<mode>"
+ [(set (match_operand:ALLHQHA 0 "nonimmediate_operand" "=r,r,m,d,*r,q,r")
+ (match_operand:ALLHQHA 1 "general_operand" "r,m,rL,i,i,r,q"))]
+ ""
+ "* return output_movhi (insn, operands, NULL);"
+ [(set_attr "length" "2,6,7,2,6,5,2")
+ (set_attr "cc" "none,clobber,clobber,none,clobber,none,none")])
+
+(define_insn "*mov<mode>"
+ [(set (match_operand:ALLSA 0 "nonimmediate_operand" "=r,r,r,Qm,!d,r")
+ (match_operand:ALLSA 1 "general_operand" "r,L,Qm,rL,i,i"))]
+ ""
+ "* return output_movsisf (insn, operands, NULL);"
+ [(set_attr "length" "4,4,8,9,4,10")
+ (set_attr "cc" "none,set_zn,clobber,clobber,none,clobber")])
+
+;******************************************************************************
+; mul
+
+(define_insn "mulqq3"
+ [(set (match_operand:QQ 0 "register_operand" "=r")
+ (mult:QQ (match_operand:QQ 1 "register_operand" "a")
+ (match_operand:QQ 2 "register_operand" "a")))]
+ "AVR_HAVE_MUL"
+ "fmuls %1,%2\;mov %0,r1\;clr r1"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+(define_insn "muluqq3"
+ [(set (match_operand:UQQ 0 "register_operand" "=r")
+ (mult:UQQ (match_operand:UQQ 1 "register_operand" "r")
+ (match_operand:UQQ 2 "register_operand" "r")))]
+ "AVR_HAVE_MUL"
+ "mul %1,%2\;mov %0,r1\;clr r1"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+;; (reg:ALLHQ 20) not clobbered on the enhanced core.
+;; use registers from 16-23 so we can use fmuls
+;; All call-used registers clobbered otherwise - normal library call.
+(define_expand "mul<mode>3"
+ [(set (reg:ALLHQ 22) (match_operand:ALLHQ 1 "register_operand" ""))
+ (set (reg:ALLHQ 20) (match_operand:ALLHQ 2 "register_operand" ""))
+ (parallel [(set (reg:ALLHQ 18) (mult:ALLHQ (reg:ALLHQ 22) (reg:ALLHQ 20)))
+ (clobber (reg:ALLHQ 22))])
+ (set (match_operand:ALLHQ 0 "register_operand" "") (reg:ALLHQ 18))]
+ "AVR_HAVE_MUL"
+ "")
+
+(define_insn "*mul<mode>3_enh_call"
+ [(set (reg:ALLHQ 18) (mult:ALLHQ (reg:ALLHQ 22) (reg:ALLHQ 20)))
+ (clobber (reg:ALLHQ 22))]
+ "AVR_HAVE_MUL"
+ "%~call __mul<mode>3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+; Special calls for with and without mul.
+(define_expand "mul<mode>3"
+ [(set (reg:ALLHA 22) (match_operand:ALLHA 1 "register_operand" ""))
+ (set (reg:ALLHA 20) (match_operand:ALLHA 2 "register_operand" ""))
+ (parallel [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 20)))
+ (clobber (reg:ALLHA 22))])
+ (set (match_operand:ALLHA 0 "register_operand" "") (reg:ALLHA 18))]
+ ""
+ "
+{
+ if (!AVR_HAVE_MUL)
+ {
+ emit_insn (gen_mul<mode>3_call (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "*mul<mode>3_enh"
+ [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 20)))
+ (clobber (reg:ALLHA 22))]
+ "AVR_HAVE_MUL"
+ "%~call __mul<mode>3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+; Without multiplier, clobbers both inputs, and needs a separate output register
+(define_expand "mul<mode>3_call"
+ [(set (reg:ALLHA 24) (match_operand:ALLHA 1 "register_operand" ""))
+ (set (reg:ALLHA 22) (match_operand:ALLHA 2 "register_operand" ""))
+ (parallel [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 24)))
+ (clobber (reg:ALLHA 22))
+ (clobber (reg:ALLHA 24))])
+ (set (match_operand:ALLHA 0 "register_operand" "") (reg:ALLHA 18))]
+ "!AVR_HAVE_MUL"
+ "")
+
+(define_insn "*mul<mode>3_call"
+ [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 24)))
+ (clobber (reg:ALLHA 22))
+ (clobber (reg:ALLHA 24))]
+ "!AVR_HAVE_MUL"
+ "%~call __mul<mode>3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;; On the enhanced core, don't clobber either input, and use a separate output,
+;; r2 is needed as a zero register since r1 is used for mul
+(define_expand "mul<mode>3"
+ [(set (reg:ALLSA 16) (match_operand:ALLSA 1 "register_operand" ""))
+ (set (reg:ALLSA 20) (match_operand:ALLSA 2 "register_operand" ""))
+ (parallel [(set (reg:ALLSA 24) (mult:ALLSA (reg:ALLSA 16) (reg:ALLSA 20)))
+ (clobber (reg:QI 30))])
+ (set (match_operand:ALLSA 0 "register_operand" "") (reg:ALLSA 24))]
+ ""
+ "
+{
+ if (!AVR_HAVE_MUL)
+ {
+ emit_insn (gen_mul<mode>3_call (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "*mul<mode>3_enh"
+ [(set (reg:ALLSA 24) (mult:ALLSA (reg:ALLSA 16) (reg:ALLSA 20)))
+ (clobber (reg:QI 30))]
+ "AVR_HAVE_MUL"
+ "%~call __mul<mode>3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+; Without multiplier, clobbers both inputs, needs a separate output, and also
+; needs two more scratch registers
+(define_expand "mul<mode>3_call"
+ [(set (reg:ALLSA 18) (match_operand:ALLSA 1 "register_operand" ""))
+ (set (reg:ALLSA 24) (match_operand:ALLSA 2 "register_operand" ""))
+ (parallel [(set (reg:ALLSA 14) (mult:ALLSA (reg:ALLSA 18) (reg:ALLSA 24)))
+ (clobber (reg:ALLSA 18))
+ (clobber (reg:ALLSA 24))
+ (clobber (reg:HI 22))])
+ (set (match_operand:ALLSA 0 "register_operand" "") (reg:ALLSA 14))]
+ "!AVR_HAVE_MUL"
+ "")
+
+(define_insn "*mul<mode>3_call"
+ [(set (reg:ALLSA 14) (mult:ALLSA (reg:ALLSA 18) (reg:ALLSA 24)))
+ (clobber (reg:ALLSA 18))
+ (clobber (reg:ALLSA 24))
+ (clobber (reg:HI 22))]
+ "!AVR_HAVE_MUL"
+ "%~call __mul<mode>3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+; / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
+; div
+
+(define_code_iterator usdiv [udiv div]) ; do signed and unsigned in one shot
+
+(define_expand "<code><mode>3"
+ [(set (reg:ALLQQ 25) (match_operand:ALLQQ 1 "register_operand" ""))
+ (set (reg:ALLQQ 22) (match_operand:ALLQQ 2 "register_operand" ""))
+ (parallel [(set (reg:ALLQQ 24) (usdiv:ALLQQ (reg:ALLQQ 25) (reg:ALLQQ 22)))
+ (clobber (reg:ALLQQ 25))
+ (clobber (reg:QI 23))])
+ (set (match_operand:ALLQQ 0 "register_operand" "") (reg:ALLQQ 24))]
+ ""
+ "")
+
+(define_insn "*<code><mode>3_call"
+ [(set (reg:ALLQQ 24) (usdiv:ALLQQ (reg:ALLQQ 25) (reg:ALLQQ 22)))
+ (clobber (reg:ALLQQ 25))
+ (clobber (reg:QI 23))]
+ ""
+ "%~call __<code><mode>3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_expand "<code><mode>3"
+ [(set (reg:ALLHQHA 26) (match_operand:ALLHQHA 1 "register_operand" ""))
+ (set (reg:ALLHQHA 22) (match_operand:ALLHQHA 2 "register_operand" ""))
+ (parallel [(set (reg:ALLHQHA 24) (usdiv:ALLHQHA (reg:ALLHQHA 26) (reg:ALLHQHA 22)))
+ (clobber (reg:ALLHQHA 26))
+ (clobber (reg:QI 21))])
+ (set (match_operand:ALLHQHA 0 "register_operand" "") (reg:ALLHQHA 24))]
+ ""
+ "")
+
+(define_insn "*<code><mode>3_call"
+ [(set (reg:ALLHQHA 24) (usdiv:ALLHQHA (reg:ALLHQHA 26) (reg:ALLHQHA 22)))
+ (clobber (reg:ALLHQHA 26))
+ (clobber (reg:QI 21))]
+ ""
+ "%~call __<code><mode>3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+; note the first parameter gets passed in already offset by 2 bytes
+(define_expand "<code><mode>3"
+ [(set (reg:ALLSA 24) (match_operand:ALLSA 1 "register_operand" ""))
+ (set (reg:ALLSA 18) (match_operand:ALLSA 2 "register_operand" ""))
+ (parallel [(set (reg:ALLSA 22) (usdiv:ALLSA (reg:ALLSA 24) (reg:ALLSA 18)))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))])
+ (set (match_operand:ALLSA 0 "register_operand" "") (reg:ALLSA 22))]
+ ""
+ "")
+
+(define_insn "*<code><mode>3_call"
+ [(set (reg:ALLSA 22) (usdiv:ALLSA (reg:ALLSA 24) (reg:ALLSA 18)))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))]
+ ""
+ "%~call __<code><mode>3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+
+;; Note: eventually cmp, neg, abs, lshr, and ashl should be merged to use the
+;; same routines as integers, but for now I'm leaving that out to minimize changes
+;; to avr.md
+;; abs must be defined for fixed types for correct operation
+
+;; abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x)
+
+;; abs
+
+(define_insn "abs<mode>2"
+ [(set (match_operand:ALLQQ 0 "register_operand" "=r")
+ (abs:ALLQQ (match_operand:ALLQQ 1 "register_operand" "0")))]
+ ""
+ "sbrc %0,7
+ neg %0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "clobber")])
Index: gcc/config/avr/avr.md
===================================================================
--- gcc/config/avr/avr.md (revision 143507)
+++ gcc/config/avr/avr.md (working copy)
@@ -63,7 +63,10 @@
(include "predicates.md")
(include "constraints.md")
-
+
+; fixed-point instructions.
+(include "avr-fixed.md")
+
;; Condition code settings.
(define_attr "cc" "none,set_czn,set_zn,set_n,compare,clobber"
(const_string "none"))
Index: gcc/config/avr/avr.c
===================================================================
--- gcc/config/avr/avr.c (revision 143507)
+++ gcc/config/avr/avr.c (working copy)
@@ -304,6 +304,16 @@
{ NULL, ARCH_UNKNOWN, NULL }
};
+/* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
+static bool
+avr_scalar_mode_supported_p (enum machine_mode mode)
+{
+ if (ALL_FIXED_POINT_MODE_P (mode))
+ return true;
+
+ return default_scalar_mode_supported_p (mode);
+}
+
int avr_case_values_threshold = 30000;
/* Initialize the GCC target structure. */
@@ -357,6 +367,9 @@
#undef TARGET_HARD_REGNO_SCRATCH_OK
#define TARGET_HARD_REGNO_SCRATCH_OK avr_hard_regno_scratch_ok
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
+#define TARGET_SCALAR_MODE_SUPPORTED_P avr_scalar_mode_supported_p
+
struct gcc_target targetm = TARGET_INITIALIZER;
void
@@ -1704,9 +1717,9 @@
*l = 1;
- if (register_operand (dest, QImode))
+ if (register_operand (dest, VOIDmode))
{
- if (register_operand (src, QImode)) /* mov r,r */
+ if (register_operand (src, VOIDmode)) /* mov r,r */
{
if (test_hard_reg_class (STACK_REG, dest))
return AS2 (out,%0,%1);
@@ -1794,9 +1807,9 @@
if (!l)
l = &dummy;
- if (register_operand (dest, HImode))
+ if (register_operand (dest, VOIDmode))
{
- if (register_operand (src, HImode)) /* mov r,r */
+ if (register_operand (src, VOIDmode)) /* mov r,r */
{
if (test_hard_reg_class (STACK_REG, dest))
{
@@ -4313,6 +4326,196 @@
return "";
}
+/* Outputs instructions needed for fixed point conversion. */
+
+const char *
+fract_out (rtx insn ATTRIBUTE_UNUSED, rtx operands[], int intsigned, int *len)
+{
+ int i, k = 0;
+ int sbit[2], ilen[2], flen[2], tlen[2];
+ int rdest, rsource, offset;
+ int start, end, dir;
+ int hadbst = 0, hadlsl = 0;
+ int clrword = -1, lastclr = 0, clr = 0;
+ char buf[20];
+
+ if (!len)
+ len = &k;
+
+ for (i = 0; i < 2; i++)
+ {
+ enum machine_mode mode = GET_MODE (operands[i]);
+ tlen[i] = GET_MODE_SIZE (mode);
+ if (SCALAR_INT_MODE_P (mode))
+ {
+ sbit[i] = intsigned;
+ ilen[i] = GET_MODE_BITSIZE(mode) / 8;
+ flen[i] = 0;
+ }
+ else if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
+ {
+ sbit[i] = SIGNED_SCALAR_FIXED_POINT_MODE_P (mode);
+ ilen[i] = (GET_MODE_IBIT (mode) + 1) / 8;
+ flen[i] = (GET_MODE_FBIT (mode) + 1) / 8;
+ }
+ else
+ fatal_insn ("unsupported fixed-point conversion", insn);
+ }
+
+ rdest = true_regnum (operands[0]);
+ rsource = true_regnum (operands[1]);
+ offset = flen[1] - flen[0];
+
+ /* Store the sign bit if the destination is a signed
+ fract and the source has a sign in the integer part. */
+ if (sbit[0] && !ilen[0] && sbit[1] && ilen[1])
+ {
+ /* To avoid using bst and bld if the source and
+ destination registers overlap we can use a single lsl
+ since we don't care about preserving the source register. */
+ if (rdest < rsource + tlen[1] && rdest + tlen[0] > rsource)
+ {
+ sprintf (buf, "lsl r%d", rsource + tlen[1] - 1);
+ hadlsl = 1;
+ }
+ else
+ {
+ sprintf (buf, "bst r%d, 7", rsource + tlen[1] - 1);
+ hadbst = 1;
+ }
+ output_asm_insn (buf, operands);
+ ++*len;
+ }
+
+ /* Pick the correct direction. */
+ if (rdest < rsource + offset)
+ {
+ dir = 1;
+ start = 0;
+ end = tlen[0];
+ }
+ else
+ {
+ dir = -1;
+ start = tlen[0] - 1;
+ end = -1;
+ }
+
+ /* Move registers into place, clearing registers that do not overlap. */
+ for (i = start; i != end; i += dir)
+ {
+ int destloc = rdest + i, sourceloc = rsource + i + offset;
+ if (sourceloc < rsource || sourceloc >= rsource + tlen[1])
+ {
+ if (AVR_HAVE_MOVW && i+dir != end
+ && (sourceloc+dir < rsource || sourceloc+dir >= rsource + tlen[1])
+ && ((dir == 1 && !(destloc%2) && !(sourceloc%2))
+ || (dir == -1 && (destloc%2) && (sourceloc%2)))
+ && clrword != -1)
+ {
+ sprintf (buf, "movw r%d, r%d", destloc&0xfe, clrword&0xfe);
+ i += dir;
+ }
+ else
+ {
+ /* Do not clear the register if it is going to get
+ sign extended with a mov later. */
+ if (sbit[0] && sbit[1] && i != tlen[0] - 1 && i >= flen[0])
+ continue;
+
+ sprintf (buf, "clr r%d", destloc);
+ if (lastclr)
+ clrword = destloc;
+ clr=1;
+ }
+ }
+ else if (destloc == sourceloc)
+ continue;
+ else
+ if (AVR_HAVE_MOVW && i+dir != end
+ && sourceloc+dir >= rsource && sourceloc+dir < rsource + tlen[1]
+ && ((dir == 1 && !(destloc%2) && !(sourceloc%2))
+ || (dir == -1 && (destloc%2) && (sourceloc%2))))
+ {
+ sprintf (buf, "movw r%d, r%d", destloc&0xfe, sourceloc&0xfe);
+ i += dir;
+ }
+ else
+ sprintf (buf, "mov r%d, r%d", destloc, sourceloc);
+
+ output_asm_insn (buf, operands);
+ ++*len;
+
+ lastclr = clr;
+ clr = 0;
+ }
+
+ /* Perform sign extension if needed. */
+ if (sbit[0] && sbit[1] && ilen[0] > ilen[1])
+ {
+ sprintf (buf, "sbrc r%d, 7", rdest+tlen[1]-1-offset);
+ output_asm_insn (buf, operands);
+ sprintf (buf, "com r%d", rdest+tlen[0]-1);
+ output_asm_insn (buf, operands);
+ *len += 2;
+ /* Sign extend additional bytes. */
+ start = rdest + tlen[0] - 2;
+ end = rdest + flen[0] + ilen[1] - 1;
+ for (i = start; i != end; i--)
+ {
+ if (AVR_HAVE_MOVW && i != start && i-1 != end)
+ sprintf (buf, "movw r%d, r%d", --i, rdest+tlen[0]-2);
+ else
+ sprintf (buf, "mov r%d, r%d", i, rdest+tlen[0]-1);
+ output_asm_insn (buf, operands);
+ ++*len;
+ }
+ }
+
+ /* Perform shifts, only needed if one operand
+ is a signed fract, and the other is not. */
+ if (sbit[0] && !ilen[0] && (!sbit[1] || ilen[1]))
+ {
+ start = rdest+flen[0]-1;
+ end = rdest + flen[0] - flen[1];
+ if (end < rdest)
+ end = rdest;
+ for (i = start; i >= end; i--)
+ {
+ if (i == start && !hadlsl)
+ sprintf (buf, "lsr r%d", i);
+ else
+ sprintf (buf, "ror r%d", i);
+ output_asm_insn (buf, operands);
+ ++*len;
+ }
+
+ if (hadbst)
+ {
+ sprintf (buf, "bld r%d, 7", rdest + tlen[0] - 1);
+ output_asm_insn (buf, operands);
+ ++*len;
+ }
+ }
+ else if (sbit[1] && !ilen[1] && (!sbit[0] || ilen[0]))
+ {
+ start = rdest + flen[0] - flen[1];
+ if (start < rdest)
+ start = rdest;
+ for (i = start; i
[NOTE(review): the patch text is truncated at this point.  The archive's
HTML-tag stripping deleted everything between the '<' of this loop condition
and the next '>', swallowing the remainder of fract_out (), the rest of the
gcc/config/avr/avr.c hunk, and the header of the new file
gcc/config/avr/avr-modes.def up to the end of its GPL notice, which ended
with "... If not, see <http://www.gnu.org/licenses/>."  Recover the lost
region from the original posting before applying this patch.]
+. */
+
+/* On 8 bit machines it requires fewer instructions for fixed point
+ routines if the decimal place is on a byte boundary which is not
+ the default for signed accum types. */
+
+ADJUST_IBIT (HA, 7);
+ADJUST_FBIT (HA, 8);
+
+ADJUST_IBIT (SA, 15);
+ADJUST_FBIT (SA, 16);
+
+ADJUST_IBIT (DA, 31);
+ADJUST_FBIT (DA, 32);
+
+ADJUST_IBIT (TA, 63);
+ADJUST_FBIT (TA, 64);