300-gcc-fixedpoint-3-4-2010.patch (packages/crossavr-gcc.git)
crossavr-gcc: Synchronized with official AVR toolchain 3.4.0.663.
1diff -Naurp gcc/config/avr/avr.c gcc/config/avr/avr.c
2--- gcc/config/avr/avr.c 2011-10-27 16:45:17.000000000 +0530
3+++ gcc/config/avr/avr.c 2011-10-27 16:55:55.000000000 +0530
4@@ -236,6 +236,19 @@ static const struct default_options avr_
5 #undef TARGET_EXCEPT_UNWIND_INFO
6 #define TARGET_EXCEPT_UNWIND_INFO sjlj_except_unwind_info
7
8+#undef TARGET_SCALAR_MODE_SUPPORTED_P
9+#define TARGET_SCALAR_MODE_SUPPORTED_P avr_scalar_mode_supported_p
10+
11+ /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
12+ static bool
13+ avr_scalar_mode_supported_p (enum machine_mode mode)
14+ {
15+ if (ALL_FIXED_POINT_MODE_P (mode))
16+ return true;
17+
18+ return default_scalar_mode_supported_p (mode);
19+ }
20+
21 struct gcc_target targetm = TARGET_INITIALIZER;
22 \f
23 static void
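The TARGET_SCALAR_MODE_SUPPORTED_P hook above is what lets the front end accept the TR 18037 fixed-point scalar modes on AVR. A minimal sketch of the C-level types this enables, assuming the usual mapping of the fixed-point keywords to the QQ/HQ/HA/SA modes on 8-bit targets and a compiler built with this patch:

    /* hr/r/hk/k are the TR 18037 fixed-point constant suffixes.  */
    short _Fract sf = 0.25hr;   /* QQmode, 8 bits   */
    _Fract       f  = 0.5r;     /* HQmode, 16 bits  */
    short _Accum sa = 1.75hk;   /* HAmode, 16 bits  */
    _Accum       a  = 2.5k;     /* SAmode, 32 bits  */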
24@@ -1767,9 +1780,9 @@ output_movqi (rtx insn, rtx operands[],
25
26 *l = 1;
27
28- if (register_operand (dest, QImode))
29+ if (register_operand (dest, VOIDmode))
30 {
31- if (register_operand (src, QImode)) /* mov r,r */
32+ if (register_operand (src, VOIDmode)) /* mov r,r */
33 {
34 if (test_hard_reg_class (STACK_REG, dest))
35 return AS2 (out,%0,%1);
36@@ -1857,9 +1870,9 @@ output_movhi (rtx insn, rtx operands[],
37	 if (!l)
38 l = &dummy;
39
40- if (register_operand (dest, HImode))
41+ if (register_operand (dest, VOIDmode))
42 {
43- if (register_operand (src, HImode)) /* mov r,r */
44+ if (register_operand (src, VOIDmode)) /* mov r,r */
45 {
46 if (test_hard_reg_class (STACK_REG, dest))
47 {
48@@ -2582,6 +2595,14 @@ output_movsisf(rtx insn, rtx operands[],
49	 {
50 if (test_hard_reg_class (LD_REGS, dest)) /* ldi d,i */
51 {
52+ if (AVR_HAVE_MOVW
53+ && (UINTVAL (src) >> 16) == (UINTVAL (src) & 0xffff))
54+ {
55+ *l = 3;
56+ return (AS2 (ldi,%A0,lo8(%1)) CR_TAB
57+ AS2 (ldi,%B0,hi8(%1)) CR_TAB
58+ AS2 (movw,%C0,%A0));
59+ }
60 *l = 4;
61 return (AS2 (ldi,%A0,lo8(%1)) CR_TAB
62 AS2 (ldi,%B0,hi8(%1)) CR_TAB
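The new AVR_HAVE_MOVW branch above shaves one instruction off 32-bit constant loads whose upper and lower 16-bit halves are identical: two ldi plus a movw instead of four ldi, which is why *l is set to 3. A hedged illustration of source code that hits this case (register allocation is up to the compiler):

    unsigned long repeated_halves (void)
    {
      /* (0x5A5A5A5A >> 16) == (0x5A5A5A5A & 0xffff), so with MOVW this
         constant can be materialized as ldi/ldi/movw.  */
      return 0x5A5A5A5AUL;
    }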
63@@ -4527,6 +4548,196 @@ avr_rotate_bytes (rtx operands[])
64	 return true;
65 }
66
67+/* Outputs instructions needed for fixed point conversion. */
68+
69+const char *
70+fract_out (rtx insn ATTRIBUTE_UNUSED, rtx operands[], int intsigned, int *len)
71+{
72+ int i, k = 0;
73+ int sbit[2], ilen[2], flen[2], tlen[2];
74+ int rdest, rsource, offset;
75+ int start, end, dir;
76+ int hadbst = 0, hadlsl = 0;
77+ int clrword = -1, lastclr = 0, clr = 0;
78+ char buf[20];
79+
80+ if (!len)
81+ len = &k;
82+
83+ for (i = 0; i < 2; i++)
84+ {
85+ enum machine_mode mode = GET_MODE (operands[i]);
86+ tlen[i] = GET_MODE_SIZE (mode);
87+ if (SCALAR_INT_MODE_P (mode))
88+ {
89+ sbit[i] = intsigned;
90+ ilen[i] = GET_MODE_BITSIZE(mode) / 8;
91+ flen[i] = 0;
92+ }
93+ else if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
94+ {
95+ sbit[i] = SIGNED_SCALAR_FIXED_POINT_MODE_P (mode);
96+ ilen[i] = (GET_MODE_IBIT (mode) + 1) / 8;
97+ flen[i] = (GET_MODE_FBIT (mode) + 1) / 8;
98+ }
99+ else
100+ fatal_insn ("unsupported fixed-point conversion", insn);
101+ }
102+
103+ rdest = true_regnum (operands[0]);
104+ rsource = true_regnum (operands[1]);
105+ offset = flen[1] - flen[0];
106+
107+ /* Store the sign bit if the destination is a signed
108+ fract and the source has a sign in the integer part. */
109+ if (sbit[0] && !ilen[0] && sbit[1] && ilen[1])
110+ {
111+ /* To avoid using bst and bld if the source and
112+ destination registers overlap we can use a single lsl
113+ since we don't care about preserving the source register. */
114+ if (rdest < rsource + tlen[1] && rdest + tlen[0] > rsource)
115+ {
116+ sprintf (buf, "lsl r%d", rsource + tlen[1] - 1);
117+ hadlsl = 1;
118+ }
119+ else
120+ {
121+ sprintf (buf, "bst r%d, 7", rsource + tlen[1] - 1);
122+ hadbst = 1;
123+ }
124+ output_asm_insn (buf, operands);
125+ ++*len;
126+ }
127+
128+ /* Pick the correct direction. */
129+ if (rdest < rsource + offset)
130+ {
131+ dir = 1;
132+ start = 0;
133+ end = tlen[0];
134+ }
135+ else
136+ {
137+ dir = -1;
138+ start = tlen[0] - 1;
139+ end = -1;
140+ }
141+
142+ /* Move registers into place, clearing registers that do not overlap. */
143+ for (i = start; i != end; i += dir)
144+ {
145+ int destloc = rdest + i, sourceloc = rsource + i + offset;
146+ if (sourceloc < rsource || sourceloc >= rsource + tlen[1])
147+ {
148+ if (AVR_HAVE_MOVW && i+dir != end
149+ && (sourceloc+dir < rsource || sourceloc+dir >= rsource + tlen[1])
150+ && ((dir == 1 && !(destloc%2) && !(sourceloc%2))
151+ || (dir == -1 && (destloc%2) && (sourceloc%2)))
152+ && clrword != -1)
153+ {
154+ sprintf (buf, "movw r%d, r%d", destloc&0xfe, clrword&0xfe);
155+ i += dir;
156+ }
157+ else
158+ {
159+ /* Do not clear the register if it is going to get
160+ sign extended with a mov later. */
161+ if (sbit[0] && sbit[1] && i != tlen[0] - 1 && i >= flen[0])
162+ continue;
163+
164+ sprintf (buf, "clr r%d", destloc);
165+ if (lastclr)
166+ clrword = destloc;
167+ clr=1;
168+ }
169+ }
170+ else if (destloc == sourceloc)
171+ continue;
172+ else
173+ if (AVR_HAVE_MOVW && i+dir != end
174+ && sourceloc+dir >= rsource && sourceloc+dir < rsource + tlen[1]
175+ && ((dir == 1 && !(destloc%2) && !(sourceloc%2))
176+ || (dir == -1 && (destloc%2) && (sourceloc%2))))
177+ {
178+ sprintf (buf, "movw r%d, r%d", destloc&0xfe, sourceloc&0xfe);
179+ i += dir;
180+ }
181+ else
182+ sprintf (buf, "mov r%d, r%d", destloc, sourceloc);
183+
184+ output_asm_insn (buf, operands);
185+ ++*len;
186+
187+ lastclr = clr;
188+ clr = 0;
189+ }
190+
191+ /* Perform sign extension if needed. */
192+ if (sbit[0] && sbit[1] && ilen[0] > ilen[1])
193+ {
194+ sprintf (buf, "sbrc r%d, 7", rdest+tlen[1]-1-offset);
195+ output_asm_insn (buf, operands);
196+ sprintf (buf, "com r%d", rdest+tlen[0]-1);
197+ output_asm_insn (buf, operands);
198+ *len += 2;
199+ /* Sign extend additional bytes. */
200+ start = rdest + tlen[0] - 2;
201+ end = rdest + flen[0] + ilen[1] - 1;
202+ for (i = start; i != end; i--)
203+ {
204+ if (AVR_HAVE_MOVW && i != start && i-1 != end)
205+ sprintf (buf, "movw r%d, r%d", --i, rdest+tlen[0]-2);
206+ else
207+ sprintf (buf, "mov r%d, r%d", i, rdest+tlen[0]-1);
208+ output_asm_insn (buf, operands);
209+ ++*len;
210+ }
211+ }
212+
213+ /* Perform shifts, only needed if one operand
214+ is a signed fract, and the other is not. */
215+ if (sbit[0] && !ilen[0] && (!sbit[1] || ilen[1]))
216+ {
217+ start = rdest+flen[0]-1;
218+ end = rdest + flen[0] - flen[1];
219+ if (end < rdest)
220+ end = rdest;
221+ for (i = start; i >= end; i--)
222+ {
223+ if (i == start && !hadlsl)
224+ sprintf (buf, "lsr r%d", i);
225+ else
226+ sprintf (buf, "ror r%d", i);
227+ output_asm_insn (buf, operands);
228+ ++*len;
229+ }
230+
231+ if (hadbst)
232+ {
233+ sprintf (buf, "bld r%d, 7", rdest + tlen[0] - 1);
234+ output_asm_insn (buf, operands);
235+ ++*len;
236+ }
237+ }
238+ else if (sbit[1] && !ilen[1] && (!sbit[0] || ilen[0]))
239+ {
240+ start = rdest + flen[0] - flen[1];
241+ if (start < rdest)
242+ start = rdest;
243+ for (i = start; i<rdest+flen[0]; i++)
244+ {
245+ if (i == start)
246+ sprintf (buf, "lsl r%d", i);
247+ else
248+ sprintf (buf, "rol r%d", i);
249+ output_asm_insn (buf, operands);
250+ ++*len;
251+ }
252+ }
253+
254+ return "";
255+}
256+
257 /* Modifies the length assigned to instruction INSN
258 LEN is the initially computed length of the insn. */
259
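fract_out above is the single worker behind the fract/fractuns patterns added in avr-fixed.md below: it aligns the binary points with byte moves (or movw), clears or sign-extends the non-overlapping bytes, and uses one-bit lsl/lsr/ror sequences only when a signed fract operand forces it. A hedged sketch of conversions that end up here, assuming the usual type-to-mode mapping (short _Fract = QQ, _Accum = SA):

    _Accum widen (short _Fract x)
    {
      return x;                  /* expands through a fract<mode><mode>2 pattern */
    }

    short _Fract narrow (_Accum a)
    {
      return (short _Fract) a;   /* likewise, in the other direction */
    }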
260diff -Naurp gcc/config/avr/avr-fixed.md gcc/config/avr/avr-fixed.md
261--- gcc/config/avr/avr-fixed.md 1970-01-01 05:30:00.000000000 +0530
262+++ gcc/config/avr/avr-fixed.md 2011-10-27 16:55:55.000000000 +0530
263@@ -0,0 +1,338 @@
264+;; -*- Mode: Scheme -*-
265+;; This file contains instructions that support fixed-point operations
266+;; for ATMEL AVR micro controllers.
267+;; Copyright (C) 2009
268+;; Free Software Foundation, Inc.
269+;; Contributed by Sean D'Epagnier (sean@depagnier.com)
270+
271+;; This file is part of GCC.
272+
273+;; GCC is free software; you can redistribute it and/or modify
274+;; it under the terms of the GNU General Public License as published by
275+;; the Free Software Foundation; either version 3, or (at your option)
276+;; any later version.
277+
278+;; GCC is distributed in the hope that it will be useful,
279+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
280+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
281+;; GNU General Public License for more details.
282+
283+;; You should have received a copy of the GNU General Public License
284+;; along with GCC; see the file COPYING3. If not see
285+;; <http://www.gnu.org/licenses/>.
286+
287+(define_mode_iterator ALLQQ [(QQ "") (UQQ "")])
288+(define_mode_iterator ALLHQ [(HQ "") (UHQ "")])
289+(define_mode_iterator ALLHA [(HA "") (UHA "")])
290+(define_mode_iterator ALLHQHA [(HQ "") (UHQ "") (HA "") (UHA "")])
291+(define_mode_iterator ALLSA [(SA "") (USA "")])
292+
293+;;; Conversions
294+
295+(define_mode_iterator FIXED1 [(QQ "") (UQQ "") (HQ "") (UHQ "")
296+ (SQ "") (USQ "") (DQ "") (UDQ "")
297+ (HA "") (UHA "") (SA "") (USA "")
298+ (DA "") (UDA "") (TA "") (UTA "")
299+ (QI "") (HI "") (SI "") (DI "")])
300+(define_mode_iterator FIXED2 [(QQ "") (UQQ "") (HQ "") (UHQ "")
301+ (SQ "") (USQ "") (DQ "") (UDQ "")
302+ (HA "") (UHA "") (SA "") (USA "")
303+ (DA "") (UDA "") (TA "") (UTA "")
304+ (QI "") (HI "") (SI "") (DI "")])
305+
306+(define_insn "fract<FIXED2:mode><FIXED1:mode>2"
307+ [(set (match_operand:FIXED1 0 "register_operand" "=r")
308+ (fract_convert:FIXED1 (match_operand:FIXED2 1 "register_operand" "r")))]
309+ ""
310+ "* return fract_out (insn, operands, 1, NULL);"
311+ [(set_attr "cc" "clobber")])
312+
313+(define_insn "fractuns<FIXED2:mode><FIXED1:mode>2"
314+ [(set (match_operand:FIXED1 0 "register_operand" "=r")
315+ (unsigned_fract_convert:FIXED1 (match_operand:FIXED2 1 "register_operand" "r")))]
316+ ""
317+ "* return fract_out (insn, operands, 0, NULL);"
318+ [(set_attr "cc" "clobber")])
319+
320+;;; Addition/Subtraction, mostly identical to integer versions
321+
322+(define_insn "add<ALLQQ:mode>3"
323+ [(set (match_operand:ALLQQ 0 "register_operand" "=r,d")
324+ (plus:ALLQQ (match_operand:ALLQQ 1 "register_operand" "%0,0")
325+ (match_operand:ALLQQ 2 "nonmemory_operand" "r,i")))]
326+ ""
327+ "@
328+ add %0,%2
329+ subi %0,lo8(-(%2))"
330+ [(set_attr "length" "1,1")
331+ (set_attr "cc" "set_czn,set_czn")])
332+
333+(define_insn "sub<ALLQQ:mode>3"
334+ [(set (match_operand:ALLQQ 0 "register_operand" "=r,d")
335+ (minus:ALLQQ (match_operand:ALLQQ 1 "register_operand" "0,0")
336+ (match_operand:ALLQQ 2 "nonmemory_operand" "r,i")))]
337+ ""
338+ "@
339+ sub %0,%2
340+ subi %0,lo8(%2)"
341+ [(set_attr "length" "1,1")
342+ (set_attr "cc" "set_czn,set_czn")])
343+
344+
345+(define_insn "add<ALLHQHA:mode>3"
346+ [(set (match_operand:ALLHQHA 0 "register_operand" "=r,d")
347+ (plus:ALLHQHA (match_operand:ALLHQHA 1 "register_operand" "%0,0")
348+ (match_operand:ALLHQHA 2 "nonmemory_operand" "r,i")))]
349+ ""
350+ "@
351+ add %A0,%A2\;adc %B0,%B2
352+ subi %A0,lo8(-(%2))\;sbci %B0,hi8(-(%2))"
353+ [(set_attr "length" "2,2")
354+ (set_attr "cc" "set_n,set_czn")])
355+
356+(define_insn "sub<ALLHQHA:mode>3"
357+ [(set (match_operand:ALLHQHA 0 "register_operand" "=r,d")
358+ (minus:ALLHQHA (match_operand:ALLHQHA 1 "register_operand" "0,0")
359+ (match_operand:ALLHQHA 2 "nonmemory_operand" "r,i")))]
360+ ""
361+ "@
362+ sub %A0,%A2\;sbc %B0,%B2
363+ subi %A0,lo8(%2)\;sbci %B0,hi8(%2)"
364+ [(set_attr "length" "2,2")
365+ (set_attr "cc" "set_czn,set_czn")])
366+
367+(define_insn "add<ALLSA:mode>3"
368+ [(set (match_operand:ALLSA 0 "register_operand" "=r,d")
369+ (plus:ALLSA (match_operand:ALLSA 1 "register_operand" "%0,0")
370+ (match_operand:ALLSA 2 "nonmemory_operand" "r,i")))]
371+ ""
372+ "@
373+ add %A0,%A2\;adc %B0,%B2\;adc %C0,%C2\;adc %D0,%D2
374+ subi %0,lo8(-(%2))\;sbci %B0,hi8(-(%2))\;sbci %C0,hlo8(-(%2))\;sbci %D0,hhi8(-(%2))"
375+ [(set_attr "length" "4,4")
376+ (set_attr "cc" "set_n,set_czn")])
377+
378+(define_insn "sub<ALLSA:mode>3"
379+ [(set (match_operand:ALLSA 0 "register_operand" "=r,d")
380+ (minus:ALLSA (match_operand:ALLSA 1 "register_operand" "0,0")
381+ (match_operand:ALLSA 2 "nonmemory_operand" "r,i")))]
382+ ""
383+ "@
384+ sub %0,%2\;sbc %B0,%B2\;sbc %C0,%C2\;sbc %D0,%D2
385+ subi %A0,lo8(%2)\;sbci %B0,hi8(%2)\;sbci %C0,hlo8(%2)\;sbci %D0,hhi8(%2)"
386+ [(set_attr "length" "4,4")
387+ (set_attr "cc" "set_czn,set_czn")])
388+
389+;******************************************************************************
390+; mul
391+
392+(define_insn "mulqq3"
393+ [(set (match_operand:QQ 0 "register_operand" "=r")
394+ (mult:QQ (match_operand:QQ 1 "register_operand" "a")
395+ (match_operand:QQ 2 "register_operand" "a")))]
396+ "AVR_HAVE_MUL"
397+ "fmuls %1,%2\;mov %0,r1\;clr r1"
398+ [(set_attr "length" "3")
399+ (set_attr "cc" "clobber")])
400+
401+(define_insn "muluqq3"
402+ [(set (match_operand:UQQ 0 "register_operand" "=r")
403+ (mult:UQQ (match_operand:UQQ 1 "register_operand" "r")
404+ (match_operand:UQQ 2 "register_operand" "r")))]
405+ "AVR_HAVE_MUL"
406+ "mul %1,%2\;mov %0,r1\;clr r1"
407+ [(set_attr "length" "3")
408+ (set_attr "cc" "clobber")])
409+
410+;; (reg:ALLHQ 20) not clobbered on the enhanced core.
411+;; use registers from 16-23 so we can use fmuls
412+;; All call-used registers clobbered otherwise - normal library call.
413+(define_expand "mul<ALLHQ:mode>3"
414+ [(set (reg:ALLHQ 22) (match_operand:ALLHQ 1 "register_operand" ""))
415+ (set (reg:ALLHQ 20) (match_operand:ALLHQ 2 "register_operand" ""))
416+ (parallel [(set (reg:ALLHQ 18) (mult:ALLHQ (reg:ALLHQ 22) (reg:ALLHQ 20)))
417+ (clobber (reg:ALLHQ 22))])
418+ (set (match_operand:ALLHQ 0 "register_operand" "") (reg:ALLHQ 18))]
419+ "AVR_HAVE_MUL"
420+ "")
421+
422+(define_insn "*mul<ALLHQ:mode>3_enh_call"
423+ [(set (reg:ALLHQ 18) (mult:ALLHQ (reg:ALLHQ 22) (reg:ALLHQ 20)))
424+ (clobber (reg:ALLHQ 22))]
425+ "AVR_HAVE_MUL"
426+ "%~call __mul<ALLHQ:mode>3"
427+ [(set_attr "type" "xcall")
428+ (set_attr "cc" "clobber")])
429+
430+; Special calls for with and without mul.
431+(define_expand "mul<ALLHA:mode>3"
432+ [(set (reg:ALLHA 22) (match_operand:ALLHA 1 "register_operand" ""))
433+ (set (reg:ALLHA 20) (match_operand:ALLHA 2 "register_operand" ""))
434+ (parallel [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 20)))
435+ (clobber (reg:ALLHA 22))])
436+ (set (match_operand:ALLHA 0 "register_operand" "") (reg:ALLHA 18))]
437+ ""
438+ "
439+{
440+ if (!AVR_HAVE_MUL)
441+ {
442+ emit_insn (gen_mul<ALLHA:mode>3_call (operands[0], operands[1], operands[2]));
443+ DONE;
444+ }
445+}")
446+
447+(define_insn "*mul<ALLHA:mode>3_enh"
448+ [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 20)))
449+ (clobber (reg:ALLHA 22))]
450+ "AVR_HAVE_MUL"
451+ "%~call __mul<ALLHA:mode>3"
452+ [(set_attr "type" "xcall")
453+ (set_attr "cc" "clobber")])
454+
455+; Without multiplier, clobbers both inputs, and needs a separate output register
456+(define_expand "mul<ALLHA:mode>3_call"
457+ [(set (reg:ALLHA 24) (match_operand:ALLHA 1 "register_operand" ""))
458+ (set (reg:ALLHA 22) (match_operand:ALLHA 2 "register_operand" ""))
459+ (parallel [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 24)))
460+ (clobber (reg:ALLHA 22))
461+ (clobber (reg:ALLHA 24))])
462+ (set (match_operand:ALLHA 0 "register_operand" "") (reg:ALLHA 18))]
463+ "!AVR_HAVE_MUL"
464+ "")
465+
466+(define_insn "*mul<ALLHA:mode>3_call"
467+ [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 24)))
468+ (clobber (reg:ALLHA 22))
469+ (clobber (reg:ALLHA 24))]
470+ "!AVR_HAVE_MUL"
471+ "%~call __mul<ALLHA:mode>3"
472+ [(set_attr "type" "xcall")
473+ (set_attr "cc" "clobber")])
474+
475+;; On the enhanced core, don't clobber either input, and use a separate output,
476+;; r2 is needed as a zero register since r1 is used for mul
477+(define_expand "mul<ALLSA:mode>3"
478+ [(set (reg:ALLSA 16) (match_operand:ALLSA 1 "register_operand" ""))
479+ (set (reg:ALLSA 20) (match_operand:ALLSA 2 "register_operand" ""))
480+ (parallel [(set (reg:ALLSA 24) (mult:ALLSA (reg:ALLSA 16) (reg:ALLSA 20)))
481+ (clobber (reg:QI 15))])
482+ (set (match_operand:ALLSA 0 "register_operand" "") (reg:ALLSA 24))]
483+ ""
484+ "
485+{
486+ if (!AVR_HAVE_MUL)
487+ {
488+ emit_insn (gen_mul<ALLSA:mode>3_call (operands[0], operands[1], operands[2]));
489+ DONE;
490+ }
491+}")
492+
493+(define_insn "*mul<ALLSA:mode>3_enh"
494+ [(set (reg:ALLSA 24) (mult:ALLSA (reg:ALLSA 16) (reg:ALLSA 20)))
495+ (clobber (reg:QI 15))]
496+ "AVR_HAVE_MUL"
497+ "%~call __mul<ALLSA:mode>3"
498+ [(set_attr "type" "xcall")
499+ (set_attr "cc" "clobber")])
500+
501+; Without multiplier, clobbers both inputs, needs a separate output, and also
502+; needs two more scratch registers
503+(define_expand "mul<ALLSA:mode>3_call"
504+ [(set (reg:ALLSA 18) (match_operand:ALLSA 1 "register_operand" ""))
505+ (set (reg:ALLSA 24) (match_operand:ALLSA 2 "register_operand" ""))
506+ (parallel [(set (reg:ALLSA 14) (mult:ALLSA (reg:ALLSA 18) (reg:ALLSA 24)))
507+ (clobber (reg:ALLSA 18))
508+ (clobber (reg:ALLSA 24))
509+ (clobber (reg:HI 22))])
510+ (set (match_operand:ALLSA 0 "register_operand" "") (reg:ALLSA 14))]
511+ "!AVR_HAVE_MUL"
512+ "")
513+
514+(define_insn "*mul<ALLSA:mode>3_call"
515+ [(set (reg:ALLSA 14) (mult:ALLSA (reg:ALLSA 18) (reg:ALLSA 24)))
516+ (clobber (reg:ALLSA 18))
517+ (clobber (reg:ALLSA 24))
518+ (clobber (reg:HI 22))]
519+ "!AVR_HAVE_MUL"
520+ "%~call __mul<ALLSA:mode>3"
521+ [(set_attr "type" "xcall")
522+ (set_attr "cc" "clobber")])
523+
524+; / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
525+; div
526+
527+(define_code_iterator usdiv [udiv div]) ; do signed and unsigned in one shot
528+
529+(define_expand "<usdiv:code><ALLQQ:mode>3"
530+ [(set (reg:ALLQQ 25) (match_operand:ALLQQ 1 "register_operand" ""))
531+ (set (reg:ALLQQ 22) (match_operand:ALLQQ 2 "register_operand" ""))
532+ (parallel [(set (reg:ALLQQ 24) (usdiv:ALLQQ (reg:ALLQQ 25) (reg:ALLQQ 22)))
533+ (clobber (reg:ALLQQ 25))
534+ (clobber (reg:QI 23))])
535+ (set (match_operand:ALLQQ 0 "register_operand" "") (reg:ALLQQ 24))]
536+ ""
537+ "")
538+
539+(define_insn "*<usdiv:code><ALLQQ:mode>3_call"
540+ [(set (reg:ALLQQ 24) (usdiv:ALLQQ (reg:ALLQQ 25) (reg:ALLQQ 22)))
541+ (clobber (reg:ALLQQ 25))
542+ (clobber (reg:QI 23))]
543+ ""
544+ "%~call __<usdiv:code><ALLQQ:mode>3"
545+ [(set_attr "type" "xcall")
546+ (set_attr "cc" "clobber")])
547+
548+(define_expand "<usdiv:code><ALLHQHA:mode>3"
549+ [(set (reg:ALLHQHA 26) (match_operand:ALLHQHA 1 "register_operand" ""))
550+ (set (reg:ALLHQHA 22) (match_operand:ALLHQHA 2 "register_operand" ""))
551+ (parallel [(set (reg:ALLHQHA 24) (usdiv:ALLHQHA (reg:ALLHQHA 26) (reg:ALLHQHA 22)))
552+ (clobber (reg:ALLHQHA 26))
553+ (clobber (reg:QI 21))])
554+ (set (match_operand:ALLHQHA 0 "register_operand" "") (reg:ALLHQHA 24))]
555+ ""
556+ "")
557+
558+(define_insn "*<usdiv:code><ALLHQHA:mode>3_call"
559+ [(set (reg:ALLHQHA 24) (usdiv:ALLHQHA (reg:ALLHQHA 26) (reg:ALLHQHA 22)))
560+ (clobber (reg:ALLHQHA 26))
561+ (clobber (reg:QI 21))]
562+ ""
563+ "%~call __<usdiv:code><ALLHQHA:mode>3"
564+ [(set_attr "type" "xcall")
565+ (set_attr "cc" "clobber")])
566+
567+; note the first parameter gets passed in already offset by 2 bytes
568+(define_expand "<usdiv:code><ALLSA:mode>3"
569+ [(set (reg:ALLSA 24) (match_operand:ALLSA 1 "register_operand" ""))
570+ (set (reg:ALLSA 18) (match_operand:ALLSA 2 "register_operand" ""))
571+ (parallel [(set (reg:ALLSA 22) (usdiv:ALLSA (reg:ALLSA 24) (reg:ALLSA 18)))
572+ (clobber (reg:HI 26))
573+ (clobber (reg:HI 30))])
574+ (set (match_operand:ALLSA 0 "register_operand" "") (reg:ALLSA 22))]
575+ ""
576+ "")
577+
578+(define_insn "*<usdiv:code><ALLSA:mode>3_call"
579+ [(set (reg:ALLSA 22) (usdiv:ALLSA (reg:ALLSA 24) (reg:ALLSA 18)))
580+ (clobber (reg:HI 26))
581+ (clobber (reg:HI 30))]
582+ ""
583+ "%~call __<usdiv:code><ALLSA:mode>3"
584+ [(set_attr "type" "xcall")
585+ (set_attr "cc" "clobber")])
586+
587+
588+;; abs must be defined for fixed types for correct operation
589+
590+;; abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x)
591+
592+;; abs
593+
594+(define_insn "abs<ALLQQ:mode>2"
595+ [(set (match_operand:ALLQQ 0 "register_operand" "=r")
596+ (abs:ALLQQ (match_operand:ALLQQ 1 "register_operand" "0")))]
597+ ""
598+ "sbrc %0,7
599+ neg %0"
600+ [(set_attr "length" "2")
601+ (set_attr "cc" "clobber")])
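The patterns above reuse the integer add/sub sequences for fixed-point addition and subtraction, use the hardware multiplier (fmul/fmuls/mulsu) where AVR_HAVE_MUL holds, and otherwise fall back to __mul*/__div* library calls. A hedged example of C code that exercises them; the pattern names in the comments follow the definitions above, and the exact code depends on the selected -mmcu and on register allocation:

    short _Fract mul_qq (short _Fract a, short _Fract b)
    {
      return a * b;     /* mulqq3: fmuls, mov, clr r1 on cores with MUL */
    }

    _Accum add_sa (_Accum a, _Accum b)
    {
      return a + b;     /* addsa3: four-byte add/adc chain, as for SImode */
    }

    short _Accum div_ha (short _Accum a, short _Accum b)
    {
      return a / b;     /* divha3: call to __divha3 */
    }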
602diff -Naurp gcc/config/avr/avr.md gcc/config/avr/avr.md
603--- gcc/config/avr/avr.md 2011-10-27 16:45:17.000000000 +0530
604+++ gcc/config/avr/avr.md 2011-10-27 16:55:55.000000000 +0530
605@@ -65,6 +65,15 @@
606 (include "predicates.md")
607 (include "constraints.md")
608
609+; fixed-point instructions.
610+(include "avr-fixed.md")
611+(define_mode_iterator ALLQ [(QI "") (QQ "") (UQQ "")])
612+(define_mode_iterator ALLH [(HI "") (HQ "") (UHQ "") (HA "") (UHA "")])
613+(define_mode_iterator ALLS [(SI "") (SA "") (USA "")])
614+(define_mode_iterator ALLQS [(QI "") (QQ "") (UQQ "")
615+ (HI "") (HQ "") (UHQ "") (HA "") (UHA "")
616+ (SI "") (SA "") (USA "")])
617+
618 ;; Condition code settings.
619 (define_attr "cc" "none,set_czn,set_zn,set_n,compare,clobber"
620 (const_string "none"))
621@@ -179,28 +188,27 @@
622 DONE;
623 })
624
625-
626-(define_insn "*pushqi"
627-  [(set (mem:QI (post_dec:HI (reg:HI REG_SP)))
628-	(match_operand:QI 0 "reg_or_0_operand" "r,L"))]
629+(define_insn "*push<ALLQ:mode>"
630+ [(set (mem:ALLQ (post_dec:HI (reg:HI REG_SP)))
631+ (match_operand:ALLQ 0 "reg_or_0_operand" "r,L"))]
632   ""
633 "@
634 push %0
635	push __zero_reg__"
636   [(set_attr "length" "1,1")])
637
638-(define_insn "*pushhi"
639-  [(set (mem:HI (post_dec:HI (reg:HI REG_SP)))
640-	(match_operand:HI 0 "reg_or_0_operand" "r,L"))]
641+(define_insn "*push<ALLH:mode>"
642+ [(set (mem:ALLH (post_dec:HI (reg:HI REG_SP)))
643+ (match_operand:ALLH 0 "reg_or_0_operand" "r,L"))]
644   ""
645 "@
646 push %B0\;push %A0
647 push __zero_reg__\;push __zero_reg__"
648 [(set_attr "length" "2,2")])
649
650-(define_insn "*pushsi"
651-  [(set (mem:SI (post_dec:HI (reg:HI REG_SP)))
652-	(match_operand:SI 0 "reg_or_0_operand" "r,L"))]
653+(define_insn "*push<ALLS:mode>"
654+ [(set (mem:ALLS (post_dec:HI (reg:HI REG_SP)))
655+ (match_operand:ALLS 0 "reg_or_0_operand" "r,L"))]
656   ""
657 "@
658 push %D0\;push %C0\;push %B0\;push %A0
659@@ -226,21 +234,21 @@
660 ;; are call-saved registers, and most of LD_REGS are call-used registers,
661 ;; so this may still be a win for registers live across function calls.
662
663-(define_expand "movqi"
664- [(set (match_operand:QI 0 "nonimmediate_operand" "")
665- (match_operand:QI 1 "general_operand" ""))]
666+(define_expand "mov<ALLQ:mode>"
667+ [(set (match_operand:ALLQ 0 "nonimmediate_operand" "")
668+ (match_operand:ALLQ 1 "general_operand" ""))]
669 ""
670 "/* One of the ops has to be in a register. */
671- if (!register_operand(operand0, QImode)
672- && ! (register_operand(operand1, QImode) || const0_rtx == operand1))
673- operands[1] = copy_to_mode_reg(QImode, operand1);
674+ if (!register_operand(operand0, <ALLQ:MODE>mode)
675+ && ! (register_operand(operand1, <ALLQ:MODE>mode) || const0_rtx == operand1))
676+ operands[1] = copy_to_mode_reg(<ALLQ:MODE>mode, operand1);
677 ")
678
679-(define_insn "*movqi"
680- [(set (match_operand:QI 0 "nonimmediate_operand" "=r,d,Qm,r,q,r,*r")
681- (match_operand:QI 1 "general_operand" "rL,i,rL,Qm,r,q,i"))]
682- "(register_operand (operands[0],QImode)
683- || register_operand (operands[1], QImode) || const0_rtx == operands[1])"
684+(define_insn "*mov<ALLQ:mode>"
685+ [(set (match_operand:ALLQ 0 "nonimmediate_operand" "=r,d,Qm,r,q,r,*r")
686+ (match_operand:ALLQ 1 "general_operand" "r,i,rL,Qm,r,q,i"))]
687+ "(register_operand (operands[0],<ALLQ:MODE>mode)
688+ || register_operand (operands[1], <ALLQ:MODE>mode) || const0_rtx == operands[1])"
689 "* return output_movqi (insn, operands, NULL);"
690 [(set_attr "length" "1,1,5,5,1,1,4")
691 (set_attr "cc" "none,none,clobber,clobber,none,none,clobber")])
692@@ -272,17 +280,17 @@
693 ;;============================================================================
694 ;; move word (16 bit)
695
696-(define_expand "movhi"
697- [(set (match_operand:HI 0 "nonimmediate_operand" "")
698- (match_operand:HI 1 "general_operand" ""))]
699+(define_expand "mov<ALLH:mode>"
700+ [(set (match_operand:ALLH 0 "nonimmediate_operand" "")
701+ (match_operand:ALLH 1 "general_operand" ""))]
702 ""
703 "
704 {
705 /* One of the ops has to be in a register. */
706- if (!register_operand(operand0, HImode)
707- && !(register_operand(operand1, HImode) || const0_rtx == operands[1]))
708+ if (!register_operand(operand0, <ALLH:MODE>mode)
709+ && !(register_operand(operand1, <ALLH:MODE>mode) || const0_rtx == operands[1]))
710 {
711- operands[1] = copy_to_mode_reg(HImode, operand1);
712+ operands[1] = copy_to_mode_reg(<ALLH:MODE>mode, operand1);
713 }
714 }")
715
716@@ -337,20 +345,20 @@
717   [(set_attr "length" "4")
718 (set_attr "cc" "none")])
719
720-(define_insn "*movhi"
721- [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,d,*r,q,r")
722- (match_operand:HI 1 "general_operand" "rL,m,rL,i,i,r,q"))]
723- "(register_operand (operands[0],HImode)
724- || register_operand (operands[1],HImode) || const0_rtx == operands[1])"
725+(define_insn "*mov<ALLH:mode>"
726+ [(set (match_operand:ALLH 0 "nonimmediate_operand" "=r,r,m,d,*r,q,r")
727+ (match_operand:ALLH 1 "general_operand" "r,m,rL,i,i,r,q"))]
728+ "(register_operand (operands[0],<ALLH:MODE>mode)
729+ || register_operand (operands[1],<ALLH:MODE>mode) || const0_rtx == operands[1])"
730 "* return output_movhi (insn, operands, NULL);"
731 [(set_attr "length" "2,6,7,2,6,5,2")
732 (set_attr "cc" "none,clobber,clobber,none,clobber,none,none")])
733
734 (define_peephole2 ; movw
735- [(set (match_operand:QI 0 "even_register_operand" "")
736- (match_operand:QI 1 "even_register_operand" ""))
737- (set (match_operand:QI 2 "odd_register_operand" "")
738- (match_operand:QI 3 "odd_register_operand" ""))]
739+ [(set (match_operand:ALLQ 0 "even_register_operand" "")
740+ (match_operand:ALLQ 1 "even_register_operand" ""))
741+ (set (match_operand:ALLQ 2 "odd_register_operand" "")
742+ (match_operand:ALLQ 3 "odd_register_operand" ""))]
743 "(AVR_HAVE_MOVW
744 && REGNO (operands[0]) == REGNO (operands[2]) - 1
745 && REGNO (operands[1]) == REGNO (operands[3]) - 1)"
746@@ -361,10 +369,10 @@
747 })
748
749 (define_peephole2 ; movw_r
750- [(set (match_operand:QI 0 "odd_register_operand" "")
751- (match_operand:QI 1 "odd_register_operand" ""))
752- (set (match_operand:QI 2 "even_register_operand" "")
753- (match_operand:QI 3 "even_register_operand" ""))]
754+ [(set (match_operand:ALLQ 0 "odd_register_operand" "")
755+ (match_operand:ALLQ 1 "odd_register_operand" ""))
756+ (set (match_operand:ALLQ 2 "even_register_operand" "")
757+ (match_operand:ALLQ 3 "even_register_operand" ""))]
758 "(AVR_HAVE_MOVW
759 && REGNO (operands[2]) == REGNO (operands[0]) - 1
760 && REGNO (operands[3]) == REGNO (operands[1]) - 1)"
761@@ -377,26 +385,24 @@
762 ;;==========================================================================
763 ;; move double word (32 bit)
764
765-(define_expand "movsi"
766- [(set (match_operand:SI 0 "nonimmediate_operand" "")
767- (match_operand:SI 1 "general_operand" ""))]
768+(define_expand "mov<ALLS:mode>"
769+ [(set (match_operand:ALLS 0 "nonimmediate_operand" "")
770+ (match_operand:ALLS 1 "general_operand" ""))]
771 ""
772 "
773 {
774 /* One of the ops has to be in a register. */
775- if (!register_operand (operand0, SImode)
776- && !(register_operand (operand1, SImode) || const0_rtx == operand1))
777+ if (!register_operand (operand0, <ALLS:MODE>mode)
778+ && !(register_operand (operand1, <ALLS:MODE>mode) || const0_rtx == operand1))
779 {
780- operands[1] = copy_to_mode_reg (SImode, operand1);
781+ operands[1] = copy_to_mode_reg (<ALLS:MODE>mode, operand1);
782 }
783 }")
784
785-
786-
787 (define_peephole2 ; movsi_lreg_const
788 [(match_scratch:QI 2 "d")
789- (set (match_operand:SI 0 "l_register_operand" "")
790- (match_operand:SI 1 "immediate_operand" ""))
791+ (set (match_operand:ALLS 0 "l_register_operand" "")
792+ (match_operand:ALLS 1 "immediate_operand" ""))
793 (match_dup 2)]
794 "(operands[1] != const0_rtx
795 && operands[1] != constm1_rtx)"
796@@ -406,8 +412,8 @@
797
798 ;; '*' because it is not used in rtl generation.
799 (define_insn "*reload_insi"
800- [(set (match_operand:SI 0 "register_operand" "=r")
801- (match_operand:SI 1 "immediate_operand" "i"))
802+ [(set (match_operand:ALLS 0 "register_operand" "=r")
803+ (match_operand:ALLS 1 "immediate_operand" "i"))
804 (clobber (match_operand:QI 2 "register_operand" "=&d"))]
805 "reload_completed"
806 "* return output_reload_insisf (insn, operands, NULL);"
807@@ -415,11 +421,11 @@
808   (set_attr "cc" "none")])
809
810
811-(define_insn "*movsi"
812- [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,Qm,!d,r")
813- (match_operand:SI 1 "general_operand" "r,L,Qm,rL,i,i"))]
814- "(register_operand (operands[0],SImode)
815- || register_operand (operands[1],SImode) || const0_rtx == operands[1])"
816+(define_insn "*mov<ALLS:mode>"
817+ [(set (match_operand:ALLS 0 "nonimmediate_operand" "=r,r,r,Qm,!d,r")
818+ (match_operand:ALLS 1 "general_operand" "r,L,Qm,rL,i,i"))]
819+ "(register_operand (operands[0],<ALLS:MODE>mode)
820+ || register_operand (operands[1],<ALLS:MODE>mode) || const0_rtx == operands[1])"
821 "* return output_movsisf (insn, operands, NULL);"
822 [(set_attr "length" "4,4,8,9,4,10")
823 (set_attr "cc" "none,set_zn,clobber,clobber,none,clobber")])
824@@ -956,23 +962,54 @@
825   [(set_attr "type" "xcall")
826 (set_attr "cc" "clobber")])
827
828-(define_insn "mulqihi3"
829+;; Define code iterators
830+(define_code_iterator any_extend [sign_extend zero_extend])
831+(define_code_attr s [(sign_extend "s") (zero_extend "")])
832+(define_code_attr u [(sign_extend "") (zero_extend "u")])
833+(define_code_attr su [(sign_extend "s") (zero_extend "u")])
834+
835+(define_insn "<any_extend:su>mulqi3_highpart"
836+ [(set (match_operand:QI 0 "register_operand" "=r")
837+ (truncate:QI
838+ (lshiftrt:HI
839+ (mult:HI (any_extend:HI (match_operand:QI 1 "register_operand" "d"))
840+ (any_extend:HI (match_operand:QI 2 "register_operand" "d")))
841+ (const_int 8))))]
842+ "AVR_HAVE_MUL && !optimize_size"
843+ "mul<any_extend:s> %1,%2
844+ mov %0,r1
845+ clr r1"
846+ [(set_attr "length" "3")
847+ (set_attr "cc" "clobber")])
848+
849+(define_insn "<any_extend:u>mulqihi3"
850 [(set (match_operand:HI 0 "register_operand" "=r")
851- (mult:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "d"))
852- (sign_extend:HI (match_operand:QI 2 "register_operand" "d"))))]
853+ (mult:HI (any_extend:HI (match_operand:QI 1 "register_operand" "d"))
854+ (any_extend:HI (match_operand:QI 2 "register_operand" "d"))))]
855 "AVR_HAVE_MUL"
856- "muls %1,%2
857+ "mul<any_extend:s> %1,%2
858 movw %0,r0
859 clr r1"
860 [(set_attr "length" "3")
861 (set_attr "cc" "clobber")])
862
863-(define_insn "umulqihi3"
864+(define_insn "*sumulqihi3"
865 [(set (match_operand:HI 0 "register_operand" "=r")
866- (mult:HI (zero_extend:HI (match_operand:QI 1 "register_operand" "r"))
867- (zero_extend:HI (match_operand:QI 2 "register_operand" "r"))))]
868+ (mult:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "a"))
869+ (zero_extend:HI (match_operand:QI 2 "register_operand" "a"))))]
870 "AVR_HAVE_MUL"
871- "mul %1,%2
872+ "mulsu %1,%2
873+ movw %0,r0
874+ clr r1"
875+ [(set_attr "length" "3")
876+ (set_attr "cc" "clobber")])
877+
878+(define_insn "*usmulqihi3"
879+ [(set (match_operand:HI 0 "register_operand" "=r")
880+ (mult:HI (zero_extend:HI (match_operand:QI 1 "register_operand" "a"))
881+ (sign_extend:HI (match_operand:QI 2 "register_operand" "a"))))]
882+ "AVR_HAVE_MUL"
883+ "mulsu %2,%1
884 movw %0,r0
885 clr r1"
886 [(set_attr "length" "3")
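The rewritten 8-bit multiply patterns use the any_extend code iterator so one definition covers the signed and unsigned widening forms, and they add high-part and mixed-sign variants that did not exist before. A hedged example of source the new <su>mulqi3_highpart pattern can match when MUL hardware is available and -Os is not in effect:

    unsigned char scale_u8 (unsigned char x, unsigned char gain)
    {
      /* 8x8 -> 16 multiply, keep only the high byte:
         roughly mul x,gain ; mov result,r1 ; clr r1  */
      return (unsigned char) (((unsigned int) x * gain) >> 8);
    }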
887@@ -1026,6 +1063,50 @@
888   [(set_attr "type" "xcall")
889 (set_attr "cc" "clobber")])
890
891+(define_expand "<any_extend:u>mulhisi3"
892+ [(set (reg:HI 18) (match_operand:SI 1 "register_operand" ""))
893+ (set (reg:HI 20) (match_operand:SI 2 "register_operand" ""))
894+ (set (reg:SI 22)
895+ (mult:SI (any_extend:SI (reg:HI 18))
896+ (any_extend:SI (reg:HI 20))))
897+ (set (match_operand:SI 0 "register_operand" "") (reg:SI 22))]
898+ "!optimize_size"
899+ "")
900+
901+(define_insn "*<any_extend:u>mulhisi3_call"
902+ [(set (reg:SI 22)
903+ (mult:SI (any_extend:SI (reg:HI 18))
904+ (any_extend:SI (reg:HI 20))))]
905+ "!optimize_size"
906+ "%~call __<any_extend:u>mulhisi3"
907+ [(set_attr "type" "xcall")
908+ (set_attr "cc" "clobber")])
909+
910+(define_expand "<any_extend:su>mulhi3_highpart"
911+ [(set (reg:HI 18) (match_operand:HI 1 "register_operand" ""))
912+ (set (reg:HI 20) (match_operand:HI 2 "register_operand" ""))
913+ (set (reg:HI 24) (truncate:HI (lshiftrt:SI
914+ (mult:SI (any_extend:SI (reg:HI 18))
915+ (any_extend:SI (reg:HI 20)))
916+ (const_int 16))))
917+ (set (match_operand:SI 0 "register_operand" "") (reg:HI 24))]
918+ "AVR_HAVE_MUL"
919+ "")
920+
921+(define_insn_and_split "*<any_extend:su>mulhi3_highpart_call"
922+ [(set (reg:HI 24) (truncate:HI (lshiftrt:SI
923+ (mult:SI (any_extend:SI (reg:HI 18))
924+ (any_extend:SI (reg:HI 20)))
925+ (const_int 16))))]
926+ "AVR_HAVE_MUL"
927+ ""
928+ ""
929+ [(set (reg:SI 22)
930+ (mult:SI (any_extend:SI (reg:HI 18))
931+ (any_extend:SI (reg:HI 20))))
932+ (clobber (reg:HI 22))]
933+ "")
934+
935 ;; Operand 2 (reg:SI 18) not clobbered on the enhanced core.
936 ;; All call-used registers clobbered otherwise - normal library call.
937 (define_expand "mulsi3"
938@@ -1574,9 +1655,9 @@
939 ;;<< << << << << << << << << << << << << << << << << << << << << << << << << <<
940 ;; arithmetic shift left
941
942-(define_expand "ashlqi3"
943- [(set (match_operand:QI 0 "register_operand" "")
944- (ashift:QI (match_operand:QI 1 "register_operand" "")
945+(define_expand "ashl<ALLQ:mode>3"
946+ [(set (match_operand:ALLQ 0 "register_operand" "")
947+ (ashift:ALLQ (match_operand:ALLQ 1 "register_operand" "")
948 (match_operand:QI 2 "general_operand" "")))]
949 ""
950 "")
951@@ -1610,27 +1691,27 @@
952   (set (match_dup 0) (and:QI (match_dup 0) (const_int -64)))]
953 "")
954
955-(define_insn "*ashlqi3"
956- [(set (match_operand:QI 0 "register_operand" "=r,r,r,r,!d,r,r")
957- (ashift:QI (match_operand:QI 1 "register_operand" "0,0,0,0,0,0,0")
958+(define_insn "*ashl<ALLQ:mode>3"
959+ [(set (match_operand:ALLQ 0 "register_operand" "=r,r,r,r,!d,r,r")
960+ (ashift:ALLQ (match_operand:ALLQ 1 "register_operand" "0,0,0,0,0,0,0")
961 (match_operand:QI 2 "general_operand" "r,L,P,K,n,n,Qm")))]
962 ""
963 "* return ashlqi3_out (insn, operands, NULL);"
964 [(set_attr "length" "5,0,1,2,4,6,9")
965 (set_attr "cc" "clobber,none,set_czn,set_czn,set_czn,set_czn,clobber")])
966
967-(define_insn "ashlhi3"
968- [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r,r,r")
969- (ashift:HI (match_operand:HI 1 "register_operand" "0,0,0,r,0,0,0")
970+(define_insn "ashl<ALLH:mode>3"
971+ [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r,r,r")
972+ (ashift:ALLH (match_operand:ALLH 1 "register_operand" "0,0,0,r,0,0,0")
973 (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
974 ""
975 "* return ashlhi3_out (insn, operands, NULL);"
976 [(set_attr "length" "6,0,2,2,4,10,10")
977 (set_attr "cc" "clobber,none,set_n,clobber,set_n,clobber,clobber")])
978
979-(define_insn "ashlsi3"
980- [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r")
981- (ashift:SI (match_operand:SI 1 "register_operand" "0,0,0,r,0,0,0")
982+(define_insn "ashl<ALLS:mode>3"
983+ [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r,r,r,r")
984+ (ashift:ALLS (match_operand:ALLS 1 "register_operand" "0,0,0,r,0,0,0")
985 (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
986 ""
987 "* return ashlsi3_out (insn, operands, NULL);"
988@@ -1676,17 +1757,17 @@
989
990 (define_peephole2
991 [(match_scratch:QI 3 "d")
992- (set (match_operand:HI 0 "register_operand" "")
993- (ashift:HI (match_operand:HI 1 "register_operand" "")
994+ (set (match_operand:ALLH 0 "register_operand" "")
995+ (ashift:ALLH (match_operand:ALLH 1 "register_operand" "")
996 (match_operand:QI 2 "const_int_operand" "")))]
997 ""
998- [(parallel [(set (match_dup 0) (ashift:HI (match_dup 1) (match_dup 2)))
999+ [(parallel [(set (match_dup 0) (ashift:ALLH (match_dup 1) (match_dup 2)))
1000 (clobber (match_dup 3))])]
1001 "")
1002
1003-(define_insn "*ashlhi3_const"
1004- [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r")
1005- (ashift:HI (match_operand:HI 1 "register_operand" "0,0,r,0,0")
1006+(define_insn "*ashl<ALLH:mode>3_const"
1007+ [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r")
1008+ (ashift:ALLH (match_operand:ALLH 1 "register_operand" "0,0,r,0,0")
1009 (match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
1010 (clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
1011 "reload_completed"
1012@@ -1696,17 +1777,17 @@
1013
1014 (define_peephole2
1015 [(match_scratch:QI 3 "d")
1016- (set (match_operand:SI 0 "register_operand" "")
1017- (ashift:SI (match_operand:SI 1 "register_operand" "")
1018+ (set (match_operand:ALLS 0 "register_operand" "")
1019+ (ashift:ALLS (match_operand:ALLS 1 "register_operand" "")
1020 (match_operand:QI 2 "const_int_operand" "")))]
1021 ""
1022- [(parallel [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
1023+ [(parallel [(set (match_dup 0) (ashift:ALLS (match_dup 1) (match_dup 2)))
1024 (clobber (match_dup 3))])]
1025 "")
1026
1027-(define_insn "*ashlsi3_const"
1028- [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
1029- (ashift:SI (match_operand:SI 1 "register_operand" "0,0,r,0")
1030+(define_insn "*ashl<ALLS:mode>3_const"
1031+ [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r")
1032+ (ashift:ALLS (match_operand:ALLS 1 "register_operand" "0,0,r,0")
1033 (match_operand:QI 2 "const_int_operand" "L,P,O,n")))
1034 (clobber (match_scratch:QI 3 "=X,X,X,&d"))]
1035 "reload_completed"
1036@@ -1717,27 +1798,27 @@
1037 ;; >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
1038 ;; arithmetic shift right
1039
1040-(define_insn "ashrqi3"
1041- [(set (match_operand:QI 0 "register_operand" "=r,r,r,r,r,r")
1042- (ashiftrt:QI (match_operand:QI 1 "register_operand" "0,0,0,0,0,0")
1043+(define_insn "ashr<ALLQ:mode>3"
1044+ [(set (match_operand:ALLQ 0 "register_operand" "=r,r,r,r,r,r")
1045+ (ashiftrt:ALLQ (match_operand:ALLQ 1 "register_operand" "0,0,0,0,0,0")
1046 (match_operand:QI 2 "general_operand" "r,L,P,K,n,Qm")))]
1047 ""
1048 "* return ashrqi3_out (insn, operands, NULL);"
1049 [(set_attr "length" "5,0,1,2,5,9")
1050 (set_attr "cc" "clobber,none,clobber,clobber,clobber,clobber")])
1051
1052-(define_insn "ashrhi3"
1053- [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r,r,r")
1054- (ashiftrt:HI (match_operand:HI 1 "register_operand" "0,0,0,r,0,0,0")
1055+(define_insn "ashr<ALLH:mode>3"
1056+ [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r,r,r")
1057+ (ashiftrt:ALLH (match_operand:ALLH 1 "register_operand" "0,0,0,r,0,0,0")
1058 (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
1059 ""
1060 "* return ashrhi3_out (insn, operands, NULL);"
1061 [(set_attr "length" "6,0,2,4,4,10,10")
1062 (set_attr "cc" "clobber,none,clobber,set_n,clobber,clobber,clobber")])
1063
1064-(define_insn "ashrsi3"
1065- [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r")
1066- (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,0,r,0,0,0")
1067+(define_insn "ashr<ALLS:mode>3"
1068+ [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r,r,r,r")
1069+ (ashiftrt:ALLS (match_operand:ALLS 1 "register_operand" "0,0,0,r,0,0,0")
1070 (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
1071 ""
1072 "* return ashrsi3_out (insn, operands, NULL);"
1073@@ -1748,17 +1829,17 @@
1074
1075 (define_peephole2
1076 [(match_scratch:QI 3 "d")
1077- (set (match_operand:HI 0 "register_operand" "")
1078- (ashiftrt:HI (match_operand:HI 1 "register_operand" "")
1079+ (set (match_operand:ALLH 0 "register_operand" "")
1080+ (ashiftrt:ALLH (match_operand:ALLH 1 "register_operand" "")
1081 (match_operand:QI 2 "const_int_operand" "")))]
1082 ""
1083- [(parallel [(set (match_dup 0) (ashiftrt:HI (match_dup 1) (match_dup 2)))
1084+ [(parallel [(set (match_dup 0) (ashiftrt:ALLH (match_dup 1) (match_dup 2)))
1085 (clobber (match_dup 3))])]
1086 "")
1087
1088 (define_insn "*ashrhi3_const"
1089- [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r")
1090- (ashiftrt:HI (match_operand:HI 1 "register_operand" "0,0,r,0,0")
1091+ [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r")
1092+ (ashiftrt:ALLH (match_operand:ALLH 1 "register_operand" "0,0,r,0,0")
1093 (match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
1094 (clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
1095 "reload_completed"
1096@@ -1768,17 +1849,17 @@
1097
1098 (define_peephole2
1099 [(match_scratch:QI 3 "d")
1100- (set (match_operand:SI 0 "register_operand" "")
1101- (ashiftrt:SI (match_operand:SI 1 "register_operand" "")
1102+ (set (match_operand:ALLS 0 "register_operand" "")
1103+ (ashiftrt:ALLS (match_operand:ALLS 1 "register_operand" "")
1104 (match_operand:QI 2 "const_int_operand" "")))]
1105 ""
1106- [(parallel [(set (match_dup 0) (ashiftrt:SI (match_dup 1) (match_dup 2)))
1107+ [(parallel [(set (match_dup 0) (ashiftrt:ALLS (match_dup 1) (match_dup 2)))
1108 (clobber (match_dup 3))])]
1109 "")
1110
1111 (define_insn "*ashrsi3_const"
1112- [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
1113- (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r,0")
1114+ [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r")
1115+ (ashiftrt:ALLS (match_operand:ALLS 1 "register_operand" "0,0,r,0")
1116 (match_operand:QI 2 "const_int_operand" "L,P,O,n")))
1117 (clobber (match_scratch:QI 3 "=X,X,X,&d"))]
1118 "reload_completed"
1119@@ -1789,54 +1870,54 @@
1120 ;; >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
1121 ;; logical shift right
1122
1123-(define_expand "lshrqi3"
1124- [(set (match_operand:QI 0 "register_operand" "")
1125- (lshiftrt:QI (match_operand:QI 1 "register_operand" "")
1126- (match_operand:QI 2 "general_operand" "")))]
1127+(define_expand "lshr<ALLQ:mode>3"
1128+ [(set (match_operand:ALLQ 0 "register_operand" "")
1129+ (lshiftrt:ALLQ (match_operand:ALLQ 1 "register_operand" "")
1130+ (match_operand:ALLQ 2 "general_operand" "")))]
1131 ""
1132 "")
1133
1134 (define_split ; lshrqi3_const4
1135- [(set (match_operand:QI 0 "d_register_operand" "")
1136- (lshiftrt:QI (match_dup 0)
1137+ [(set (match_operand:ALLQ 0 "d_register_operand" "")
1138+ (lshiftrt:ALLQ (match_dup 0)
1139 (const_int 4)))]
1140 ""
1141- [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
1142- (set (match_dup 0) (and:QI (match_dup 0) (const_int 15)))]
1143+ [(set (match_dup 0) (rotate:ALLQ (match_dup 0) (const_int 4)))
1144+ (set (match_dup 0) (and:ALLQ (match_dup 0) (const_int 15)))]
1145 "")
1146
1147 (define_split ; lshrqi3_const5
1148- [(set (match_operand:QI 0 "d_register_operand" "")
1149- (lshiftrt:QI (match_dup 0)
1150+ [(set (match_operand:ALLQ 0 "d_register_operand" "")
1151+ (lshiftrt:ALLQ (match_dup 0)
1152 (const_int 5)))]
1153 ""
1154- [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
1155- (set (match_dup 0) (lshiftrt:QI (match_dup 0) (const_int 1)))
1156- (set (match_dup 0) (and:QI (match_dup 0) (const_int 7)))]
1157+ [(set (match_dup 0) (rotate:ALLQ (match_dup 0) (const_int 4)))
1158+ (set (match_dup 0) (lshiftrt:ALLQ (match_dup 0) (const_int 1)))
1159+ (set (match_dup 0) (and:ALLQ (match_dup 0) (const_int 7)))]
1160 "")
1161
1162 (define_split ; lshrqi3_const6
1163- [(set (match_operand:QI 0 "d_register_operand" "")
1164- (lshiftrt:QI (match_dup 0)
1165+ [(set (match_operand:ALLQ 0 "d_register_operand" "")
1166+ (lshiftrt:ALLQ (match_dup 0)
1167 (const_int 6)))]
1168 ""
1169- [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
1170- (set (match_dup 0) (lshiftrt:QI (match_dup 0) (const_int 2)))
1171- (set (match_dup 0) (and:QI (match_dup 0) (const_int 3)))]
1172+ [(set (match_dup 0) (rotate:ALLQ (match_dup 0) (const_int 4)))
1173+ (set (match_dup 0) (lshiftrt:ALLQ (match_dup 0) (const_int 2)))
1174+ (set (match_dup 0) (and:ALLQ (match_dup 0) (const_int 3)))]
1175 "")
1176
1177 (define_insn "*lshrqi3"
1178- [(set (match_operand:QI 0 "register_operand" "=r,r,r,r,!d,r,r")
1179- (lshiftrt:QI (match_operand:QI 1 "register_operand" "0,0,0,0,0,0,0")
1180- (match_operand:QI 2 "general_operand" "r,L,P,K,n,n,Qm")))]
1181+ [(set (match_operand:ALLQ 0 "register_operand" "=r,r,r,r,!d,r,r")
1182+ (lshiftrt:ALLQ (match_operand:ALLQ 1 "register_operand" "0,0,0,0,0,0,0")
1183+ (match_operand:ALLQ 2 "general_operand" "r,L,P,K,n,n,Qm")))]
1184 ""
1185 "* return lshrqi3_out (insn, operands, NULL);"
1186 [(set_attr "length" "5,0,1,2,4,6,9")
1187 (set_attr "cc" "clobber,none,set_czn,set_czn,set_czn,set_czn,clobber")])
1188
1189-(define_insn "lshrhi3"
1190- [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r,r,r")
1191- (lshiftrt:HI (match_operand:HI 1 "register_operand" "0,0,0,r,0,0,0")
1192+(define_insn "lshr<ALLH:mode>3"
1193+ [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r,r,r")
1194+ (lshiftrt:ALLH (match_operand:ALLH 1 "register_operand" "0,0,0,r,0,0,0")
1195 (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
1196 ""
1197 "* return lshrhi3_out (insn, operands, NULL);"
1198@@ -1891,17 +1972,17 @@
1199
1200 (define_peephole2
1201 [(match_scratch:QI 3 "d")
1202- (set (match_operand:HI 0 "register_operand" "")
1203- (lshiftrt:HI (match_operand:HI 1 "register_operand" "")
1204+ (set (match_operand:ALLH 0 "register_operand" "")
1205+ (lshiftrt:ALLH (match_operand:ALLH 1 "register_operand" "")
1206 (match_operand:QI 2 "const_int_operand" "")))]
1207 ""
1208- [(parallel [(set (match_dup 0) (lshiftrt:HI (match_dup 1) (match_dup 2)))
1209+ [(parallel [(set (match_dup 0) (lshiftrt:ALLH (match_dup 1) (match_dup 2)))
1210 (clobber (match_dup 3))])]
1211 "")
1212
1213-(define_insn "*lshrhi3_const"
1214- [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r")
1215- (lshiftrt:HI (match_operand:HI 1 "register_operand" "0,0,r,0,0")
1216+(define_insn "*lshr<ALLH:mode>3_const"
1217+ [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r")
1218+ (lshiftrt:ALLH (match_operand:ALLH 1 "register_operand" "0,0,r,0,0")
1219 (match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
1220 (clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
1221 "reload_completed"
1222@@ -1919,9 +2000,9 @@
1223   (clobber (match_dup 3))])]
1224 "")
1225
1226-(define_insn "*lshrsi3_const"
1227- [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
1228- (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r,0")
1229+(define_insn "*lshr<ALLS:mode>3_const"
1230+ [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r")
1231+ (lshiftrt:ALLS (match_operand:ALLS 1 "register_operand" "0,0,r,0")
1232 (match_operand:QI 2 "const_int_operand" "L,P,O,n")))
1233 (clobber (match_scratch:QI 3 "=X,X,X,&d"))]
1234 "reload_completed"
1235@@ -2171,27 +2252,27 @@
1236 ;; compare
1237
1238 ; Optimize negated tests into reverse compare if overflow is undefined.
1239-(define_insn "*negated_tstqi"
1240+(define_insn "*negated_tst<ALLQ:mode>"
1241 [(set (cc0)
1242- (compare (neg:QI (match_operand:QI 0 "register_operand" "r"))
1243+ (compare (neg:ALLQ (match_operand:ALLQ 0 "register_operand" "r"))
1244 (const_int 0)))]
1245 "(!flag_wrapv && !flag_trapv && flag_strict_overflow)"
1246 "cp __zero_reg__,%0"
1247 [(set_attr "cc" "compare")
1248 (set_attr "length" "1")])
1249
1250-(define_insn "*reversed_tstqi"
1251+(define_insn "*reversed_tst<ALLQ:mode>"
1252 [(set (cc0)
1253 (compare (const_int 0)
1254- (match_operand:QI 0 "register_operand" "r")))]
1255+ (match_operand:ALLQ 0 "register_operand" "r")))]
1256 ""
1257 "cp __zero_reg__,%0"
1258 [(set_attr "cc" "compare")
1259 (set_attr "length" "2")])
1260
1261-(define_insn "*negated_tsthi"
1262+(define_insn "*negated_tst<ALLH:mode>"
1263 [(set (cc0)
1264- (compare (neg:HI (match_operand:HI 0 "register_operand" "r"))
1265+ (compare (neg:ALLH (match_operand:ALLH 0 "register_operand" "r"))
1266 (const_int 0)))]
1267 "(!flag_wrapv && !flag_trapv && flag_strict_overflow)"
1268 "cp __zero_reg__,%A0
1269@@ -2201,10 +2282,10 @@
1270
1271 ;; Leave here the clobber used by the cmphi pattern for simplicity, even
1272 ;; though it is unused, because this pattern is synthesized by avr_reorg.
1273-(define_insn "*reversed_tsthi"
1274+(define_insn "*reversed_tst<ALLH:mode>"
1275 [(set (cc0)
1276 (compare (const_int 0)
1277- (match_operand:HI 0 "register_operand" "r")))
1278+ (match_operand:ALLH 0 "register_operand" "r")))
1279 (clobber (match_scratch:QI 1 "=X"))]
1280 ""
1281 "cp __zero_reg__,%A0
1282@@ -2212,9 +2293,9 @@
1283   [(set_attr "cc" "compare")
1284 (set_attr "length" "2")])
1285
1286-(define_insn "*negated_tstsi"
1287+(define_insn "*negated_tst<ALLS:mode>"
1288 [(set (cc0)
1289- (compare (neg:SI (match_operand:SI 0 "register_operand" "r"))
1290+ (compare (neg:ALLS (match_operand:ALLS 0 "register_operand" "r"))
1291 (const_int 0)))]
1292 "(!flag_wrapv && !flag_trapv && flag_strict_overflow)"
1293 "cp __zero_reg__,%A0
1294@@ -2224,10 +2305,10 @@
1295   [(set_attr "cc" "compare")
1296 (set_attr "length" "4")])
1297
1298-(define_insn "*reversed_tstsi"
1299+(define_insn "*reversed_tst<ALLS:mode>"
1300 [(set (cc0)
1301 (compare (const_int 0)
1302- (match_operand:SI 0 "register_operand" "r")))
1303+ (match_operand:ALLS 0 "register_operand" "r")))
1304 (clobber (match_scratch:QI 1 "=X"))]
1305 ""
1306 "cp __zero_reg__,%A0
1307@@ -2238,10 +2319,10 @@
1308   (set_attr "length" "4")
1309
1310
1311-(define_insn "*cmpqi"
1312+(define_insn "*cmp<ALLQ:mode>"
1313 [(set (cc0)
1314- (compare (match_operand:QI 0 "register_operand" "r,r,d")
1315- (match_operand:QI 1 "nonmemory_operand" "L,r,i")))]
1316+ (compare (match_operand:ALLQ 0 "register_operand" "r,r,d")
1317+ (match_operand:ALLQ 1 "nonmemory_operand" "L,r,i")))]
1318 ""
1319 "@
1320 tst %0
1321@@ -2260,10 +2341,10 @@
1322   [(set_attr "cc" "compare")
1323 (set_attr "length" "1")])
1324
1325-(define_insn "*cmphi"
1326+(define_insn "*cmp<ALLH:mode>"
1327 [(set (cc0)
1328- (compare (match_operand:HI 0 "register_operand" "!w,r,r,d,d,r,r")
1329- (match_operand:HI 1 "nonmemory_operand" "L,L,r,M,i,M,i")))
1330+ (compare (match_operand:ALLH 0 "register_operand" "!w,r,r,d,d,r,r")
1331+ (match_operand:ALLH 1 "nonmemory_operand" "L,L,r,M,i,M,i")))
1332 (clobber (match_scratch:QI 2 "=X,X,X,X,&d,&d,&d"))]
1333 ""
1334 "*{
1335@@ -2308,10 +2389,10 @@
1336   (set_attr "length" "1,2,2,2,3,3,4")])
1337
1338
1339-(define_insn "*cmpsi"
1340+(define_insn "*cmp<ALLS:mode>"
1341 [(set (cc0)
1342- (compare (match_operand:SI 0 "register_operand" "r,r,d,d,r,r")
1343- (match_operand:SI 1 "nonmemory_operand" "L,r,M,i,M,i")))
1344+ (compare (match_operand:ALLS 0 "register_operand" "r,r,d,d,r,r")
1345+ (match_operand:ALLS 1 "nonmemory_operand" "L,r,M,i,M,i")))
1346 (clobber (match_scratch:QI 2 "=X,X,X,&d,&d,&d"))]
1347 ""
1348 "*{
1349diff -Naurp gcc/config/avr/avr-modes.def gcc/config/avr/avr-modes.def
1350--- gcc/config/avr/avr-modes.def 1970-01-01 05:30:00.000000000 +0530
1351+++ gcc/config/avr/avr-modes.def 2011-10-27 16:55:55.000000000 +0530
1352@@ -0,0 +1,34 @@
1353+/* Definitions of target machine for GCC for AVR.
1354+ Copyright (C) 2009 Free Software Foundation, Inc.
1355+
1356+This file is part of GCC.
1357+
1358+GCC is free software; you can redistribute it and/or modify
1359+it under the terms of the GNU General Public License as published by
1360+the Free Software Foundation; either version 3, or (at your option)
1361+any later version.
1362+
1363+GCC is distributed in the hope that it will be useful,
1364+but WITHOUT ANY WARRANTY; without even the implied warranty of
1365+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1366+GNU General Public License for more details.
1367+
1368+You should have received a copy of the GNU General Public License
1369+along with GCC; see the file COPYING3. If not see
1370+<http://www.gnu.org/licenses/>. */
1371+
1372+/* On 8 bit machines it requires fewer instructions for fixed point
1373+ routines if the decimal place is on a byte boundary which is not
1374+ the default for signed accum types. */
1375+
1376+ADJUST_IBIT (HA, 7);
1377+ADJUST_FBIT (HA, 8);
1378+
1379+ADJUST_IBIT (SA, 15);
1380+ADJUST_FBIT (SA, 16);
1381+
1382+ADJUST_IBIT (DA, 31);
1383+ADJUST_FBIT (DA, 32);
1384+
1385+ADJUST_IBIT (TA, 63);
1386+ADJUST_FBIT (TA, 64);
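With these adjustments the fractional part of every signed accum mode covers whole bytes (HA becomes s7.8, SA becomes s15.16, and so on), so the fixed-point routines need fewer instructions to move the binary point around. A hedged check of the resulting layout, assuming short _Accum maps to HA with the s7.8 split declared here:

    #include <stdint.h>
    #include <string.h>

    uint16_t ha_raw_bits (short _Accum x)
    {
      uint16_t raw;
      memcpy (&raw, &x, sizeof raw);
      return raw;       /* ha_raw_bits (1.5hk) should yield 0x0180 (1.5 * 2^8) */
    }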
1387diff -Naurp gcc/config/avr/avr-protos.h gcc/config/avr/avr-protos.h
1388--- gcc/config/avr/avr-protos.h 2011-10-27 16:45:17.000000000 +0530
1389+++ gcc/config/avr/avr-protos.h 2011-10-27 16:55:55.000000000 +0530
1390@@ -75,6 +75,8 @@ extern const char *lshrhi3_out (rtx insn
dbe7ab63 1391 extern const char *lshrsi3_out (rtx insn, rtx operands[], int *len);
1392 extern bool avr_rotate_bytes (rtx operands[]);
1393
1394+extern const char *fract_out (rtx insn, rtx operands[], int intsigned, int *l);
1395+
1396 extern void expand_prologue (void);
1397 extern void expand_epilogue (void);
1398 extern int avr_epilogue_uses (int regno);
1399diff -Naurp gcc/config/avr/libgcc-fixed.S gcc/config/avr/libgcc-fixed.S
1400--- gcc/config/avr/libgcc-fixed.S 1970-01-01 05:30:00.000000000 +0530
1401+++ gcc/config/avr/libgcc-fixed.S 2011-10-27 16:55:55.000000000 +0530
1402@@ -0,0 +1,1123 @@
1403+/* -*- Mode: Asm -*- */
1404+/* Copyright (C) 2009
1405+ Free Software Foundation, Inc.
1406+ Contributed by Sean D'Epagnier
1407+
1408+This file is free software; you can redistribute it and/or modify it
1409+under the terms of the GNU General Public License as published by the
1410+Free Software Foundation; either version 3, or (at your option) any
1411+later version.
1412+
1413+In addition to the permissions in the GNU General Public License, the
1414+Free Software Foundation gives you unlimited permission to link the
1415+compiled version of this file into combinations with other programs,
1416+and to distribute those combinations without any restriction coming
1417+from the use of this file. (The General Public License restrictions
1418+do apply in other respects; for example, they cover modification of
1419+the file, and distribution when not linked into a combine
1420+executable.)
1421+
1422+This file is distributed in the hope that it will be useful, but
1423+WITHOUT ANY WARRANTY; without even the implied warranty of
1424+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1425+General Public License for more details.
1426+
1427+You should have received a copy of the GNU General Public License
1428+along with this program; see the file COPYING. If not, write to
1429+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
1430+Boston, MA 02110-1301, USA. */
1431+
1432+/* Fixed point library routines for avr. */
1433+
1434+#define __zero_reg__ r1
1435+#define __tmp_reg__ r0
1436+#define __SREG__ 0x3f
1437+#define __SP_H__ 0x3e
1438+#define __SP_L__ 0x3d
1439+#define __RAMPZ__ 0x3B
1440+
1441+/* Conversions to float. */
1442+#if defined (L_fractqqsf)
1443+ .global __fractqqsf
1444+ .func __fractqqsf
1445+__fractqqsf:
1446+ clr r25
1447+ sbrc r24, 7 ; if negative
1448+ ser r25 ; sign extend
1449+ mov r23, r24 ; move in place
1450+ mov r24, r25 ; sign extend lower byte
1451+ lsl r23
1452+ clr r22
1453+ rjmp __fractsasf ; call larger conversion
1454+.endfunc
1455+#endif /* defined (L_fractqqsf) */
1456+
1457+#if defined (L_fractuqqsf)
1458+ .global __fractuqqsf
1459+ .func __fractuqqsf
1460+__fractuqqsf:
1461+ clr r22
1462+ mov r23, r24
1463+ clr r24
1464+ clr r25
1465+ rjmp __fractsasf ; call larger conversion
1466+.endfunc
1467+#endif /* defined (L_fractuqqsf) */
1468+
1469+#if defined (L_fracthqsf)
1470+ .global __fracthqsf
1471+ .func __fracthqsf
1472+__fracthqsf:
1473+ mov_l r22, r24 ; put fractional part in place
1474+ mov_h r23, r25
1475+ clr r25
1476+ sbrc r23, 7 ; if negative
1477+ ser r25 ; sign extend
1478+ mov r24, r25 ; sign extend lower byte
1479+ lsl r22
1480+ rol r23
1481+ rjmp __fractsasf ; call larger conversion
1482+.endfunc
1483+#endif /* defined (L_fracthqsf) */
1484+
1485+#if defined (L_fractuhqsf)
1486+ .global __fractuhqsf
1487+ .func __fractuhqsf
1488+__fractuhqsf:
1489+ mov_l r22, r24 ; put fractional part in place
1490+ mov_h r23, r25
1491+ clr r24
1492+ clr r25
1493+ rjmp __fractsasf ; call larger conversion
1494+.endfunc
1495+#endif /* defined (L_fractuhqsf) */
1496+
1497+#if defined (L_fracthasf)
1498+ .global __fracthasf
1499+ .func __fracthasf
1500+__fracthasf:
1501+ clr r22
1502+ mov r23, r24 ; move into place
1503+ mov r24, r25
1504+ clr r25
1505+ sbrc r24, 7 ; if negative
1506+ ser r25 ; sign extend
1507+ rjmp __fractsasf ; call larger conversion
+.endfunc
1508+#endif /* defined (L_fracthasf) */
1509+
1510+#if defined (L_fractuhasf)
1511+ .global __fractuhasf
1512+ .func __fractuhasf
1513+__fractuhasf:
1514+ clr r22
1515+ mov r23, r24 ; move fractional part into place
+ mov r24, r25 ; move integer part into place
+ clr r25
1516+ rjmp __fractsasf ; call larger conversion
1517+.endfunc
1518+#endif /* defined (L_fractuhasf) */
1519+
1520+#if defined (L_fractsasf)
1521+ .global __fractsasf
1522+ .func __fractsasf
1523+__fractsasf:
1524+ rcall __floatsisf
1525+ tst r25
1526+ breq __fractsasf_exit ; skip if zero
1527+ subi r25, 0x08 ; adjust exponent
1528+__fractsasf_exit:
1529+ ret
1530+.endfunc
1531+#endif /* defined (L_fractsasf) */
1532+
1533+#if defined (L_fractusasf)
1534+ .global __fractusasf
1535+ .func __fractusasf
1536+__fractusasf:
1537+ rcall __floatunsisf
1538+ tst r25
1539+ breq __fractusasf_exit ; skip if zero
1540+ subi r25, 0x08 ; adjust exponent
1541+__fractusasf_exit:
1542+ ret
1543+.endfunc
1544+#endif /* defined (L_fractusasf) */
1545+
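All of the to-float helpers above reposition the operand as a 32-bit value with 16 fraction bits and tail-call __fractsasf (or __fractusasf), which converts with __floatsisf/__floatunsisf and then subtracts 8 from the top exponent byte, lowering the binary exponent by 16 and so dividing the result by 2^16. A hedged C reference model (editorial sketch, not the code above) for the signed 16.16 case:

#include <stdint.h>

/* numeric value of an s15.16 accum bit pattern: divide by 2^16 */
static float sa_to_float_ref (int32_t raw)
{
  return (float) raw / 65536.0f;
}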
1546+#if defined (L_fractsfqq) /* Conversions from float. */
1547+ .global __fractsfqq
1548+ .func __fractsfqq
1549+__fractsfqq:
1550+ subi r25, -11 ; adjust exponent
1551+ subi r24, 128
1552+ rjmp __fixsfsi
1553+.endfunc
1554+#endif /* defined (L_fractsfqq) */
1555+
1556+#if defined (L_fractsfuqq)
1557+ .global __fractsfuqq
1558+ .func __fractsfuqq
1559+__fractsfuqq:
1560+ subi r25, -12 ; adjust exponent
1561+ rjmp __fixsfsi
1562+.endfunc
1563+#endif /* defined (L_fractsfuqq) */
1564+
1565+#if defined (L_fractsfhq)
1566+ .global __fractsfhq
1567+ .func __fractsfhq
1568+__fractsfhq:
1569+ subi r25, -15 ; adjust exponent
1570+ subi r24, 128
1571+ rjmp __fixsfsi
1572+.endfunc
1573+#endif /* defined (L_fractsfhq) */
1574+
1575+#if defined (L_fractsfuhq)
1576+ .global __fractsfuhq
1577+ .func __fractsfuhq
1578+__fractsfuhq:
1579+ subi r25, -16 ; adjust exponent
1580+ rjmp __fixsfsi
1581+.endfunc
1582+#endif /* defined (L_fractsfuhq) */
1583+
1584+#if defined (L_fractsfha)
1585+ .global __fractsfha
1586+ .func __fractsfha
1587+__fractsfha:
1588+.endfunc
1589+ .global __fractsfuha
1590+ .func __fractsfuha
1591+__fractsfuha:
1592+ subi r25, -12 ; adjust exponent
1593+ rjmp __fixsfsi
1594+.endfunc
1595+#endif /* defined (L_fractsfha) */
1596+
1597+#if defined (L_fractsfsa)
1598+ .global __fractsfsa
1599+ .func __fractsfsa
1600+__fractsfsa:
1601+.endfunc
1602+ .global __fractsfusa
1603+ .func __fractsfusa
1604+__fractsfusa:
1605+ subi r25, -8 ; adjust exponent
1606+ rjmp __fixsfsi
1607+.endfunc
1608+#endif /* defined (L_fractsfsa) */
1609+
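The float-to-fixed helpers go the other way: they add a constant to the packed exponent bytes, scaling the operand by 2^FBIT plus whatever extra power of two lands the wanted bytes in the return registers, and then fall through to __fixsfsi so the truncated integer result is already the fixed-point bit pattern. A hedged C reference model for the 16-bit signed fract (15 fractional bits); overflow and saturation behaviour are left out here:

#include <stdint.h>

/* float to signed _Fract with 15 fractional bits: scale by 2^15, truncate */
static int16_t float_to_hq_ref (float x)
{
  return (int16_t) (x * 32768.0f);
}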
1610+/* For multiplication the functions here are called directly from
1611+ avr-fixed.md patterns, instead of using the standard libcall mechanisms.
1612+ This can make better code because GCC knows exactly which
1613+ of the call-used registers (not all of them) are clobbered. */
1614+
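As an editorial illustration of that calling convention (not part of the patch), a plain _Accum product in user code is expanded by the avr-fixed.md multiply pattern into a call to the matching routine below, __mulsa3 for the signed 16.16 case:

/* requires a compiler built with this patch; _Accum is the s15.16 type */
_Accum scale (_Accum a, _Accum b)
{
  return a * b;   /* expands to a call to __mulsa3 */
}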
1615+/* mulqq and muluqq open coded on the enhanced core */
1616+#if !defined (__AVR_HAVE_MUL__)
1617+/*******************************************************
1618+ Fractional Multiplication 8 x 8
1619+*******************************************************/
1620+#define r_arg2 r22 /* multiplicand */
1621+#define r_arg1 r24 /* multiplier */
1622+#define r_res __tmp_reg__ /* result */
1623+
1624+#if defined (L_mulqq3)
1625+ .global __mulqq3
1626+ .func __mulqq3
1627+__mulqq3:
1628+ mov r_res, r_arg1
1629+ eor r_res, r_arg2
1630+ bst r_res, 7
1631+ lsl r_arg1
1632+ lsl r_arg2
1633+ brcc __mulqq3_skipneg
1634+ neg r_arg2
1635+__mulqq3_skipneg:
1636+ rcall __muluqq3
1637+ lsr r_arg1
1638+ brtc __mulqq3_exit
1639+ neg r_arg1
1640+__mulqq3_exit:
1641+ ret
1642+
1643+.endfunc
1644+#endif /* defined (L_mulqq3) */
1645+
1646+#if defined (L_muluqq3)
1647+ .global __muluqq3
1648+ .func __muluqq3
1649+__muluqq3:
1650+ clr r_res ; clear result
1651+__muluqq3_loop:
1652+ lsr r_arg2 ; shift multiplicand
1653+ sbrc r_arg1,7
1654+ add r_res,r_arg2
1655+ breq __muluqq3_exit ; while multiplicand != 0
1656+ lsl r_arg1
1657+ brne __muluqq3_loop ; exit if multiplier = 0
1658+__muluqq3_exit:
1659+ mov r_arg1,r_res ; result to return register
1660+ ret
1661+#undef r_arg2
1662+#undef r_arg1
1663+#undef r_res
1664+
1665+.endfunc
1666+#endif /* defined (L_muluqq3) */
1667+#endif /* !defined (__AVR_HAVE_MUL__) */
1668+
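Semantically both entry points return the high part of the widened product. A minimal C model of the signed 8-bit fract multiply (7 fractional bits); this is an editorial sketch and glosses over the rounding of negative results:

#include <stdint.h>

/* signed Q7 multiply: form the 16-bit product, drop the 7 low fraction bits */
static int8_t mulqq_ref (int8_t a, int8_t b)
{
  return (int8_t) (((int16_t) a * b) >> 7);
}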
1669+/*******************************************************
1670+ Fractional Multiplication 16 x 16
1671+*******************************************************/
1672+
1673+#if defined (__AVR_HAVE_MUL__)
1674+#define r_arg1L r22 /* multiplier Low */
1675+#define r_arg1H r23 /* multiplier High */
1676+#define r_arg2L r20 /* multiplicand Low */
1677+#define r_arg2H r21 /* multiplicand High */
1678+#define r_resL r18 /* result Low */
1679+#define r_resH r19 /* result High */
1680+
1681+#if defined (L_mulhq3)
1682+ .global __mulhq3
1683+ .func __mulhq3
1684+__mulhq3:
1685+ fmuls r_arg1H, r_arg2H
1686+ movw r_resL, r0
1687+ fmulsu r_arg2H, r_arg1L
1688+ clr r_arg1L
1689+ sbc r_resH, r_arg1L
1690+ add r_resL, r1
1691+ adc r_resH, r_arg1L
1692+ fmulsu r_arg1H, r_arg2L
1693+ sbc r_resH, r_arg1L
1694+ add r_resL, r1
1695+ adc r_resH, r_arg1L
1696+ clr __zero_reg__
1697+ ret
1698+.endfunc
1699+#endif /* defined (L_mulhq3) */
1700+
1701+#if defined (L_muluhq3)
1702+ .global __muluhq3
1703+ .func __muluhq3
1704+__muluhq3:
1705+ mul r_arg1H, r_arg2H
1706+ movw r_resL, r0
1707+ mul r_arg1H, r_arg2L
1708+ add r_resL, r1
1709+ clr __zero_reg__
1710+ adc r_resH, __zero_reg__
1711+ mul r_arg1L, r_arg2H
1712+ add r_resL, r1
1713+ clr __zero_reg__
1714+ adc r_resH, __zero_reg__
1715+ ret
1716+.endfunc
1717+#endif /* defined (L_muluhq3) */
1718+
1719+#else
1720+#define r_arg1L r24 /* multiplier Low */
1721+#define r_arg1H r25 /* multiplier High */
1722+#define r_arg2L r22 /* multiplicand Low */
1723+#define r_arg2H r23 /* multiplicand High */
1724+#define r_resL __tmp_reg__ /* result Low */
1725+#define r_resH __zero_reg__ /* result High */
1726+
1727+#if defined (L_mulhq3)
1728+ .global __mulhq3
1729+ .func __mulhq3
1730+__mulhq3:
1731+ mov r_resL, r_arg1H
1732+ eor r_resL, r_arg2H
1733+ bst r_resL, 7
1734+ lsl r_arg1L
1735+ rol r_arg1H
1736+ lsl r_arg2L
1737+ rol r_arg2H
1738+ brcc mulhq3_skipneg
1739+ com r_arg2H
1740+ neg r_arg2L
1741+ sbci r_arg2H, -1
1742+mulhq3_skipneg:
1743+ rcall __muluhq3
1744+ lsr r_arg1H
1745+ ror r_arg1L
1746+ brtc mulhq3_exit
1747+ com r_arg1H
1748+ neg r_arg1L
1749+ sbci r_arg1H, -1
1750+mulhq3_exit:
1751+ ret
1752+.endfunc
1753+#endif /* defined (L_mulhq3) */
1754+
1755+#if defined (L_muluhq3)
1756+ .global __muluhq3
1757+ .func __muluhq3
1758+__muluhq3:
1759+ clr r_resL ; clear result
1760+__muluhq3_loop:
1761+ lsr r_arg2H ; shift multiplicand
1762+ ror r_arg2L
1763+ sbrs r_arg1H,7
1764+ rjmp __muluhq3_skip
1765+ add r_resL,r_arg2L ; result + multiplicand
1766+ adc r_resH,r_arg2H
1767+__muluhq3_skip:
1768+ lsl r_arg1L ; shift multiplier
1769+ rol r_arg1H
1770+ brne __muluhq3_loop
1771+ cpi r_arg1L, 0
1772+ brne __muluhq3_loop ; exit multiplier = 0
1773+ mov_l r_arg1L,r_resL
1774+ mov_h r_arg1H,r_resH ; result to return register
1775+ clr __zero_reg__ ; zero the zero reg
1776+ ret
1777+.endfunc
1778+#endif /* defined (L_muluhq3) */
1779+
1780+#endif /* defined (__AVR_HAVE_MUL__) */
1781+
1782+#undef r_arg1L
1783+#undef r_arg1H
1784+#undef r_arg2L
1785+#undef r_arg2H
1786+#undef r_resL
1787+#undef r_resH
1788+
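On cores with a hardware multiplier the fmuls/fmulsu forms are used because they return the 8 x 8 product already shifted left by one, which lines the partial products up directly on the Q15 result. Up to truncation details, either path implements this editorial C model:

#include <stdint.h>

/* signed Q15 multiply: 32-bit product, keep bits 30..15 */
static int16_t mulhq_ref (int16_t a, int16_t b)
{
  return (int16_t) (((int32_t) a * b) >> 15);
}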
1789+/*******************************************************
1790+ Fixed Multiplication 8.8 x 8.8
1791+*******************************************************/
1792+
1793+#if defined (__AVR_HAVE_MUL__)
1794+#define r_arg1L r22 /* multiplier Low */
1795+#define r_arg1H r23 /* multiplier High */
1796+#define r_arg2L r20 /* multiplicand Low */
1797+#define r_arg2H r21 /* multiplicand High */
1798+#define r_resL r18 /* result Low */
1799+#define r_resH r19 /* result High */
1800+
1801+#if defined (L_mulha3)
1802+ .global __mulha3
1803+ .func __mulha3
1804+__mulha3:
1805+ mul r_arg1L, r_arg2L
1806+ mov r_resL, r1
1807+ muls r_arg1H, r_arg2H
1808+ mov r_resH, r0
1809+ mulsu r_arg1H, r_arg2L
1810+ add r_resL, r0
1811+ adc r_resH, r1
1812+ mulsu r_arg2H, r_arg1L
1813+ add r_resL, r0
1814+ adc r_resH, r1
1815+ clr __zero_reg__
1816+ ret
1817+.endfunc
1818+#endif /* defined (L_mulha3) */
1819+
1820+#if defined (L_muluha3)
1821+ .global __muluha3
1822+ .func __muluha3
1823+__muluha3:
1824+ mul r_arg1L, r_arg2L
1825+ mov r_resL, r1
1826+ mul r_arg1H, r_arg2H
1827+ mov r_resH, r0
1828+ mul r_arg1H, r_arg2L
1829+ add r_resL, r0
1830+ adc r_resH, r1
1831+ mul r_arg1L, r_arg2H
1832+ add r_resL, r0
1833+ adc r_resH, r1
1834+ clr __zero_reg__
1835+ ret
1836+.endfunc
1837+#endif /* defined (L_muluha3) */
1838+
1839+#else
1840+
1841+#define r_arg1L r24 /* multiplier Low */
1842+#define r_arg1H r25 /* multiplier High */
1843+#define r_arg2L r22 /* multiplicand Low */
1844+#define r_arg2H r23 /* multiplicand High */
1845+#define r_resL r18 /* result Low */
1846+#define r_resH r19 /* result High */
1847+#define r_scratchL r0 /* scratch Low */
1848+#define r_scratchH r1
1849+
1850+#if defined (L_mulha3)
1851+ .global __mulha3
1852+ .func __mulha3
1853+__mulha3:
1854+ mov r_resL, r_arg1H
1855+ eor r_resL, r_arg2H
1856+ bst r_resL, 7
1857+ sbrs r_arg1H, 7
1858+ rjmp __mulha3_arg1pos
1859+ com r_arg1H
1860+ neg r_arg1L
1861+ sbci r_arg1H,-1
1862+__mulha3_arg1pos:
1863+ sbrs r_arg2H, 7
1864+ rjmp __mulha3_arg2pos
1865+ com r_arg2H
1866+ neg r_arg2L
1867+ sbci r_arg2H,-1
1868+__mulha3_arg2pos:
1869+ rcall __muluha3
1870+ brtc __mulha3_exit
1871+ com r_resH
1872+ neg r_resL
1873+ sbci r_resH,-1
1874+__mulha3_exit:
1875+ ret
1876+.endfunc
1877+#endif /* defined (L_mulha3) */
1878+
1879+#if defined (L_muluha3)
1880+ .global __muluha3
1881+ .func __muluha3
1882+__muluha3:
1883+ clr r_resL ; clear result
1884+ clr r_resH
1885+ mov_l r0, r_arg1L ; save multiplicand
1886+ mov_h r1, r_arg1H
1887+__muluha3_loop1:
1888+ sbrs r_arg2H,0
1889+ rjmp __muluha3_skip1
1890+ add r_resL,r_arg1L ; result + multiplicand
1891+ adc r_resH,r_arg1H
1892+__muluha3_skip1:
1893+ lsl r_arg1L ; shift multiplicand
1894+ rol r_arg1H
1895+ sbiw r_arg1L,0
1896+ breq __muluha3_loop1_done ; exit multiplicand = 0
1897+ lsr r_arg2H
1898+ brne __muluha3_loop1 ; exit multiplier = 0
1899+__muluha3_loop1_done:
1900+ mov_l r_arg1L, r_scratchL ; restore multiplicand
1901+ mov_h r_arg1H, r_scratchH
1902+__muluha3_loop2:
1903+ lsr r_arg1H ; shift multiplicand
1904+ ror r_arg1L
1905+ sbiw r_arg1L,0
1906+ breq __muluha3_exit ; exit if multiplicand = 0
1907+ sbrs r_arg2L,7
1908+ rjmp __muluha3_skip2
1909+ add r_resL,r_arg1L ; result + multiplicand
1910+ adc r_resH,r_arg1H
1911+__muluha3_skip2:
1912+ lsl r_arg2L
1913+ brne __muluha3_loop2 ; exit if multiplier = 0
1914+__muluha3_exit:
1915+ clr __zero_reg__ ; got clobbered
1916+ ret
1917+.endfunc
1918+#endif /* defined (L_muluha3) */
1919+
1920+#endif /* defined (__AVR_HAVE_MUL__) */
1921+
1922+#undef r_arg1L
1923+#undef r_arg1H
1924+#undef r_arg2L
1925+#undef r_arg2H
1926+#undef r_resL
1927+#undef r_resH
1928+
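For the s7.8 accum the result is the middle 16 bits of the 32-bit product, which is why the MUL variant above only sums the partial products that can reach bytes 1 and 2. An editorial C sketch of the intended value, truncation details aside:

#include <stdint.h>

/* s7.8 accum multiply: 32-bit product, keep bits 23..8 */
static int16_t mulha_ref (int16_t a, int16_t b)
{
  return (int16_t) (((int32_t) a * b) >> 8);
}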
1929+/*******************************************************
1930+ Fixed Multiplication 16.16 x 16.16
1931+*******************************************************/
1932+
1933+#if defined (__AVR_HAVE_MUL__)
1934+/* uses nonstandard registers because mulsu only works on r16-r23 */
1935+#define r_clr r15
1936+
1937+#define r_arg1L r16 /* multiplier Low */
1938+#define r_arg1H r17
1939+#define r_arg1HL r18
1940+#define r_arg1HH r19 /* multiplier High */
1941+
1942+#define r_arg2L r20 /* multiplicand Low */
1943+#define r_arg2H r21
1944+#define r_arg2HL r22
1945+#define r_arg2HH r23 /* multiplicand High */
1946+
1947+#define r_resL r24 /* result Low */
1948+#define r_resH r25
1949+#define r_resHL r26
1950+#define r_resHH r27 /* result High */
1951+
1952+#if defined (L_mulsa3)
1953+ .global __mulsa3
1954+ .func __mulsa3
1955+__mulsa3:
1956+ clr r_clr
1957+ clr r_resH
1958+ clr r_resHL
1959+ clr r_resHH
1960+ mul r_arg1H, r_arg2L
1961+ mov r_resL, r1
1962+ mul r_arg1L, r_arg2H
1963+ add r_resL, r1
1964+ adc r_resH, r_clr
1965+ mul r_arg1L, r_arg2HL
1966+ add r_resL, r0
1967+ adc r_resH, r1
1968+ adc r_resHL, r_clr
1969+ mul r_arg1H, r_arg2H
1970+ add r_resL, r0
1971+ adc r_resH, r1
1972+ adc r_resHL, r_clr
1973+ mul r_arg1HL, r_arg2L
1974+ add r_resL, r0
1975+ adc r_resH, r1
1976+ adc r_resHL, r_clr
1977+ mulsu r_arg2HH, r_arg1L
1978+ sbc r_resHH, r_clr
1979+ add r_resH, r0
1980+ adc r_resHL, r1
1981+ adc r_resHH, r_clr
1982+ mul r_arg1H, r_arg2HL
1983+ add r_resH, r0
1984+ adc r_resHL, r1
1985+ adc r_resHH, r_clr
1986+ mul r_arg1HL, r_arg2H
1987+ add r_resH, r0
1988+ adc r_resHL, r1
1989+ adc r_resHH, r_clr
1990+ mulsu r_arg1HH, r_arg2L
1991+ sbc r_resHH, r_clr
1992+ add r_resH, r0
1993+ adc r_resHL, r1
1994+ adc r_resHH, r_clr
1995+ mulsu r_arg2HH, r_arg1H
1996+ add r_resHL, r0
1997+ adc r_resHH, r1
1998+ mul r_arg1HL, r_arg2HL
1999+ add r_resHL, r0
2000+ adc r_resHH, r1
2001+ mulsu r_arg1HH, r_arg2H
2002+ add r_resHL, r0
2003+ adc r_resHH, r1
2004+ mulsu r_arg2HH, r_arg1HL
2005+ add r_resHH, r0
2006+ mulsu r_arg1HH, r_arg2HL
2007+ add r_resHH, r0
2008+ clr __zero_reg__
2009+ ret
2010+.endfunc
2011+#endif
2012+
2013+#if defined (L_mulusa3)
2014+ .global __mulusa3
2015+ .func __mulusa3
2016+__mulusa3:
2017+ clr r_clr
2018+ clr r_resH
2019+ clr r_resHL
2020+ clr r_resHH
2021+ mul r_arg1H, r_arg2L
2022+ mov r_resL, r1
2023+ mul r_arg1L, r_arg2H
2024+ add r_resL, r1
2025+ adc r_resH, r_clr
2026+ mul r_arg1L, r_arg2HL
2027+ add r_resL, r0
2028+ adc r_resH, r1
2029+ adc r_resHL, r_clr
2030+ mul r_arg1H, r_arg2H
2031+ add r_resL, r0
2032+ adc r_resH, r1
2033+ adc r_resHL, r_clr
2034+ mul r_arg1HL, r_arg2L
2035+ add r_resL, r0
2036+ adc r_resH, r1
2037+ adc r_resHL, r_clr
2038+ mul r_arg1L, r_arg2HH
2039+ add r_resH, r0
2040+ adc r_resHL, r1
2041+ adc r_resHH, r_clr
2042+ mul r_arg1H, r_arg2HL
2043+ add r_resH, r0
2044+ adc r_resHL, r1
2045+ adc r_resHH, r_clr
2046+ mul r_arg1HL, r_arg2H
2047+ add r_resH, r0
2048+ adc r_resHL, r1
2049+ adc r_resHH, r_clr
2050+ mul r_arg1HH, r_arg2L
2051+ add r_resH, r0
2052+ adc r_resHL, r1
2053+ adc r_resHH, r_clr
2054+ mul r_arg1H, r_arg2HH
2055+ add r_resHL, r0
2056+ adc r_resHH, r1
2057+ mul r_arg1HL, r_arg2HL
2058+ add r_resHL, r0
2059+ adc r_resHH, r1
2060+ mul r_arg1HH, r_arg2H
2061+ add r_resHL, r0
2062+ adc r_resHH, r1
2063+ mul r_arg1HL, r_arg2HH
2064+ add r_resHH, r0
2065+ mul r_arg1HH, r_arg2HL
2066+ add r_resHH, r0
2067+ clr __zero_reg__
2068+ ret
2069+.endfunc
2070+#endif
2071+
2072+#else
2073+
2074+#define r_arg1L r18 /* multiplier Low */
2075+#define r_arg1H r19
2076+#define r_arg1HL r20
2077+#define r_arg1HH r21 /* multiplier High */
2078+
2079+/* these registers are needed for sbiw (it only works on r24..r31) */
2080+#define r_arg2L r24 /* multiplicand Low */
2081+#define r_arg2H r25
2082+#define r_arg2HL r26
2083+#define r_arg2HH r27 /* multiplicand High */
2084+
2085+#define r_resL r14 /* result Low */
2086+#define r_resH r15
2087+#define r_resHL r16
2088+#define r_resHH r17 /* result High */
2089+
2090+#define r_scratchL r0 /* scratch Low */
2091+#define r_scratchH r1
2092+#define r_scratchHL r22
2093+#define r_scratchHH r23 /* scratch High */
2094+
2095+#if defined (L_mulsa3)
2096+ .global __mulsa3
2097+ .func __mulsa3
2098+__mulsa3:
2099+ mov r_resL, r_arg1HH
2100+ eor r_resL, r_arg2HH
2101+ bst r_resL, 7
2102+ sbrs r_arg1HH, 7
2103+ rjmp __mulsa3_arg1pos
2104+ com r_arg1HH
2105+ com r_arg1HL
2106+ com r_arg1H
2107+ neg r_arg1L
2108+ sbci r_arg1H,-1
2109+ sbci r_arg1HL,-1
2110+ sbci r_arg1HH,-1
2111+__mulsa3_arg1pos:
2112+ sbrs r_arg2HH, 7
2113+ rjmp __mulsa3_arg2pos
2114+ com r_arg2HH
2115+ com r_arg2HL
2116+ com r_arg2H
2117+ neg r_arg2L
2118+ sbci r_arg2H,-1
2119+ sbci r_arg2HL,-1
2120+ sbci r_arg2HH,-1
2121+__mulsa3_arg2pos:
2122+ rcall __mulusa3
2123+ brtc __mulsa3_exit
2124+ com r_resHH
2125+ com r_resHL
2126+ com r_resH
2127+ com r_resL
2128+ adc r_resL,__zero_reg__
2129+ adc r_resH,__zero_reg__
2130+ adc r_resHL,__zero_reg__
2131+ adc r_resHH,__zero_reg__
2132+__mulsa3_exit:
2133+ ret
2134+.endfunc
2135+#endif /* defined (L_mulsa3) */
2136+
2137+#if defined (L_mulusa3)
2138+ .global __mulusa3
2139+ .func __mulusa3
2140+__mulusa3:
2141+ clr r_resL ; clear result
2142+ clr r_resH
2143+ mov_l r_resHL, r_resL
2144+ mov_h r_resHH, r_resH
2145+ mov_l r_scratchL, r_arg1L ; save multiplicand
2146+ mov_h r_scratchH, r_arg1H
2147+ mov_l r_scratchHL, r_arg1HL
2148+ mov_h r_scratchHH, r_arg1HH
2149+__mulusa3_loop1:
2150+ sbrs r_arg2HL,0
2151+ rjmp __mulusa3_skip1
2152+ add r_resL,r_arg1L ; result + multiplicand
2153+ adc r_resH,r_arg1H
2154+ adc r_resHL,r_arg1HL
2155+ adc r_resHH,r_arg1HH
2156+__mulusa3_skip1:
2157+ lsl r_arg1L ; shift multiplicand
2158+ rol r_arg1H
2159+ rol r_arg1HL
2160+ rol r_arg1HH
2161+ lsr r_arg2HH
2162+ ror r_arg2HL
2163+ sbiw r_arg2HL,0
2164+ brne __mulusa3_loop1 ; exit multiplier = 0
2165+__mulusa3_loop1_done:
2166+ mov_l r_arg1L, r_scratchL ; restore multiplicand
2167+ mov_h r_arg1H, r_scratchH
2168+ mov_l r_arg1HL, r_scratchHL
2169+ mov_h r_arg1HH, r_scratchHH
2170+__mulusa3_loop2:
2171+ lsr r_arg1HH ; shift multiplicand
2172+ ror r_arg1HL
2173+ ror r_arg1H
2174+ ror r_arg1L
2175+ sbrs r_arg2H,7
2176+ rjmp __mulusa3_skip2
2177+ add r_resL,r_arg1L ; result + multiplicand
2178+ adc r_resH,r_arg1H
2179+ adc r_resHL,r_arg1HL
2180+ adc r_resHH,r_arg1HH
2181+__mulusa3_skip2:
2182+ lsl r_arg2L
2183+ rol r_arg2H
2184+ sbiw r_arg2L,0
2185+ brne __mulusa3_loop2 ; exit if multiplier = 0
2186+__mulusa3_exit:
2187+ clr __zero_reg__ ; got clobbered
2188+ ret
2189+.endfunc
2190+#endif /* defined (L_mulusa3) */
2191+
2192+#undef r_scratchL
2193+#undef r_scratchH
2194+#undef r_scratchHL
2195+#undef r_scratchHH
2196+
2197+#endif
2198+
2199+#undef r_arg1L
2200+#undef r_arg1H
2201+#undef r_arg1HL
2202+#undef r_arg1HH
2203+
2204+#undef r_arg2L
2205+#undef r_arg2H
2206+#undef r_arg2HL
2207+#undef r_arg2HH
2208+
2209+#undef r_resL
2210+#undef r_resH
2211+#undef r_resHL
2212+#undef r_resHH
2213+
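The s15.16 case scales the same scheme up: the value is the middle 32 bits of the 64-bit product, assembled above from 8 x 8 partial products, and the partial products that fall entirely below bit 16 or entirely above bit 47 are simply skipped. An editorial C sketch:

#include <stdint.h>

/* s15.16 accum multiply: 64-bit product, keep bits 47..16 */
static int32_t mulsa_ref (int32_t a, int32_t b)
{
  return (int32_t) (((int64_t) a * b) >> 16);
}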
2214+/*******************************************************
2215+ Fractional Division 8 / 8
2216+*******************************************************/
2217+#define r_divd r25 /* dividend */
2218+#define r_quo r24 /* quotient */
2219+#define r_div r22 /* divisor */
2220+#define r_cnt r23 /* loop count */
2221+
2222+#if defined (L_divqq3)
2223+ .global __divqq3
2224+ .func __divqq3
2225+__divqq3:
2226+ mov r0, r_divd
2227+ eor r0, r_div
2228+ sbrc r_div, 7
2229+ neg r_div
2230+ sbrc r_divd, 7
2231+ neg r_divd
2232+ cp r_divd, r_div
2233+ breq __divqq3_minus1 ; if equal return -1
2234+ rcall __udivuqq3
2235+ lsr r_quo
2236+ sbrc r0, 7 ; negate result if needed
2237+ neg r_quo
2238+ ret
2239+__divqq3_minus1:
2240+ ldi r_quo, 0x80
2241+ ret
2242+.endfunc
2243+#endif /* defined (L_divqq3) */
2244+
2245+#if defined (L_udivuqq3)
2246+ .global __udivuqq3
2247+ .func __udivuqq3
2248+__udivuqq3:
2249+ clr r_quo ; clear quotient
2250+ ldi r_cnt,8 ; init loop counter
2251+__udivuqq3_loop:
2252+ lsl r_divd ; shift dividend
2253+ brcs __udivuqq3_ep ; dividend overflow
2254+ cp r_divd,r_div ; compare dividend & divisor
2255+ brcc __udivuqq3_ep ; dividend >= divisor
2256+ rol r_quo ; shift quotient (with CARRY)
2257+ rjmp __udivuqq3_cont
2258+__udivuqq3_ep:
2259+ sub r_divd,r_div ; restore dividend
2260+ lsl r_quo ; shift quotient (without CARRY)
2261+__udivuqq3_cont:
2262+ dec r_cnt ; decrement loop counter
2263+ brne __udivuqq3_loop
2264+ com r_quo ; complement result
2265+ ; because C flag was complemented in loop
2266+ ret
2267+.endfunc
2268+#endif /* defined (L_udivuqq3) */
2269+
2270+#undef r_divd
2271+#undef r_quo
2272+#undef r_div
2273+#undef r_cnt
2274+
2275+
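The unsigned helper is a classic restoring (shift and subtract) division producing one quotient bit per iteration; because the carry left by the compare is the inverse of the wanted quotient bit, the quotient is accumulated inverted and complemented once at the end. An editorial C model of the result for operands with dividend < divisor:

#include <stdint.h>

/* unsigned Q8 fract division; valid when a < b so the quotient fits */
static uint8_t udivuqq_ref (uint8_t a, uint8_t b)
{
  return (uint8_t) (((uint16_t) a << 8) / b);
}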
2276+/*******************************************************
2277+ Fractional Division 16 / 16
2278+*******************************************************/
2279+#define r_divdL r26 /* dividend Low */
2280+#define r_divdH r27 /* dividend High */
2281+#define r_quoL r24 /* quotient Low */
2282+#define r_quoH r25 /* quotient High */
2283+#define r_divL r22 /* divisor */
2284+#define r_divH r23 /* divisor */
2285+#define r_cnt r21 /* loop count */
2286+
2287+#if defined (L_divhq3)
2288+ .global __divhq3
2289+ .func __divhq3
2290+__divhq3:
2291+ mov r0, r_divdH
2292+ eor r0, r_divH
2293+ sbrs r_divH, 7
2294+ rjmp __divhq3_divpos
2295+ com r_divH
2296+ neg r_divL
2297+ sbci r_divH,-1
2298+__divhq3_divpos:
2299+ sbrs r_divdH, 7
2300+ rjmp __divhq3_divdpos
2301+ com r_divdH
2302+ neg r_divdL
2303+ sbci r_divdH,-1
2304+__divhq3_divdpos:
2305+ cp r_divdL, r_divL
2306+ cpc r_divdH, r_divH
2307+ breq __divhq3_minus1 ; if equal return -1
2308+ rcall __udivuhq3
2309+ lsr r_quoH
2310+ ror r_quoL
2311+ sbrs r0, 7 ; negate result if needed
2312+ ret
2313+ com r_quoH
2314+ neg r_quoL
2315+ sbci r_quoH,-1
2316+ ret
2317+__divhq3_minus1:
2318+ ldi r_quoH, 0x80
2319+ clr r_quoL
2320+ ret
2321+.endfunc
2322+#endif /* defined (L_divhq3) */
2323+
2324+#if defined (L_udivuhq3)
2325+ .global __udivuhq3
2326+ .func __udivuhq3
2327+__udivuhq3:
2328+ sub r_quoH,r_quoH ; clear quotient and carry
2329+ .global __udivuha3_entry
2330+__udivuha3_entry:
2331+ clr r_quoL ; clear quotient
2332+ ldi r_cnt,16 ; init loop counter
2333+__udivuhq3_loop:
2334+ rol r_divdL ; shift dividend (with CARRY)
2335+ rol r_divdH
2336+ brcs __udivuhq3_ep ; dividend overflow
2337+ cp r_divdL,r_divL ; compare dividend & divisor
2338+ cpc r_divdH,r_divH
2339+ brcc __udivuhq3_ep ; dividend >= divisor
2340+ rol r_quoL ; shift quotient (with CARRY)
2341+ rjmp __udivuhq3_cont
2342+__udivuhq3_ep:
2343+ sub r_divdL,r_divL ; restore dividend
2344+ sbc r_divdH,r_divH
2345+ lsl r_quoL ; shift quotient (without CARRY)
2346+__udivuhq3_cont:
2347+ rol r_quoH ; shift quotient
2348+ dec r_cnt ; decrement loop counter
2349+ brne __udivuhq3_loop
2350+ com r_quoL ; complement result
2351+ com r_quoH ; because C flag was complemented in loop
2352+ ret
2353+.endfunc
2354+#endif /* defined (L_udivuhq3) */
2355+
2356+/*******************************************************
2357+ Fixed Division 8.8 / 8.8
2358+*******************************************************/
2359+#if defined (L_divha3)
2360+ .global __divha3
2361+ .func __divha3
2362+__divha3:
2363+ mov r0, r_divdH
2364+ eor r0, r_divH
2365+ sbrs r_divH, 7
2366+ rjmp __divha3_divpos
2367+ com r_divH
2368+ neg r_divL
2369+ sbci r_divH,-1
2370+__divha3_divpos:
2371+ sbrs r_divdH, 7
2372+ rjmp __divha3_divdpos
2373+ com r_divdH
2374+ neg r_divdL
2375+ sbci r_divdH,-1
2376+__divha3_divdpos:
2377+ rcall __udivuha3
2378+ sbrs r0, 7 ; negate result if needed
2379+ ret
2380+ com r_quoH
2381+ neg r_quoL
2382+ sbci r_quoH,-1
2383+ ret
2384+.endfunc
2385+#endif /* defined (L_divha3) */
2386+
2387+#if defined (L_udivuha3)
2388+ .global __udivuha3
2389+ .func __udivuha3
2390+__udivuha3:
2391+ mov r_quoH, r_divdL
2392+ mov r_divdL, r_divdH
2393+ clr r_divdH
2394+ lsl r_quoH ; shift quotient into carry
2395+ rjmp __udivuha3_entry ; same as fractional after rearrange
2396+.endfunc
2397+#endif /* defined (L_udivuha3) */
2398+
2399+#undef r_divdL
2400+#undef r_divdH
2401+#undef r_quoL
2402+#undef r_quoH
2403+#undef r_divL
2404+#undef r_divH
2405+#undef r_cnt
2406+
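__udivuha3 reuses the 16-bit fract loop: an 8.8 accum quotient is ((a << 8) / b), and the byte shuffle plus the shared __udivuha3_entry label implement exactly that pre-shift of the dividend. An editorial C model for the unsigned case:

#include <stdint.h>

/* unsigned 8.8 accum division: pre-scale the dividend by 2^8 */
static uint16_t udivuha_ref (uint16_t a, uint16_t b)
{
  return (uint16_t) (((uint32_t) a << 8) / b);
}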
2407+/*******************************************************
2408+ Fixed Division 16.16 / 16.16
2409+*******************************************************/
2410+#define r_arg1L r24 /* arg1 gets passed already in place */
2411+#define r_arg1H r25
2412+#define r_arg1HL r26
2413+#define r_arg1HH r27
2414+#define r_divdL r26 /* dividend Low */
2415+#define r_divdH r27
2416+#define r_divdHL r30
2417+#define r_divdHH r31 /* dividend High */
2418+#define r_quoL r22 /* quotient Low */
2419+#define r_quoH r23
2420+#define r_quoHL r24
2421+#define r_quoHH r25 /* quotient High */
2422+#define r_divL r18 /* divisor Low */
2423+#define r_divH r19
2424+#define r_divHL r20
2425+#define r_divHH r21 /* divisor High */
2426+#define r_cnt __zero_reg__ /* loop count (0 after the loop!) */
2427+
2428+#if defined (L_divsa3)
2429+ .global __divsa3
2430+ .func __divsa3
2431+__divsa3:
2432+ mov r0, r27
2433+ eor r0, r_divHH
2434+ sbrs r_divHH, 7
2435+ rjmp __divsa3_divpos
2436+ com r_divHH
2437+ com r_divHL
2438+ com r_divH
2439+ neg r_divL
2440+ sbci r_divH,-1
2441+ sbci r_divHL,-1
2442+ sbci r_divHH,-1
2443+__divsa3_divpos:
2444+ sbrs r_arg1HH, 7
2445+ rjmp __divsa3_arg1pos
2446+ com r_arg1HH
2447+ com r_arg1HL
2448+ com r_arg1H
2449+ neg r_arg1L
2450+ sbci r_arg1H,-1
2451+ sbci r_arg1HL,-1
2452+ sbci r_arg1HH,-1
2453+__divsa3_arg1pos:
2454+ rcall __udivusa3
2455+ sbrs r0, 7 ; negate result if needed
2456+ ret
2457+ com r_quoHH
2458+ com r_quoHL
2459+ com r_quoH
2460+ neg r_quoL
2461+ sbci r_quoH,-1
2462+ sbci r_quoHL,-1
2463+ sbci r_quoHH,-1
2464+ ret
2465+.endfunc
2466+#endif /* defined (L_divsa3) */
2467+
2468+#if defined (L_udivusa3)
2469+ .global __udivusa3
2470+ .func __udivusa3
2471+__udivusa3:
2472+ ldi r_divdHL, 32 ; init loop counter
2473+ mov r_cnt, r_divdHL
2474+ clr r_divdHL
2475+ clr r_divdHH
2476+ mov_l r_quoL, r_divdHL
2477+ mov_h r_quoH, r_divdHH
2478+ lsl r_quoHL ; shift quotient into carry
2479+ rol r_quoHH
2480+__udivusa3_loop:
2481+ rol r_divdL ; shift dividend (with CARRY)
2482+ rol r_divdH
2483+ rol r_divdHL
2484+ rol r_divdHH
2485+ brcs __udivusa3_ep ; dividend overflow
2486+ cp r_divdL,r_divL ; compare dividend & divisor
2487+ cpc r_divdH,r_divH
2488+ cpc r_divdHL,r_divHL
2489+ cpc r_divdHH,r_divHH
2490+ brcc __udivusa3_ep ; dividend >= divisor
2491+ rol r_quoL ; shift quotient (with CARRY)
2492+ rjmp __udivusa3_cont
2493+__udivusa3_ep:
2494+ sub r_divdL,r_divL ; restore dividend
2495+ sbc r_divdH,r_divH
2496+ sbc r_divdHL,r_divHL
2497+ sbc r_divdHH,r_divHH
2498+ lsl r_quoL ; shift quotient (without CARRY)
2499+__udivusa3_cont:
2500+ rol r_quoH ; shift quotient
2501+ rol r_quoHL
2502+ rol r_quoHH
2503+ dec r_cnt ; decrement loop counter
2504+ brne __udivusa3_loop
2505+ com r_quoL ; complement result
2506+ com r_quoH ; because C flag was complemented in loop
2507+ com r_quoHL
2508+ com r_quoHH
2509+ ret
2510+.endfunc
2511+#endif /* defined (L_udivusa3) */
2512+
2513+#undef r_divdL
2514+#undef r_divdH
2515+#undef r_divdHL
2516+#undef r_divdHH
2517+#undef r_quoL
2518+#undef r_quoH
2519+#undef r_quoHL
2520+#undef r_quoHH
2521+#undef r_divL
2522+#undef r_divH
2523+#undef r_divHL
2524+#undef r_divHH
2525+#undef r_cnt
2526diff -Naurp gcc/config/avr/libgcc.S gcc/config/avr/libgcc.S
2527--- gcc/config/avr/libgcc.S 2011-10-27 16:45:17.000000000 +0530
2528+++ gcc/config/avr/libgcc.S 2011-10-27 16:55:55.000000000 +0530
2529@@ -163,6 +163,23 @@ __mulhi3_exit:
2530 .global __mulhisi3
2531 .func __mulhisi3
2532 __mulhisi3:
2533+#if defined (__AVR_HAVE_MUL__)
2534+ muls r21, r19
2535+ movw r24, r0
2536+ mul r20, r18
2537+ movw r22, r0
2538+ mulsu r21, r18
2539+ add r23, r0
2540+ adc r24, r1
2541+ clr r1
2542+ adc r25, r1
2543+ mulsu r19, r20
2544+ add r23, r0
2545+ adc r24, r1
2546+ clr r1
2547+ adc r25, r1
2548+ ret
2549+#else
2550 mov_l r18, r24
2551 mov_h r19, r25
2552 clr r24
2553@@ -174,6 +191,7 @@ __mulhisi3:
dbe7ab63 2554 dec r20
2555 mov r21, r20
2556 rjmp __mulsi3
2557+#endif /* defined (__AVR_HAVE_MUL__) */
2558 .endfunc
2559 #endif /* defined (L_mulhisi3) */
2560
2561@@ -181,13 +199,31 @@ __mulhisi3:
2562 .global __umulhisi3
2563 .func __umulhisi3
2564 __umulhisi3:
2565- mov_l r18, r24
2566- mov_h r19, r25
2567+#if defined (__AVR_HAVE_MUL__)
2568+ mul r21, r19
2569+ movw r24, r0
2570+ mul r20, r18
2571+ movw r22, r0
2572+ mul r21, r18
2573+ add r23, r0
2574+ adc r24, r1
2575+ clr r1
2576+ adc r25, r1
2577+ mul r19, r20
2578+ add r23, r0
2579+ adc r24, r1
2580+ clr r1
2581+ adc r25, r1
2582+ ret
2583+#else
2584+ mov_l r22, r20
2585+ mov_h r23, r21
2586 clr r24
2587 clr r25
2588 clr r20
2589 clr r21
2590 rjmp __mulsi3
2591+#endif
2592 .endfunc
2593 #endif /* defined (L_umulhisi3) */
2594
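With a hardware multiplier the widening 16 x 16 -> 32 multiply is open coded from four 8 x 8 partial products instead of looping through __mulsi3. An editorial C sketch of the same decomposition (register assignment aside); it returns the same value as (uint32_t) a * b:

#include <stdint.h>

static uint32_t umulhisi_ref (uint16_t a, uint16_t b)
{
  uint32_t al = a & 0xff, ah = a >> 8;
  uint32_t bl = b & 0xff, bh = b >> 8;
  return ((ah * bh) << 16) + ((ah * bl) << 8) + ((al * bh) << 8) + (al * bl);
}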
2595@@ -200,7 +236,6 @@ __umulhisi3:
2596 #define r_arg1HL r24
2597 #define r_arg1HH r25 /* multiplier High */
2598
2599-
2600 #define r_arg2L r18 /* multiplicand Low */
2601 #define r_arg2H r19
2602 #define r_arg2HL r20
2603@@ -556,6 +591,23 @@ __divmodsi4_neg1:
2604 .endfunc
2605 #endif /* defined (L_divmodsi4) */
2606
2607+#undef r_remHH
2608+#undef r_remHL
2609+#undef r_remH
2610+#undef r_remL
2611+
2612+#undef r_arg1HH
2613+#undef r_arg1HL
2614+#undef r_arg1H
2615+#undef r_arg1L
2616+
2617+#undef r_arg2HH
2618+#undef r_arg2HL
2619+#undef r_arg2H
2620+#undef r_arg2L
2621+
2622+#undef r_cnt
2623+
2624 /**********************************
2625 * This is a prologue subroutine
2626 **********************************/
2627@@ -899,3 +951,4 @@ __tablejump_elpm__:
2628 .endfunc
2629 #endif /* defined (L_tablejump_elpm) */
2630
2631+#include "libgcc-fixed.S"
2632diff -Naurp gcc/config/avr/t-avr gcc/config/avr/t-avr
2633--- gcc/config/avr/t-avr 2011-10-27 16:45:17.000000000 +0530
2634+++ gcc/config/avr/t-avr 2011-10-27 16:55:55.000000000 +0530
2635@@ -36,6 +36,8 @@ LIB1ASMSRC = avr/libgcc.S
2636 LIB1ASMFUNCS = \
2637 _mulqi3 \
2638 _mulhi3 \
2639+ _mulhisi3 \
2640+ _umulhisi3 \
2641 _mulsi3 \
2642 _udivmodqi4 \
2643 _divmodqi4 \
2644@@ -54,6 +56,39 @@ LIB1ASMFUNCS = \
2645 _ctors \
2646 _dtors
2647
2648+# Fixed point routines
2649+LIB1ASMFUNCS += \
2650+ _fractqqsf \
2651+ _fractuqqsf \
2652+ _fracthqsf \
2653+ _fractuhqsf \
2654+ _fracthasf \
2655+ _fractuhasf \
2656+ _fractsasf \
2657+ _fractusasf \
2658+ _fractsfqq \
2659+ _fractsfuqq \
2660+ _fractsfhq \
2661+ _fractsfuhq \
2662+ _fractsfha \
2663+ _fractsfsa \
2664+ _mulqq3 \
2665+ _muluqq3 \
2666+ _mulhq3 \
2667+ _muluhq3 \
2668+ _mulha3 \
2669+ _muluha3 \
2670+ _mulsa3 \
2671+ _mulusa3 \
2672+ _divqq3 \
2673+ _udivuqq3 \
2674+ _divhq3 \
2675+ _udivuhq3 \
2676+ _divha3 \
2677+ _udivuha3 \
2678+ _divsa3 \
2679+ _udivusa3
2680+
2681 # We do not have the DF type.
2682 # Most of the C functions in libgcc2 use almost all registers,
2683 # so use -mcall-prologues for smaller code size.
2684diff -Naurp gcc/cse.c gcc/cse.c
2685--- gcc/cse.c 2011-10-27 16:45:17.000000000 +0530
2686+++ gcc/cse.c 2011-10-27 16:55:55.000000000 +0530
2687@@ -3702,9 +3702,10 @@ fold_rtx (rtx x, rtx insn)
dbe7ab63 2688 && exact_log2 (- INTVAL (const_arg1)) >= 0)))
2689 break;
2690
2691- /* ??? Vector mode shifts by scalar
2692+ /* ??? Vector and Fixed Point shifts by scalar
2693 shift operand are not supported yet. */
2694- if (is_shift && VECTOR_MODE_P (mode))
2695+ if (is_shift && (VECTOR_MODE_P (mode)
2696+ || ALL_FIXED_POINT_MODE_P (mode)))
2697 break;
2698
2699 if (is_shift
2700diff -Naurp gcc/dwarf2out.c gcc/dwarf2out.c
2701--- gcc/dwarf2out.c 2011-10-27 16:45:17.000000000 +0530
2702+++ gcc/dwarf2out.c 2011-10-27 16:55:55.000000000 +0530
2703@@ -12790,6 +12790,12 @@ base_type_die (tree type)
2704
2705 add_AT_unsigned (base_type_result, DW_AT_byte_size,
2706 int_size_in_bytes (type));
2707+
2708+ /* version 3 dwarf specifies that for fixed-point types DW_AT_binary_scale
2709+ describes the location of the decimal place */
2710+ if (TREE_CODE (type) == FIXED_POINT_TYPE)
2711+ add_AT_int (base_type_result, DW_AT_binary_scale, -TYPE_FBIT (type));
2712+
2713 add_AT_unsigned (base_type_result, DW_AT_encoding, encoding);
2714
2715 return base_type_result;
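A fixed-point base type therefore records how to rescale its raw encoding: for example, a signed _Fract with TYPE_FBIT = 15 gets DW_AT_binary_scale = -15, so a consumer recovers the value as raw_integer * 2^-15 (editorial example; how a given debugger presents it is up to that debugger).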
2716@@ -16561,7 +16567,11 @@ add_const_value_attribute (dw_die_ref di
2717
2718 case HIGH:
2719 case CONST_FIXED:
2720- return false;
2721+ {
2722+ add_AT_double (die, DW_AT_const_value,
2723+ CONST_FIXED_VALUE_HIGH (rtl), CONST_FIXED_VALUE_LOW (rtl));
2724+ }
2725+ break;
2726
2727 case MEM:
2728 if (GET_CODE (XEXP (rtl, 0)) == CONST_STRING
2729diff -Naurp gcc/fold-const.c gcc/fold-const.c
2730--- gcc/fold-const.c 2011-10-27 16:45:17.000000000 +0530
2731+++ gcc/fold-const.c 2011-10-27 16:55:55.000000000 +0530
2732@@ -11782,6 +11782,11 @@ fold_binary_loc (location_t loc,
2733 if (TREE_CODE (arg1) == INTEGER_CST && tree_int_cst_sgn (arg1) < 0)
2734 return NULL_TREE;
2735
2736+ /* Since fixed-point types do not support bitwise and, or, etc.,
2737+ don't try to convert to an expression with them. */
2738+ if (TREE_CODE(type) == FIXED_POINT_TYPE)
2739+ return NULL_TREE;
2740+
2741 /* Turn (a OP c1) OP c2 into a OP (c1+c2). */
2742 if (TREE_CODE (op0) == code && host_integerp (arg1, false)
2743 && TREE_INT_CST_LOW (arg1) < TYPE_PRECISION (type)
2744diff -Naurp gcc/varasm.c gcc/varasm.c
2745--- gcc/varasm.c 2011-10-27 16:45:17.000000000 +0530
2746+++ gcc/varasm.c 2011-10-27 16:55:55.000000000 +0530
2747@@ -2504,7 +2504,7 @@ assemble_integer (rtx x, unsigned int si
2748 else
2749 mclass = MODE_INT;
2750
2751- omode = mode_for_size (subsize * BITS_PER_UNIT, mclass, 0);
2752+ omode = mode_for_size (subsize * BITS_PER_UNIT, MODE_INT, 0);
2753 imode = mode_for_size (size * BITS_PER_UNIT, mclass, 0);
2754
2755 for (i = 0; i < size; i += subsize)