1 diff -Naurp gcc/config/avr/avr.c gcc/config/avr/avr.c
2 --- gcc/config/avr/avr.c 2011-10-27 16:45:17.000000000 +0530
3 +++ gcc/config/avr/avr.c 2011-10-27 16:55:55.000000000 +0530
4 @@ -236,6 +236,19 @@ static const struct default_options avr_
5 #undef TARGET_EXCEPT_UNWIND_INFO
6 #define TARGET_EXCEPT_UNWIND_INFO sjlj_except_unwind_info
8 +#undef TARGET_SCALAR_MODE_SUPPORTED_P
9 +#define TARGET_SCALAR_MODE_SUPPORTED_P avr_scalar_mode_supported_p
11 + /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
13 + avr_scalar_mode_supported_p (enum machine_mode mode)
15 + if (ALL_FIXED_POINT_MODE_P (mode))
18 + return default_scalar_mode_supported_p (mode);
21 struct gcc_target targetm = TARGET_INITIALIZER;
24 @@ -1767,9 +1780,9 @@ output_movqi (rtx insn, rtx operands[],
28 - if (register_operand (dest, QImode))
29 + if (register_operand (dest, VOIDmode))
31 - if (register_operand (src, QImode)) /* mov r,r */
32 + if (register_operand (src, VOIDmode)) /* mov r,r */
34 if (test_hard_reg_class (STACK_REG, dest))
35 return AS2 (out,%0,%1);
36 @@ -1857,9 +1870,9 @@ output_movhi (rtx insn, rtx operands[],
40 - if (register_operand (dest, HImode))
41 + if (register_operand (dest, VOIDmode))
43 - if (register_operand (src, HImode)) /* mov r,r */
44 + if (register_operand (src, VOIDmode)) /* mov r,r */
46 if (test_hard_reg_class (STACK_REG, dest))
48 @@ -2582,6 +2595,14 @@ output_movsisf(rtx insn, rtx operands[],
50 if (test_hard_reg_class (LD_REGS, dest)) /* ldi d,i */
53 + && (UINTVAL (src) >> 16) == (UINTVAL (src) & 0xffff))
56 + return (AS2 (ldi,%A0,lo8(%1)) CR_TAB
57 + AS2 (ldi,%B0,hi8(%1)) CR_TAB
58 + AS2 (movw,%C0,%A0));
61 return (AS2 (ldi,%A0,lo8(%1)) CR_TAB
62 AS2 (ldi,%B0,hi8(%1)) CR_TAB
63 @@ -4527,6 +4548,196 @@ avr_rotate_bytes (rtx operands[])
67 +/* Outputs the instructions needed for a fixed-point conversion. */
70 +fract_out (rtx insn ATTRIBUTE_UNUSED, rtx operands[], int intsigned, int *len)
73 + int sbit[2], ilen[2], flen[2], tlen[2];
74 + int rdest, rsource, offset;
75 + int start, end, dir;
76 + int hadbst = 0, hadlsl = 0;
77 + int clrword = -1, lastclr = 0, clr = 0;
83 + for (i = 0; i < 2; i++)
85 + enum machine_mode mode = GET_MODE (operands[i]);
86 + tlen[i] = GET_MODE_SIZE (mode);
87 + if (SCALAR_INT_MODE_P (mode))
89 + sbit[i] = intsigned;
90 + ilen[i] = GET_MODE_BITSIZE(mode) / 8;
93 + else if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
95 + sbit[i] = SIGNED_SCALAR_FIXED_POINT_MODE_P (mode);
96 + ilen[i] = (GET_MODE_IBIT (mode) + 1) / 8;
97 + flen[i] = (GET_MODE_FBIT (mode) + 1) / 8;
100 + fatal_insn ("unsupported fixed-point conversion", insn);
103 + rdest = true_regnum (operands[0]);
104 + rsource = true_regnum (operands[1]);
105 + offset = flen[1] - flen[0];
107 + /* Store the sign bit if the destination is a signed
108 + fract and the source has a sign in the integer part. */
109 + if (sbit[0] && !ilen[0] && sbit[1] && ilen[1])
111 + /* To avoid using bst and bld if the source and
112 + destination registers overlap we can use a single lsl
113 + since we don't care about preserving the source register. */
114 + if (rdest < rsource + tlen[1] && rdest + tlen[0] > rsource)
116 + sprintf (buf, "lsl r%d", rsource + tlen[1] - 1);
121 + sprintf (buf, "bst r%d, 7", rsource + tlen[1] - 1);
124 + output_asm_insn (buf, operands);
128 + /* Pick the correct direction. */
129 + if (rdest < rsource + offset)
138 + start = tlen[0] - 1;
142 + /* Move registers into place, clearing registers that do not overlap. */
143 + for (i = start; i != end; i += dir)
145 + int destloc = rdest + i, sourceloc = rsource + i + offset;
146 + if (sourceloc < rsource || sourceloc >= rsource + tlen[1])
148 + if (AVR_HAVE_MOVW && i+dir != end
149 + && (sourceloc+dir < rsource || sourceloc+dir >= rsource + tlen[1])
150 + && ((dir == 1 && !(destloc%2) && !(sourceloc%2))
151 + || (dir == -1 && (destloc%2) && (sourceloc%2)))
154 + sprintf (buf, "movw r%d, r%d", destloc&0xfe, clrword&0xfe);
159 + /* Do not clear the register if it is going to get
160 + sign extended with a mov later. */
161 + if (sbit[0] && sbit[1] && i != tlen[0] - 1 && i >= flen[0])
164 + sprintf (buf, "clr r%d", destloc);
170 + else if (destloc == sourceloc)
173 + if (AVR_HAVE_MOVW && i+dir != end
174 + && sourceloc+dir >= rsource && sourceloc+dir < rsource + tlen[1]
175 + && ((dir == 1 && !(destloc%2) && !(sourceloc%2))
176 + || (dir == -1 && (destloc%2) && (sourceloc%2))))
178 + sprintf (buf, "movw r%d, r%d", destloc&0xfe, sourceloc&0xfe);
182 + sprintf (buf, "mov r%d, r%d", destloc, sourceloc);
184 + output_asm_insn (buf, operands);
191 + /* Perform sign extension if needed. */
192 + if (sbit[0] && sbit[1] && ilen[0] > ilen[1])
194 + sprintf (buf, "sbrc r%d, 7", rdest+tlen[1]-1-offset);
195 + output_asm_insn (buf, operands);
196 + sprintf (buf, "com r%d", rdest+tlen[0]-1);
197 + output_asm_insn (buf, operands);
199 + /* Sign extend additional bytes. */
200 + start = rdest + tlen[0] - 2;
201 + end = rdest + flen[0] + ilen[1] - 1;
202 + for (i = start; i != end; i--)
204 + if (AVR_HAVE_MOVW && i != start && i-1 != end)
205 + sprintf (buf, "movw r%d, r%d", --i, rdest+tlen[0]-2);
207 + sprintf (buf, "mov r%d, r%d", i, rdest+tlen[0]-1);
208 + output_asm_insn (buf, operands);
213 + /* Perform shifts; only needed if one operand
214 + is a signed fract and the other is not. */
215 + if (sbit[0] && !ilen[0] && (!sbit[1] || ilen[1]))
217 + start = rdest+flen[0]-1;
218 + end = rdest + flen[0] - flen[1];
221 + for (i = start; i >= end; i--)
223 + if (i == start && !hadlsl)
224 + sprintf (buf, "lsr r%d", i);
226 + sprintf (buf, "ror r%d", i);
227 + output_asm_insn (buf, operands);
233 + sprintf (buf, "bld r%d, 7", rdest + tlen[0] - 1);
234 + output_asm_insn (buf, operands);
238 + else if (sbit[1] && !ilen[1] && (!sbit[0] || ilen[0]))
240 + start = rdest + flen[0] - flen[1];
243 + for (i = start; i<rdest+flen[0]; i++)
246 + sprintf (buf, "lsl r%d", i);
248 + sprintf (buf, "rol r%d", i);
249 + output_asm_insn (buf, operands);
257 /* Modifies the length assigned to instruction INSN
258 LEN is the initially computed length of the insn. */
260 diff -Naurp gcc/config/avr/avr-fixed.md gcc/config/avr/avr-fixed.md
261 --- gcc/config/avr/avr-fixed.md 1970-01-01 05:30:00.000000000 +0530
262 +++ gcc/config/avr/avr-fixed.md 2011-10-27 16:55:55.000000000 +0530
264 +;; -*- Mode: Scheme -*-
265 +;; This file contains instructions that support fixed-point operations
266 +;; for Atmel AVR microcontrollers.
267 +;; Copyright (C) 2009
268 +;; Free Software Foundation, Inc.
269 +;; Contributed by Sean D'Epagnier (sean@depagnier.com)
271 +;; This file is part of GCC.
273 +;; GCC is free software; you can redistribute it and/or modify
274 +;; it under the terms of the GNU General Public License as published by
275 +;; the Free Software Foundation; either version 3, or (at your option)
276 +;; any later version.
278 +;; GCC is distributed in the hope that it will be useful,
279 +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
280 +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
281 +;; GNU General Public License for more details.
283 +;; You should have received a copy of the GNU General Public License
284 +;; along with GCC; see the file COPYING3. If not see
285 +;; <http://www.gnu.org/licenses/>.
287 +(define_mode_iterator ALLQQ [(QQ "") (UQQ "")])
288 +(define_mode_iterator ALLHQ [(HQ "") (UHQ "")])
289 +(define_mode_iterator ALLHA [(HA "") (UHA "")])
290 +(define_mode_iterator ALLHQHA [(HQ "") (UHQ "") (HA "") (UHA "")])
291 +(define_mode_iterator ALLSA [(SA "") (USA "")])
295 +(define_mode_iterator FIXED1 [(QQ "") (UQQ "") (HQ "") (UHQ "")
296 + (SQ "") (USQ "") (DQ "") (UDQ "")
297 + (HA "") (UHA "") (SA "") (USA "")
298 + (DA "") (UDA "") (TA "") (UTA "")
299 + (QI "") (HI "") (SI "") (DI "")])
300 +(define_mode_iterator FIXED2 [(QQ "") (UQQ "") (HQ "") (UHQ "")
301 + (SQ "") (USQ "") (DQ "") (UDQ "")
302 + (HA "") (UHA "") (SA "") (USA "")
303 + (DA "") (UDA "") (TA "") (UTA "")
304 + (QI "") (HI "") (SI "") (DI "")])
306 +(define_insn "fract<FIXED2:mode><FIXED1:mode>2"
307 + [(set (match_operand:FIXED1 0 "register_operand" "=r")
308 + (fract_convert:FIXED1 (match_operand:FIXED2 1 "register_operand" "r")))]
310 + "* return fract_out (insn, operands, 1, NULL);"
311 + [(set_attr "cc" "clobber")])
313 +(define_insn "fractuns<FIXED2:mode><FIXED1:mode>2"
314 + [(set (match_operand:FIXED1 0 "register_operand" "=r")
315 + (unsigned_fract_convert:FIXED1 (match_operand:FIXED2 1 "register_operand" "r")))]
317 + "* return fract_out (insn, operands, 0, NULL);"
318 + [(set_attr "cc" "clobber")])
320 +;;; Addition/Subtraction, mostly identical to integer versions
322 +(define_insn "add<ALLQQ:mode>3"
323 + [(set (match_operand:ALLQQ 0 "register_operand" "=r,d")
324 + (plus:ALLQQ (match_operand:ALLQQ 1 "register_operand" "%0,0")
325 + (match_operand:ALLQQ 2 "nonmemory_operand" "r,i")))]
329 + subi %0,lo8(-(%2))"
330 + [(set_attr "length" "1,1")
331 + (set_attr "cc" "set_czn,set_czn")])
333 +(define_insn "sub<ALLQQ:mode>3"
334 + [(set (match_operand:ALLQQ 0 "register_operand" "=r,d")
335 + (minus:ALLQQ (match_operand:ALLQQ 1 "register_operand" "0,0")
336 + (match_operand:ALLQQ 2 "nonmemory_operand" "r,i")))]
341 + [(set_attr "length" "1,1")
342 + (set_attr "cc" "set_czn,set_czn")])
345 +(define_insn "add<ALLHQHA:mode>3"
346 + [(set (match_operand:ALLHQHA 0 "register_operand" "=r,d")
347 + (plus:ALLHQHA (match_operand:ALLHQHA 1 "register_operand" "%0,0")
348 + (match_operand:ALLHQHA 2 "nonmemory_operand" "r,i")))]
351 + add %A0,%A2\;adc %B0,%B2
352 + subi %A0,lo8(-(%2))\;sbci %B0,hi8(-(%2))"
353 + [(set_attr "length" "2,2")
354 + (set_attr "cc" "set_n,set_czn")])
356 +(define_insn "sub<ALLHQHA:mode>3"
357 + [(set (match_operand:ALLHQHA 0 "register_operand" "=r,d")
358 + (minus:ALLHQHA (match_operand:ALLHQHA 1 "register_operand" "0,0")
359 + (match_operand:ALLHQHA 2 "nonmemory_operand" "r,i")))]
362 + sub %A0,%A2\;sbc %B0,%B2
363 + subi %A0,lo8(%2)\;sbci %B0,hi8(%2)"
364 + [(set_attr "length" "2,2")
365 + (set_attr "cc" "set_czn,set_czn")])
367 +(define_insn "add<ALLSA:mode>3"
368 + [(set (match_operand:ALLSA 0 "register_operand" "=r,d")
369 + (plus:ALLSA (match_operand:ALLSA 1 "register_operand" "%0,0")
370 + (match_operand:ALLSA 2 "nonmemory_operand" "r,i")))]
373 + add %A0,%A2\;adc %B0,%B2\;adc %C0,%C2\;adc %D0,%D2
374 + subi %0,lo8(-(%2))\;sbci %B0,hi8(-(%2))\;sbci %C0,hlo8(-(%2))\;sbci %D0,hhi8(-(%2))"
375 + [(set_attr "length" "4,4")
376 + (set_attr "cc" "set_n,set_czn")])
378 +(define_insn "sub<ALLSA:mode>3"
379 + [(set (match_operand:ALLSA 0 "register_operand" "=r,d")
380 + (minus:ALLSA (match_operand:ALLSA 1 "register_operand" "0,0")
381 + (match_operand:ALLSA 2 "nonmemory_operand" "r,i")))]
384 + sub %0,%2\;sbc %B0,%B2\;sbc %C0,%C2\;sbc %D0,%D2
385 + subi %A0,lo8(%2)\;sbci %B0,hi8(%2)\;sbci %C0,hlo8(%2)\;sbci %D0,hhi8(%2)"
386 + [(set_attr "length" "4,4")
387 + (set_attr "cc" "set_czn,set_czn")])
389 +;******************************************************************************
392 +(define_insn "mulqq3"
393 + [(set (match_operand:QQ 0 "register_operand" "=r")
394 + (mult:QQ (match_operand:QQ 1 "register_operand" "a")
395 + (match_operand:QQ 2 "register_operand" "a")))]
397 + "fmuls %1,%2\;mov %0,r1\;clr r1"
398 + [(set_attr "length" "3")
399 + (set_attr "cc" "clobber")])
401 +(define_insn "muluqq3"
402 + [(set (match_operand:UQQ 0 "register_operand" "=r")
403 + (mult:UQQ (match_operand:UQQ 1 "register_operand" "r")
404 + (match_operand:UQQ 2 "register_operand" "r")))]
406 + "mul %1,%2\;mov %0,r1\;clr r1"
407 + [(set_attr "length" "3")
408 + (set_attr "cc" "clobber")])
410 +;; (reg:ALLHQ 20) not clobbered on the enhanced core.
411 +;; use registers from 16-23 so we can use fmuls
412 +;; All call-used registers clobbered otherwise - normal library call.
413 +(define_expand "mul<ALLHQ:mode>3"
414 + [(set (reg:ALLHQ 22) (match_operand:ALLHQ 1 "register_operand" ""))
415 + (set (reg:ALLHQ 20) (match_operand:ALLHQ 2 "register_operand" ""))
416 + (parallel [(set (reg:ALLHQ 18) (mult:ALLHQ (reg:ALLHQ 22) (reg:ALLHQ 20)))
417 + (clobber (reg:ALLHQ 22))])
418 + (set (match_operand:ALLHQ 0 "register_operand" "") (reg:ALLHQ 18))]
422 +(define_insn "*mul<ALLHQ:mode>3_enh_call"
423 + [(set (reg:ALLHQ 18) (mult:ALLHQ (reg:ALLHQ 22) (reg:ALLHQ 20)))
424 + (clobber (reg:ALLHQ 22))]
426 + "%~call __mul<ALLHQ:mode>3"
427 + [(set_attr "type" "xcall")
428 + (set_attr "cc" "clobber")])
430 +; Separate call sequences for devices with and without a hardware multiplier.
431 +(define_expand "mul<ALLHA:mode>3"
432 + [(set (reg:ALLHA 22) (match_operand:ALLHA 1 "register_operand" ""))
433 + (set (reg:ALLHA 20) (match_operand:ALLHA 2 "register_operand" ""))
434 + (parallel [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 20)))
435 + (clobber (reg:ALLHA 22))])
436 + (set (match_operand:ALLHA 0 "register_operand" "") (reg:ALLHA 18))]
442 + emit_insn (gen_mul<ALLHA:mode>3_call (operands[0], operands[1], operands[2]));
447 +(define_insn "*mul<ALLHA:mode>3_enh"
448 + [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 20)))
449 + (clobber (reg:ALLHA 22))]
451 + "%~call __mul<ALLHA:mode>3"
452 + [(set_attr "type" "xcall")
453 + (set_attr "cc" "clobber")])
455 +; Without a hardware multiplier, the call clobbers both inputs and needs a separate output register.
456 +(define_expand "mul<ALLHA:mode>3_call"
457 + [(set (reg:ALLHA 24) (match_operand:ALLHA 1 "register_operand" ""))
458 + (set (reg:ALLHA 22) (match_operand:ALLHA 2 "register_operand" ""))
459 + (parallel [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 24)))
460 + (clobber (reg:ALLHA 22))
461 + (clobber (reg:ALLHA 24))])
462 + (set (match_operand:ALLHA 0 "register_operand" "") (reg:ALLHA 18))]
466 +(define_insn "*mul<ALLHA:mode>3_call"
467 + [(set (reg:ALLHA 18) (mult:ALLHA (reg:ALLHA 22) (reg:ALLHA 24)))
468 + (clobber (reg:ALLHA 22))
469 + (clobber (reg:ALLHA 24))]
471 + "%~call __mul<ALLHA:mode>3"
472 + [(set_attr "type" "xcall")
473 + (set_attr "cc" "clobber")])
475 +;; On the enhanced core, don't clobber either input, and use a separate output;
476 +;; r2 is needed as a zero register since r1 is used by mul.
477 +(define_expand "mul<ALLSA:mode>3"
478 + [(set (reg:ALLSA 16) (match_operand:ALLSA 1 "register_operand" ""))
479 + (set (reg:ALLSA 20) (match_operand:ALLSA 2 "register_operand" ""))
480 + (parallel [(set (reg:ALLSA 24) (mult:ALLSA (reg:ALLSA 16) (reg:ALLSA 20)))
481 + (clobber (reg:QI 15))])
482 + (set (match_operand:ALLSA 0 "register_operand" "") (reg:ALLSA 24))]
488 + emit_insn (gen_mul<ALLSA:mode>3_call (operands[0], operands[1], operands[2]));
493 +(define_insn "*mul<ALLSA:mode>3_enh"
494 + [(set (reg:ALLSA 24) (mult:ALLSA (reg:ALLSA 16) (reg:ALLSA 20)))
495 + (clobber (reg:QI 15))]
497 + "%~call __mul<ALLSA:mode>3"
498 + [(set_attr "type" "xcall")
499 + (set_attr "cc" "clobber")])
501 +; Without a hardware multiplier, the call clobbers both inputs, needs a
502 +; separate output, and also needs two more scratch registers.
503 +(define_expand "mul<ALLSA:mode>3_call"
504 + [(set (reg:ALLSA 18) (match_operand:ALLSA 1 "register_operand" ""))
505 + (set (reg:ALLSA 24) (match_operand:ALLSA 2 "register_operand" ""))
506 + (parallel [(set (reg:ALLSA 14) (mult:ALLSA (reg:ALLSA 18) (reg:ALLSA 24)))
507 + (clobber (reg:ALLSA 18))
508 + (clobber (reg:ALLSA 24))
509 + (clobber (reg:HI 22))])
510 + (set (match_operand:ALLSA 0 "register_operand" "") (reg:ALLSA 14))]
514 +(define_insn "*mul<ALLSA:mode>3_call"
515 + [(set (reg:ALLSA 14) (mult:ALLSA (reg:ALLSA 18) (reg:ALLSA 24)))
516 + (clobber (reg:ALLSA 18))
517 + (clobber (reg:ALLSA 24))
518 + (clobber (reg:HI 22))]
520 + "%~call __mul<ALLSA:mode>3"
521 + [(set_attr "type" "xcall")
522 + (set_attr "cc" "clobber")])
524 +; / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
527 +(define_code_iterator usdiv [udiv div]) ; do signed and unsigned in one shot
529 +(define_expand "<usdiv:code><ALLQQ:mode>3"
530 + [(set (reg:ALLQQ 25) (match_operand:ALLQQ 1 "register_operand" ""))
531 + (set (reg:ALLQQ 22) (match_operand:ALLQQ 2 "register_operand" ""))
532 + (parallel [(set (reg:ALLQQ 24) (usdiv:ALLQQ (reg:ALLQQ 25) (reg:ALLQQ 22)))
533 + (clobber (reg:ALLQQ 25))
534 + (clobber (reg:QI 23))])
535 + (set (match_operand:ALLQQ 0 "register_operand" "") (reg:ALLQQ 24))]
539 +(define_insn "*<usdiv:code><ALLQQ:mode>3_call"
540 + [(set (reg:ALLQQ 24) (usdiv:ALLQQ (reg:ALLQQ 25) (reg:ALLQQ 22)))
541 + (clobber (reg:ALLQQ 25))
542 + (clobber (reg:QI 23))]
544 + "%~call __<usdiv:code><ALLQQ:mode>3"
545 + [(set_attr "type" "xcall")
546 + (set_attr "cc" "clobber")])
548 +(define_expand "<usdiv:code><ALLHQHA:mode>3"
549 + [(set (reg:ALLHQHA 26) (match_operand:ALLHQHA 1 "register_operand" ""))
550 + (set (reg:ALLHQHA 22) (match_operand:ALLHQHA 2 "register_operand" ""))
551 + (parallel [(set (reg:ALLHQHA 24) (usdiv:ALLHQHA (reg:ALLHQHA 26) (reg:ALLHQHA 22)))
552 + (clobber (reg:ALLHQHA 26))
553 + (clobber (reg:QI 21))])
554 + (set (match_operand:ALLHQHA 0 "register_operand" "") (reg:ALLHQHA 24))]
558 +(define_insn "*<usdiv:code><ALLHQHA:mode>3_call"
559 + [(set (reg:ALLHQHA 24) (usdiv:ALLHQHA (reg:ALLHQHA 26) (reg:ALLHQHA 22)))
560 + (clobber (reg:ALLHQHA 26))
561 + (clobber (reg:QI 21))]
563 + "%~call __<usdiv:code><ALLHQHA:mode>3"
564 + [(set_attr "type" "xcall")
565 + (set_attr "cc" "clobber")])
567 +; Note that the first parameter arrives already offset by 2 bytes.
568 +(define_expand "<usdiv:code><ALLSA:mode>3"
569 + [(set (reg:ALLSA 24) (match_operand:ALLSA 1 "register_operand" ""))
570 + (set (reg:ALLSA 18) (match_operand:ALLSA 2 "register_operand" ""))
571 + (parallel [(set (reg:ALLSA 22) (usdiv:ALLSA (reg:ALLSA 24) (reg:ALLSA 18)))
572 + (clobber (reg:HI 26))
573 + (clobber (reg:HI 30))])
574 + (set (match_operand:ALLSA 0 "register_operand" "") (reg:ALLSA 22))]
578 +(define_insn "*<usdiv:code><ALLSA:mode>3_call"
579 + [(set (reg:ALLSA 22) (usdiv:ALLSA (reg:ALLSA 24) (reg:ALLSA 18)))
580 + (clobber (reg:HI 26))
581 + (clobber (reg:HI 30))]
583 + "%~call __<usdiv:code><ALLSA:mode>3"
584 + [(set_attr "type" "xcall")
585 + (set_attr "cc" "clobber")])
588 +;; abs must be defined for fixed-point types for correct operation.
590 +;; abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x)
594 +(define_insn "abs<ALLQQ:mode>2"
595 + [(set (match_operand:ALLQQ 0 "register_operand" "=r")
596 + (abs:ALLQQ (match_operand:ALLQQ 1 "register_operand" "0")))]
600 + [(set_attr "length" "2")
601 + (set_attr "cc" "clobber")])
602 diff -Naurp gcc/config/avr/avr.md gcc/config/avr/avr.md
603 --- gcc/config/avr/avr.md 2011-10-27 16:45:17.000000000 +0530
604 +++ gcc/config/avr/avr.md 2011-10-27 16:55:55.000000000 +0530
606 (include "predicates.md")
607 (include "constraints.md")
609 +; fixed-point instructions.
610 +(include "avr-fixed.md")
611 +(define_mode_iterator ALLQ [(QI "") (QQ "") (UQQ "")])
612 +(define_mode_iterator ALLH [(HI "") (HQ "") (UHQ "") (HA "") (UHA "")])
613 +(define_mode_iterator ALLS [(SI "") (SA "") (USA "")])
614 +(define_mode_iterator ALLQS [(QI "") (QQ "") (UQQ "")
615 + (HI "") (HQ "") (UHQ "") (HA "") (UHA "")
616 + (SI "") (SA "") (USA "")])
618 ;; Condition code settings.
619 (define_attr "cc" "none,set_czn,set_zn,set_n,compare,clobber"
620 (const_string "none"))
621 @@ -179,28 +188,27 @@
626 -(define_insn "*pushqi"
627 - [(set (mem:QI (post_dec:HI (reg:HI REG_SP)))
628 - (match_operand:QI 0 "reg_or_0_operand" "r,L"))]
629 +(define_insn "*push<ALLQ:mode>"
630 + [(set (mem:ALLQ (post_dec:HI (reg:HI REG_SP)))
631 + (match_operand:ALLQ 0 "reg_or_0_operand" "r,L"))]
636 [(set_attr "length" "1,1")])
638 -(define_insn "*pushhi"
639 - [(set (mem:HI (post_dec:HI (reg:HI REG_SP)))
640 - (match_operand:HI 0 "reg_or_0_operand" "r,L"))]
641 +(define_insn "*push<ALLH:mode>"
642 + [(set (mem:ALLH (post_dec:HI (reg:HI REG_SP)))
643 + (match_operand:ALLH 0 "reg_or_0_operand" "r,L"))]
647 push __zero_reg__\;push __zero_reg__"
648 [(set_attr "length" "2,2")])
650 -(define_insn "*pushsi"
651 - [(set (mem:SI (post_dec:HI (reg:HI REG_SP)))
652 - (match_operand:SI 0 "reg_or_0_operand" "r,L"))]
653 +(define_insn "*push<ALLS:mode>"
654 + [(set (mem:ALLS (post_dec:HI (reg:HI REG_SP)))
655 + (match_operand:ALLS 0 "reg_or_0_operand" "r,L"))]
658 push %D0\;push %C0\;push %B0\;push %A0
659 @@ -226,21 +234,21 @@
660 ;; are call-saved registers, and most of LD_REGS are call-used registers,
661 ;; so this may still be a win for registers live across function calls.
663 -(define_expand "movqi"
664 - [(set (match_operand:QI 0 "nonimmediate_operand" "")
665 - (match_operand:QI 1 "general_operand" ""))]
666 +(define_expand "mov<ALLQ:mode>"
667 + [(set (match_operand:ALLQ 0 "nonimmediate_operand" "")
668 + (match_operand:ALLQ 1 "general_operand" ""))]
670 "/* One of the ops has to be in a register. */
671 - if (!register_operand(operand0, QImode)
672 - && ! (register_operand(operand1, QImode) || const0_rtx == operand1))
673 - operands[1] = copy_to_mode_reg(QImode, operand1);
674 + if (!register_operand(operand0, <ALLQ:MODE>mode)
675 + && ! (register_operand(operand1, <ALLQ:MODE>mode) || const0_rtx == operand1))
676 + operands[1] = copy_to_mode_reg(<ALLQ:MODE>mode, operand1);
679 -(define_insn "*movqi"
680 - [(set (match_operand:QI 0 "nonimmediate_operand" "=r,d,Qm,r,q,r,*r")
681 - (match_operand:QI 1 "general_operand" "rL,i,rL,Qm,r,q,i"))]
682 - "(register_operand (operands[0],QImode)
683 - || register_operand (operands[1], QImode) || const0_rtx == operands[1])"
684 +(define_insn "*mov<ALLQ:mode>"
685 + [(set (match_operand:ALLQ 0 "nonimmediate_operand" "=r,d,Qm,r,q,r,*r")
686 + (match_operand:ALLQ 1 "general_operand" "r,i,rL,Qm,r,q,i"))]
687 + "(register_operand (operands[0],<ALLQ:MODE>mode)
688 + || register_operand (operands[1], <ALLQ:MODE>mode) || const0_rtx == operands[1])"
689 "* return output_movqi (insn, operands, NULL);"
690 [(set_attr "length" "1,1,5,5,1,1,4")
691 (set_attr "cc" "none,none,clobber,clobber,none,none,clobber")])
692 @@ -272,17 +280,17 @@
693 ;;============================================================================
694 ;; move word (16 bit)
696 -(define_expand "movhi"
697 - [(set (match_operand:HI 0 "nonimmediate_operand" "")
698 - (match_operand:HI 1 "general_operand" ""))]
699 +(define_expand "mov<ALLH:mode>"
700 + [(set (match_operand:ALLH 0 "nonimmediate_operand" "")
701 + (match_operand:ALLH 1 "general_operand" ""))]
705 /* One of the ops has to be in a register. */
706 - if (!register_operand(operand0, HImode)
707 - && !(register_operand(operand1, HImode) || const0_rtx == operands[1]))
708 + if (!register_operand(operand0, <ALLH:MODE>mode)
709 + && !(register_operand(operand1, <ALLH:MODE>mode) || const0_rtx == operands[1]))
711 - operands[1] = copy_to_mode_reg(HImode, operand1);
712 + operands[1] = copy_to_mode_reg(<ALLH:MODE>mode, operand1);
716 @@ -337,20 +345,20 @@
717 [(set_attr "length" "4")
718 (set_attr "cc" "none")])
720 -(define_insn "*movhi"
721 - [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,d,*r,q,r")
722 - (match_operand:HI 1 "general_operand" "rL,m,rL,i,i,r,q"))]
723 - "(register_operand (operands[0],HImode)
724 - || register_operand (operands[1],HImode) || const0_rtx == operands[1])"
725 +(define_insn "*mov<ALLH:mode>"
726 + [(set (match_operand:ALLH 0 "nonimmediate_operand" "=r,r,m,d,*r,q,r")
727 + (match_operand:ALLH 1 "general_operand" "r,m,rL,i,i,r,q"))]
728 + "(register_operand (operands[0],<ALLH:MODE>mode)
729 + || register_operand (operands[1],<ALLH:MODE>mode) || const0_rtx == operands[1])"
730 "* return output_movhi (insn, operands, NULL);"
731 [(set_attr "length" "2,6,7,2,6,5,2")
732 (set_attr "cc" "none,clobber,clobber,none,clobber,none,none")])
734 (define_peephole2 ; movw
735 - [(set (match_operand:QI 0 "even_register_operand" "")
736 - (match_operand:QI 1 "even_register_operand" ""))
737 - (set (match_operand:QI 2 "odd_register_operand" "")
738 - (match_operand:QI 3 "odd_register_operand" ""))]
739 + [(set (match_operand:ALLQ 0 "even_register_operand" "")
740 + (match_operand:ALLQ 1 "even_register_operand" ""))
741 + (set (match_operand:ALLQ 2 "odd_register_operand" "")
742 + (match_operand:ALLQ 3 "odd_register_operand" ""))]
744 && REGNO (operands[0]) == REGNO (operands[2]) - 1
745 && REGNO (operands[1]) == REGNO (operands[3]) - 1)"
746 @@ -361,10 +369,10 @@
749 (define_peephole2 ; movw_r
750 - [(set (match_operand:QI 0 "odd_register_operand" "")
751 - (match_operand:QI 1 "odd_register_operand" ""))
752 - (set (match_operand:QI 2 "even_register_operand" "")
753 - (match_operand:QI 3 "even_register_operand" ""))]
754 + [(set (match_operand:ALLQ 0 "odd_register_operand" "")
755 + (match_operand:ALLQ 1 "odd_register_operand" ""))
756 + (set (match_operand:ALLQ 2 "even_register_operand" "")
757 + (match_operand:ALLQ 3 "even_register_operand" ""))]
759 && REGNO (operands[2]) == REGNO (operands[0]) - 1
760 && REGNO (operands[3]) == REGNO (operands[1]) - 1)"
761 @@ -377,26 +385,24 @@
762 ;;==========================================================================
763 ;; move double word (32 bit)
765 -(define_expand "movsi"
766 - [(set (match_operand:SI 0 "nonimmediate_operand" "")
767 - (match_operand:SI 1 "general_operand" ""))]
768 +(define_expand "mov<ALLS:mode>"
769 + [(set (match_operand:ALLS 0 "nonimmediate_operand" "")
770 + (match_operand:ALLS 1 "general_operand" ""))]
774 /* One of the ops has to be in a register. */
775 - if (!register_operand (operand0, SImode)
776 - && !(register_operand (operand1, SImode) || const0_rtx == operand1))
777 + if (!register_operand (operand0, <ALLS:MODE>mode)
778 + && !(register_operand (operand1, <ALLS:MODE>mode) || const0_rtx == operand1))
780 - operands[1] = copy_to_mode_reg (SImode, operand1);
781 + operands[1] = copy_to_mode_reg (<ALLS:MODE>mode, operand1);
787 (define_peephole2 ; movsi_lreg_const
788 [(match_scratch:QI 2 "d")
789 - (set (match_operand:SI 0 "l_register_operand" "")
790 - (match_operand:SI 1 "immediate_operand" ""))
791 + (set (match_operand:ALLS 0 "l_register_operand" "")
792 + (match_operand:ALLS 1 "immediate_operand" ""))
794 "(operands[1] != const0_rtx
795 && operands[1] != constm1_rtx)"
798 ;; '*' because it is not used in rtl generation.
799 (define_insn "*reload_insi"
800 - [(set (match_operand:SI 0 "register_operand" "=r")
801 - (match_operand:SI 1 "immediate_operand" "i"))
802 + [(set (match_operand:ALLS 0 "register_operand" "=r")
803 + (match_operand:ALLS 1 "immediate_operand" "i"))
804 (clobber (match_operand:QI 2 "register_operand" "=&d"))]
806 "* return output_reload_insisf (insn, operands, NULL);"
807 @@ -415,11 +421,11 @@
808 (set_attr "cc" "none")])
811 -(define_insn "*movsi"
812 - [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,Qm,!d,r")
813 - (match_operand:SI 1 "general_operand" "r,L,Qm,rL,i,i"))]
814 - "(register_operand (operands[0],SImode)
815 - || register_operand (operands[1],SImode) || const0_rtx == operands[1])"
816 +(define_insn "*mov<ALLS:mode>"
817 + [(set (match_operand:ALLS 0 "nonimmediate_operand" "=r,r,r,Qm,!d,r")
818 + (match_operand:ALLS 1 "general_operand" "r,L,Qm,rL,i,i"))]
819 + "(register_operand (operands[0],<ALLS:MODE>mode)
820 + || register_operand (operands[1],<ALLS:MODE>mode) || const0_rtx == operands[1])"
821 "* return output_movsisf (insn, operands, NULL);"
822 [(set_attr "length" "4,4,8,9,4,10")
823 (set_attr "cc" "none,set_zn,clobber,clobber,none,clobber")])
824 @@ -956,23 +962,54 @@
825 [(set_attr "type" "xcall")
826 (set_attr "cc" "clobber")])
828 -(define_insn "mulqihi3"
829 +;; Define code iterators
830 +(define_code_iterator any_extend [sign_extend zero_extend])
831 +(define_code_attr s [(sign_extend "s") (zero_extend "")])
832 +(define_code_attr u [(sign_extend "") (zero_extend "u")])
833 +(define_code_attr su [(sign_extend "s") (zero_extend "u")])
835 +(define_insn "<any_extend:su>mulqi3_highpart"
836 + [(set (match_operand:QI 0 "register_operand" "=r")
839 + (mult:HI (any_extend:HI (match_operand:QI 1 "register_operand" "d"))
840 + (any_extend:HI (match_operand:QI 2 "register_operand" "d")))
842 + "AVR_HAVE_MUL && !optimize_size"
843 + "mul<any_extend:s> %1,%2
846 + [(set_attr "length" "3")
847 + (set_attr "cc" "clobber")])
849 +(define_insn "<any_extend:u>mulqihi3"
850 [(set (match_operand:HI 0 "register_operand" "=r")
851 - (mult:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "d"))
852 - (sign_extend:HI (match_operand:QI 2 "register_operand" "d"))))]
853 + (mult:HI (any_extend:HI (match_operand:QI 1 "register_operand" "d"))
854 + (any_extend:HI (match_operand:QI 2 "register_operand" "d"))))]
857 + "mul<any_extend:s> %1,%2
860 [(set_attr "length" "3")
861 (set_attr "cc" "clobber")])
863 -(define_insn "umulqihi3"
864 +(define_insn "*sumulqihi3"
865 [(set (match_operand:HI 0 "register_operand" "=r")
866 - (mult:HI (zero_extend:HI (match_operand:QI 1 "register_operand" "r"))
867 - (zero_extend:HI (match_operand:QI 2 "register_operand" "r"))))]
868 + (mult:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "a"))
869 + (zero_extend:HI (match_operand:QI 2 "register_operand" "a"))))]
875 + [(set_attr "length" "3")
876 + (set_attr "cc" "clobber")])
878 +(define_insn "*usmulqihi3"
879 + [(set (match_operand:HI 0 "register_operand" "=r")
880 + (mult:HI (zero_extend:HI (match_operand:QI 1 "register_operand" "a"))
881 + (sign_extend:HI (match_operand:QI 2 "register_operand" "a"))))]
886 [(set_attr "length" "3")
887 @@ -1026,6 +1063,50 @@
888 [(set_attr "type" "xcall")
889 (set_attr "cc" "clobber")])
891 +(define_expand "<any_extend:u>mulhisi3"
892 + [(set (reg:HI 18) (match_operand:SI 1 "register_operand" ""))
893 + (set (reg:HI 20) (match_operand:SI 2 "register_operand" ""))
895 + (mult:SI (any_extend:SI (reg:HI 18))
896 + (any_extend:SI (reg:HI 20))))
897 + (set (match_operand:SI 0 "register_operand" "") (reg:SI 22))]
901 +(define_insn "*<any_extend:u>mulhisi3_call"
903 + (mult:SI (any_extend:SI (reg:HI 18))
904 + (any_extend:SI (reg:HI 20))))]
906 + "%~call __<any_extend:u>mulhisi3"
907 + [(set_attr "type" "xcall")
908 + (set_attr "cc" "clobber")])
910 +(define_expand "<any_extend:su>mulhi3_highpart"
911 + [(set (reg:HI 18) (match_operand:HI 1 "register_operand" ""))
912 + (set (reg:HI 20) (match_operand:HI 2 "register_operand" ""))
913 + (set (reg:HI 24) (truncate:HI (lshiftrt:SI
914 + (mult:SI (any_extend:SI (reg:HI 18))
915 + (any_extend:SI (reg:HI 20)))
917 + (set (match_operand:SI 0 "register_operand" "") (reg:HI 24))]
921 +(define_insn_and_split "*<any_extend:su>mulhi3_highpart_call"
922 + [(set (reg:HI 24) (truncate:HI (lshiftrt:SI
923 + (mult:SI (any_extend:SI (reg:HI 18))
924 + (any_extend:SI (reg:HI 20)))
930 + (mult:SI (any_extend:SI (reg:HI 18))
931 + (any_extend:SI (reg:HI 20))))
932 + (clobber (reg:HI 22))]
935 ;; Operand 2 (reg:SI 18) not clobbered on the enhanced core.
936 ;; All call-used registers clobbered otherwise - normal library call.
937 (define_expand "mulsi3"
938 @@ -1574,9 +1655,9 @@
939 ;;<< << << << << << << << << << << << << << << << << << << << << << << << << <<
940 ;; arithmetic shift left
942 -(define_expand "ashlqi3"
943 - [(set (match_operand:QI 0 "register_operand" "")
944 - (ashift:QI (match_operand:QI 1 "register_operand" "")
945 +(define_expand "ashl<ALLQ:mode>3"
946 + [(set (match_operand:ALLQ 0 "register_operand" "")
947 + (ashift:ALLQ (match_operand:ALLQ 1 "register_operand" "")
948 (match_operand:QI 2 "general_operand" "")))]
951 @@ -1610,27 +1691,27 @@
952 (set (match_dup 0) (and:QI (match_dup 0) (const_int -64)))]
955 -(define_insn "*ashlqi3"
956 - [(set (match_operand:QI 0 "register_operand" "=r,r,r,r,!d,r,r")
957 - (ashift:QI (match_operand:QI 1 "register_operand" "0,0,0,0,0,0,0")
958 +(define_insn "*ashl<ALLQ:mode>3"
959 + [(set (match_operand:ALLQ 0 "register_operand" "=r,r,r,r,!d,r,r")
960 + (ashift:ALLQ (match_operand:ALLQ 1 "register_operand" "0,0,0,0,0,0,0")
961 (match_operand:QI 2 "general_operand" "r,L,P,K,n,n,Qm")))]
963 "* return ashlqi3_out (insn, operands, NULL);"
964 [(set_attr "length" "5,0,1,2,4,6,9")
965 (set_attr "cc" "clobber,none,set_czn,set_czn,set_czn,set_czn,clobber")])
967 -(define_insn "ashlhi3"
968 - [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r,r,r")
969 - (ashift:HI (match_operand:HI 1 "register_operand" "0,0,0,r,0,0,0")
970 +(define_insn "ashl<ALLH:mode>3"
971 + [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r,r,r")
972 + (ashift:ALLH (match_operand:ALLH 1 "register_operand" "0,0,0,r,0,0,0")
973 (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
975 "* return ashlhi3_out (insn, operands, NULL);"
976 [(set_attr "length" "6,0,2,2,4,10,10")
977 (set_attr "cc" "clobber,none,set_n,clobber,set_n,clobber,clobber")])
979 -(define_insn "ashlsi3"
980 - [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r")
981 - (ashift:SI (match_operand:SI 1 "register_operand" "0,0,0,r,0,0,0")
982 +(define_insn "ashl<ALLS:mode>3"
983 + [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r,r,r,r")
984 + (ashift:ALLS (match_operand:ALLS 1 "register_operand" "0,0,0,r,0,0,0")
985 (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
987 "* return ashlsi3_out (insn, operands, NULL);"
988 @@ -1676,17 +1757,17 @@
991 [(match_scratch:QI 3 "d")
992 - (set (match_operand:HI 0 "register_operand" "")
993 - (ashift:HI (match_operand:HI 1 "register_operand" "")
994 + (set (match_operand:ALLH 0 "register_operand" "")
995 + (ashift:ALLH (match_operand:ALLH 1 "register_operand" "")
996 (match_operand:QI 2 "const_int_operand" "")))]
998 - [(parallel [(set (match_dup 0) (ashift:HI (match_dup 1) (match_dup 2)))
999 + [(parallel [(set (match_dup 0) (ashift:ALLH (match_dup 1) (match_dup 2)))
1000 (clobber (match_dup 3))])]
1003 -(define_insn "*ashlhi3_const"
1004 - [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r")
1005 - (ashift:HI (match_operand:HI 1 "register_operand" "0,0,r,0,0")
1006 +(define_insn "*ashl<ALLH:mode>3_const"
1007 + [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r")
1008 + (ashift:ALLH (match_operand:ALLH 1 "register_operand" "0,0,r,0,0")
1009 (match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
1010 (clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
1012 @@ -1696,17 +1777,17 @@
1015 [(match_scratch:QI 3 "d")
1016 - (set (match_operand:SI 0 "register_operand" "")
1017 - (ashift:SI (match_operand:SI 1 "register_operand" "")
1018 + (set (match_operand:ALLS 0 "register_operand" "")
1019 + (ashift:ALLS (match_operand:ALLS 1 "register_operand" "")
1020 (match_operand:QI 2 "const_int_operand" "")))]
1022 - [(parallel [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
1023 + [(parallel [(set (match_dup 0) (ashift:ALLS (match_dup 1) (match_dup 2)))
1024 (clobber (match_dup 3))])]
1027 -(define_insn "*ashlsi3_const"
1028 - [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
1029 - (ashift:SI (match_operand:SI 1 "register_operand" "0,0,r,0")
1030 +(define_insn "*ashl<ALLS:mode>3_const"
1031 + [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r")
1032 + (ashift:ALLS (match_operand:ALLS 1 "register_operand" "0,0,r,0")
1033 (match_operand:QI 2 "const_int_operand" "L,P,O,n")))
1034 (clobber (match_scratch:QI 3 "=X,X,X,&d"))]
1036 @@ -1717,27 +1798,27 @@
1037 ;; >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
1038 ;; arithmetic shift right
1040 -(define_insn "ashrqi3"
1041 - [(set (match_operand:QI 0 "register_operand" "=r,r,r,r,r,r")
1042 - (ashiftrt:QI (match_operand:QI 1 "register_operand" "0,0,0,0,0,0")
1043 +(define_insn "ashr<ALLQ:mode>3"
1044 + [(set (match_operand:ALLQ 0 "register_operand" "=r,r,r,r,r,r")
1045 + (ashiftrt:ALLQ (match_operand:ALLQ 1 "register_operand" "0,0,0,0,0,0")
1046 (match_operand:QI 2 "general_operand" "r,L,P,K,n,Qm")))]
1048 "* return ashrqi3_out (insn, operands, NULL);"
1049 [(set_attr "length" "5,0,1,2,5,9")
1050 (set_attr "cc" "clobber,none,clobber,clobber,clobber,clobber")])
1052 -(define_insn "ashrhi3"
1053 - [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r,r,r")
1054 - (ashiftrt:HI (match_operand:HI 1 "register_operand" "0,0,0,r,0,0,0")
1055 +(define_insn "ashr<ALLH:mode>3"
1056 + [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r,r,r")
1057 + (ashiftrt:ALLH (match_operand:ALLH 1 "register_operand" "0,0,0,r,0,0,0")
1058 (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
1060 "* return ashrhi3_out (insn, operands, NULL);"
1061 [(set_attr "length" "6,0,2,4,4,10,10")
1062 (set_attr "cc" "clobber,none,clobber,set_n,clobber,clobber,clobber")])
1064 -(define_insn "ashrsi3"
1065 - [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r")
1066 - (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,0,r,0,0,0")
1067 +(define_insn "ashr<ALLS:mode>3"
1068 + [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r,r,r,r")
1069 + (ashiftrt:ALLS (match_operand:ALLS 1 "register_operand" "0,0,0,r,0,0,0")
1070 (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
1072 "* return ashrsi3_out (insn, operands, NULL);"
1073 @@ -1748,17 +1829,17 @@
1076 [(match_scratch:QI 3 "d")
1077 - (set (match_operand:HI 0 "register_operand" "")
1078 - (ashiftrt:HI (match_operand:HI 1 "register_operand" "")
1079 + (set (match_operand:ALLH 0 "register_operand" "")
1080 + (ashiftrt:ALLH (match_operand:ALLH 1 "register_operand" "")
1081 (match_operand:QI 2 "const_int_operand" "")))]
1083 - [(parallel [(set (match_dup 0) (ashiftrt:HI (match_dup 1) (match_dup 2)))
1084 + [(parallel [(set (match_dup 0) (ashiftrt:ALLH (match_dup 1) (match_dup 2)))
1085 (clobber (match_dup 3))])]
1088 (define_insn "*ashrhi3_const"
1089 - [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r")
1090 - (ashiftrt:HI (match_operand:HI 1 "register_operand" "0,0,r,0,0")
1091 + [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r")
1092 + (ashiftrt:ALLH (match_operand:ALLH 1 "register_operand" "0,0,r,0,0")
1093 (match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
1094 (clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
1096 @@ -1768,17 +1849,17 @@
1099 [(match_scratch:QI 3 "d")
1100 - (set (match_operand:SI 0 "register_operand" "")
1101 - (ashiftrt:SI (match_operand:SI 1 "register_operand" "")
1102 + (set (match_operand:ALLS 0 "register_operand" "")
1103 + (ashiftrt:ALLS (match_operand:ALLS 1 "register_operand" "")
1104 (match_operand:QI 2 "const_int_operand" "")))]
1106 - [(parallel [(set (match_dup 0) (ashiftrt:SI (match_dup 1) (match_dup 2)))
1107 + [(parallel [(set (match_dup 0) (ashiftrt:ALLS (match_dup 1) (match_dup 2)))
1108 (clobber (match_dup 3))])]
1111 (define_insn "*ashrsi3_const"
1112 - [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
1113 - (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r,0")
1114 + [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r")
1115 + (ashiftrt:ALLS (match_operand:ALLS 1 "register_operand" "0,0,r,0")
1116 (match_operand:QI 2 "const_int_operand" "L,P,O,n")))
1117 (clobber (match_scratch:QI 3 "=X,X,X,&d"))]
1119 @@ -1789,54 +1870,54 @@
1120 ;; >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
1121 ;; logical shift right
1123 -(define_expand "lshrqi3"
1124 - [(set (match_operand:QI 0 "register_operand" "")
1125 - (lshiftrt:QI (match_operand:QI 1 "register_operand" "")
1126 - (match_operand:QI 2 "general_operand" "")))]
1127 +(define_expand "lshr<ALLQ:mode>3"
1128 + [(set (match_operand:ALLQ 0 "register_operand" "")
1129 + (lshiftrt:ALLQ (match_operand:ALLQ 1 "register_operand" "")
1130 + (match_operand:ALLQ 2 "general_operand" "")))]
1134 (define_split ; lshrqi3_const4
1135 - [(set (match_operand:QI 0 "d_register_operand" "")
1136 - (lshiftrt:QI (match_dup 0)
1137 + [(set (match_operand:ALLQ 0 "d_register_operand" "")
1138 + (lshiftrt:ALLQ (match_dup 0)
1141 - [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
1142 - (set (match_dup 0) (and:QI (match_dup 0) (const_int 15)))]
1143 + [(set (match_dup 0) (rotate:ALLQ (match_dup 0) (const_int 4)))
1144 + (set (match_dup 0) (and:ALLQ (match_dup 0) (const_int 15)))]
1147 (define_split ; lshrqi3_const5
1148 - [(set (match_operand:QI 0 "d_register_operand" "")
1149 - (lshiftrt:QI (match_dup 0)
1150 + [(set (match_operand:ALLQ 0 "d_register_operand" "")
1151 + (lshiftrt:ALLQ (match_dup 0)
1154 - [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
1155 - (set (match_dup 0) (lshiftrt:QI (match_dup 0) (const_int 1)))
1156 - (set (match_dup 0) (and:QI (match_dup 0) (const_int 7)))]
1157 + [(set (match_dup 0) (rotate:ALLQ (match_dup 0) (const_int 4)))
1158 + (set (match_dup 0) (lshiftrt:ALLQ (match_dup 0) (const_int 1)))
1159 + (set (match_dup 0) (and:ALLQ (match_dup 0) (const_int 7)))]
1162 (define_split ; lshrqi3_const6
1163 - [(set (match_operand:QI 0 "d_register_operand" "")
1164 - (lshiftrt:QI (match_dup 0)
1165 + [(set (match_operand:ALLQ 0 "d_register_operand" "")
1166 + (lshiftrt:ALLQ (match_dup 0)
1169 - [(set (match_dup 0) (rotate:QI (match_dup 0) (const_int 4)))
1170 - (set (match_dup 0) (lshiftrt:QI (match_dup 0) (const_int 2)))
1171 - (set (match_dup 0) (and:QI (match_dup 0) (const_int 3)))]
1172 + [(set (match_dup 0) (rotate:ALLQ (match_dup 0) (const_int 4)))
1173 + (set (match_dup 0) (lshiftrt:ALLQ (match_dup 0) (const_int 2)))
1174 + (set (match_dup 0) (and:ALLQ (match_dup 0) (const_int 3)))]
1177 (define_insn "*lshrqi3"
1178 - [(set (match_operand:QI 0 "register_operand" "=r,r,r,r,!d,r,r")
1179 - (lshiftrt:QI (match_operand:QI 1 "register_operand" "0,0,0,0,0,0,0")
1180 - (match_operand:QI 2 "general_operand" "r,L,P,K,n,n,Qm")))]
1181 + [(set (match_operand:ALLQ 0 "register_operand" "=r,r,r,r,!d,r,r")
1182 + (lshiftrt:ALLQ (match_operand:ALLQ 1 "register_operand" "0,0,0,0,0,0,0")
1183 + (match_operand:ALLQ 2 "general_operand" "r,L,P,K,n,n,Qm")))]
1185 "* return lshrqi3_out (insn, operands, NULL);"
1186 [(set_attr "length" "5,0,1,2,4,6,9")
1187 (set_attr "cc" "clobber,none,set_czn,set_czn,set_czn,set_czn,clobber")])
1189 -(define_insn "lshrhi3"
1190 - [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r,r,r")
1191 - (lshiftrt:HI (match_operand:HI 1 "register_operand" "0,0,0,r,0,0,0")
1192 +(define_insn "lshr<ALLH:mode>3"
1193 + [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r,r,r")
1194 + (lshiftrt:ALLH (match_operand:ALLH 1 "register_operand" "0,0,0,r,0,0,0")
1195 (match_operand:QI 2 "general_operand" "r,L,P,O,K,n,Qm")))]
1197 "* return lshrhi3_out (insn, operands, NULL);"
1198 @@ -1891,17 +1972,17 @@
1201 [(match_scratch:QI 3 "d")
1202 - (set (match_operand:HI 0 "register_operand" "")
1203 - (lshiftrt:HI (match_operand:HI 1 "register_operand" "")
1204 + (set (match_operand:ALLH 0 "register_operand" "")
1205 + (lshiftrt:ALLH (match_operand:ALLH 1 "register_operand" "")
1206 (match_operand:QI 2 "const_int_operand" "")))]
1208 - [(parallel [(set (match_dup 0) (lshiftrt:HI (match_dup 1) (match_dup 2)))
1209 + [(parallel [(set (match_dup 0) (lshiftrt:ALLH (match_dup 1) (match_dup 2)))
1210 (clobber (match_dup 3))])]
1213 -(define_insn "*lshrhi3_const"
1214 - [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r")
1215 - (lshiftrt:HI (match_operand:HI 1 "register_operand" "0,0,r,0,0")
1216 +(define_insn "*lshr<ALLH:mode>3_const"
1217 + [(set (match_operand:ALLH 0 "register_operand" "=r,r,r,r,r")
1218 + (lshiftrt:ALLH (match_operand:ALLH 1 "register_operand" "0,0,r,0,0")
1219 (match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
1220 (clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
1222 @@ -1919,9 +2000,9 @@
1223 (clobber (match_dup 3))])]
1226 -(define_insn "*lshrsi3_const"
1227 - [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
1228 - (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r,0")
1229 +(define_insn "*lshr<ALLS:mode>3_const"
1230 + [(set (match_operand:ALLS 0 "register_operand" "=r,r,r,r")
1231 + (lshiftrt:ALLS (match_operand:ALLS 1 "register_operand" "0,0,r,0")
1232 (match_operand:QI 2 "const_int_operand" "L,P,O,n")))
1233 (clobber (match_scratch:QI 3 "=X,X,X,&d"))]
1235 @@ -2171,27 +2252,27 @@
1238 ; Optimize negated tests into reverse compare if overflow is undefined.
1239 -(define_insn "*negated_tstqi"
1240 +(define_insn "*negated_tst<ALLQ:mode>"
1242 - (compare (neg:QI (match_operand:QI 0 "register_operand" "r"))
1243 + (compare (neg:ALLQ (match_operand:ALLQ 0 "register_operand" "r"))
1245 "(!flag_wrapv && !flag_trapv && flag_strict_overflow)"
1246 "cp __zero_reg__,%0"
1247 [(set_attr "cc" "compare")
1248 (set_attr "length" "1")])
1250 -(define_insn "*reversed_tstqi"
1251 +(define_insn "*reversed_tst<ALLQ:mode>"
1253 (compare (const_int 0)
1254 - (match_operand:QI 0 "register_operand" "r")))]
1255 + (match_operand:ALLQ 0 "register_operand" "r")))]
1257 "cp __zero_reg__,%0"
1258 [(set_attr "cc" "compare")
1259 (set_attr "length" "2")])
1261 -(define_insn "*negated_tsthi"
1262 +(define_insn "*negated_tst<ALLH:mode>"
1264 - (compare (neg:HI (match_operand:HI 0 "register_operand" "r"))
1265 + (compare (neg:ALLH (match_operand:ALLH 0 "register_operand" "r"))
1267 "(!flag_wrapv && !flag_trapv && flag_strict_overflow)"
1268 "cp __zero_reg__,%A0
1269 @@ -2201,10 +2282,10 @@
1271 ;; Leave here the clobber used by the cmphi pattern for simplicity, even
1272 ;; though it is unused, because this pattern is synthesized by avr_reorg.
1273 -(define_insn "*reversed_tsthi"
1274 +(define_insn "*reversed_tst<ALLH:mode>"
1276 (compare (const_int 0)
1277 - (match_operand:HI 0 "register_operand" "r")))
1278 + (match_operand:ALLH 0 "register_operand" "r")))
1279 (clobber (match_scratch:QI 1 "=X"))]
1281 "cp __zero_reg__,%A0
1282 @@ -2212,9 +2293,9 @@
1283 [(set_attr "cc" "compare")
1284 (set_attr "length" "2")])
1286 -(define_insn "*negated_tstsi"
1287 +(define_insn "*negated_tst<ALLS:mode>"
1289 - (compare (neg:SI (match_operand:SI 0 "register_operand" "r"))
1290 + (compare (neg:ALLS (match_operand:ALLS 0 "register_operand" "r"))
1292 "(!flag_wrapv && !flag_trapv && flag_strict_overflow)"
1293 "cp __zero_reg__,%A0
1294 @@ -2224,10 +2305,10 @@
1295 [(set_attr "cc" "compare")
1296 (set_attr "length" "4")])
1298 -(define_insn "*reversed_tstsi"
1299 +(define_insn "*reversed_tst<ALLS:mode>"
1301 (compare (const_int 0)
1302 - (match_operand:SI 0 "register_operand" "r")))
1303 + (match_operand:ALLS 0 "register_operand" "r")))
1304 (clobber (match_scratch:QI 1 "=X"))]
1306 "cp __zero_reg__,%A0
1307 @@ -2238,10 +2319,10 @@
1308 (set_attr "length" "4")])
1311 -(define_insn "*cmpqi"
1312 +(define_insn "*cmp<ALLQ:mode>"
1314 - (compare (match_operand:QI 0 "register_operand" "r,r,d")
1315 - (match_operand:QI 1 "nonmemory_operand" "L,r,i")))]
1316 + (compare (match_operand:ALLQ 0 "register_operand" "r,r,d")
1317 + (match_operand:ALLQ 1 "nonmemory_operand" "L,r,i")))]
1321 @@ -2260,10 +2341,10 @@
1322 [(set_attr "cc" "compare")
1323 (set_attr "length" "1")])
1325 -(define_insn "*cmphi"
1326 +(define_insn "*cmp<ALLH:mode>"
1328 - (compare (match_operand:HI 0 "register_operand" "!w,r,r,d,d,r,r")
1329 - (match_operand:HI 1 "nonmemory_operand" "L,L,r,M,i,M,i")))
1330 + (compare (match_operand:ALLH 0 "register_operand" "!w,r,r,d,d,r,r")
1331 + (match_operand:ALLH 1 "nonmemory_operand" "L,L,r,M,i,M,i")))
1332 (clobber (match_scratch:QI 2 "=X,X,X,X,&d,&d,&d"))]
1335 @@ -2308,10 +2389,10 @@
1336 (set_attr "length" "1,2,2,2,3,3,4")])
1339 -(define_insn "*cmpsi"
1340 +(define_insn "*cmp<ALLS:mode>"
1342 - (compare (match_operand:SI 0 "register_operand" "r,r,d,d,r,r")
1343 - (match_operand:SI 1 "nonmemory_operand" "L,r,M,i,M,i")))
1344 + (compare (match_operand:ALLS 0 "register_operand" "r,r,d,d,r,r")
1345 + (match_operand:ALLS 1 "nonmemory_operand" "L,r,M,i,M,i")))
1346 (clobber (match_scratch:QI 2 "=X,X,X,&d,&d,&d"))]
1349 diff -Naurp gcc/config/avr/avr-modes.def gcc/config/avr/avr-modes.def
1350 --- gcc/config/avr/avr-modes.def 1970-01-01 05:30:00.000000000 +0530
1351 +++ gcc/config/avr/avr-modes.def 2011-10-27 16:55:55.000000000 +0530
1353 +/* Definitions of target machine for GCC for AVR.
1354 + Copyright (C) 2009 Free Software Foundation, Inc.
1356 +This file is part of GCC.
1358 +GCC is free software; you can redistribute it and/or modify
1359 +it under the terms of the GNU General Public License as published by
1360 +the Free Software Foundation; either version 3, or (at your option)
1363 +GCC is distributed in the hope that it will be useful,
1364 +but WITHOUT ANY WARRANTY; without even the implied warranty of
1365 +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1366 +GNU General Public License for more details.
1368 +You should have received a copy of the GNU General Public License
1369 +along with GCC; see the file COPYING3. If not see
1370 +<http://www.gnu.org/licenses/>. */
1372 +/* On 8 bit machines it requires fewer instructions for fixed point
1373 + routines if the decimal place is on a byte boundary which is not
1374 + the default for signed accum types. */
1376 +ADJUST_IBIT (HA, 7);
1377 +ADJUST_FBIT (HA, 8);
1379 +ADJUST_IBIT (SA, 15);
1380 +ADJUST_FBIT (SA, 16);
1382 +ADJUST_IBIT (DA, 31);
1383 +ADJUST_FBIT (DA, 32);
1385 +ADJUST_IBIT (TA, 63);
1386 +ADJUST_FBIT (TA, 64);
1387 diff -Naurp gcc/config/avr/avr-protos.h gcc/config/avr/avr-protos.h
1388 --- gcc/config/avr/avr-protos.h 2011-10-27 16:45:17.000000000 +0530
1389 +++ gcc/config/avr/avr-protos.h 2011-10-27 16:55:55.000000000 +0530
1390 @@ -75,6 +75,8 @@ extern const char *lshrhi3_out (rtx insn
1391 extern const char *lshrsi3_out (rtx insn, rtx operands[], int *len);
1392 extern bool avr_rotate_bytes (rtx operands[]);
1394 +extern const char *fract_out (rtx insn, rtx operands[], int intsigned, int *l);
1396 extern void expand_prologue (void);
1397 extern void expand_epilogue (void);
1398 extern int avr_epilogue_uses (int regno);
1399 diff -Naurp gcc/config/avr/libgcc-fixed.S gcc/config/avr/libgcc-fixed.S
1400 --- gcc/config/avr/libgcc-fixed.S 1970-01-01 05:30:00.000000000 +0530
1401 +++ gcc/config/avr/libgcc-fixed.S 2011-10-27 16:55:55.000000000 +0530
1403 +/* -*- Mode: Asm -*- */
1404 +/* Copyright (C) 2009
1405 + Free Software Foundation, Inc.
1406 + Contributed by Sean D'Epagnier
1408 +This file is free software; you can redistribute it and/or modify it
1409 +under the terms of the GNU General Public License as published by the
1410 +Free Software Foundation; either version 3, or (at your option) any
1413 +In addition to the permissions in the GNU General Public License, the
1414 +Free Software Foundation gives you unlimited permission to link the
1415 +compiled version of this file into combinations with other programs,
1416 +and to distribute those combinations without any restriction coming
1417 +from the use of this file. (The General Public License restrictions
1418 +do apply in other respects; for example, they cover modification of
1419 +the file, and distribution when not linked into a combine
1422 +This file is distributed in the hope that it will be useful, but
1423 +WITHOUT ANY WARRANTY; without even the implied warranty of
1424 +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1425 +General Public License for more details.
1427 +You should have received a copy of the GNU General Public License
1428 +along with this program; see the file COPYING. If not, write to
1429 +the Free Software Foundation, 51 Franklin Street, Fifth Floor,
1430 +Boston, MA 02110-1301, USA. */
1432 +/* Fixed point library routines for avr. */
1434 +#define __zero_reg__ r1
1435 +#define __tmp_reg__ r0
1436 +#define __SREG__ 0x3f
1437 +#define __SP_H__ 0x3e
1438 +#define __SP_L__ 0x3d
1439 +#define __RAMPZ__ 0x3B
1441 +/* Conversions to float. */
1442 +#if defined (L_fractqqsf)
1443 + .global __fractqqsf
1447 + sbrc r24, 7 ; if negative
1448 + ser r25 ; sign extend
1449 + mov r23, r24 ; move in place
1450 + mov r24, r25 ; sign extend lower byte
1453 + rjmp __fractsasf ; call larger conversion
1455 +#endif /* defined (L_fractqqsf) */
1457 +#if defined (L_fractuqqsf)
1458 + .global __fractuqqsf
1459 + .func __fractuqqsf
1465 + rjmp __fractsasf ; call larger conversion
1467 +#endif /* defined (L_fractuqqsf) */
1469 +#if defined (L_fracthqsf)
1470 + .global __fracthqsf
1473 + mov_l r22, r24 ; put fractional part in place
1476 + sbrc r23, 7 ; if negative
1477 + ser r25 ; sign extend
1478 + mov r24, r25 ; sign extend lower byte
1481 + rjmp __fractsasf ; call larger conversion
1483 +#endif /* defined (L_fracthqsf) */
1485 +#if defined (L_fractuhqsf)
1486 + .global __fractuhqsf
1487 + .func __fractuhqsf
1489 + mov_l r22, r24 ; put fractional part in place
1493 + rjmp __fractsasf ; call larger conversion
1495 +#endif /* defined (L_fractuhqsf) */
1497 +#if defined (L_fracthasf)
1498 + .global __fracthasf
1502 + mov r23, r24 ; move into place
1505 + sbrc r24, 7 ; if negative
1506 + ser r25 ; sign extend
1507 + rjmp __fractsasf ; call larger conversion
1508 +#endif /* defined (L_fracthasf) */
1510 +#if defined (L_fractuhasf)
1511 + .global __fractuhasf
1512 + .func __fractuhasf
1515 + mov r23, r24 ; move into place
1516 + rjmp __fractsasf ; call larger conversion
1518 +#endif /* defined (L_fractuhasf) */
1520 +#if defined (L_fractsasf)
1521 + .global __fractsasf
1526 + breq __fractsasf_exit ; skip if zero
1527 + subi r25, 0x08 ; adjust exponent
1531 +#endif /* defined (L_fractsasf) */
1533 +#if defined (L_fractusasf)
1534 + .global __fractusasf
1535 + .func __fractusasf
1537 + rcall __floatunsisf
1539 + breq __fractusasf_exit ; skip if zero
1540 + subi r25, 0x08 ; adjust exponent
1544 +#endif /* defined (L_fractusasf) */
1546 +#if defined (L_fractsfqq) /* Conversions from float. */
1547 + .global __fractsfqq
1550 + subi r25, -11 ; adjust exponent
1554 +#endif /* defined (L_fractqq) */
1556 +#if defined (L_fractsfuqq)
1557 + .global __fractsfuqq
1558 + .func __fractsfuqq
1560 + subi r25, -12 ; adjust exponent
1563 +#endif /* defined (L_fractuqq) */
1565 +#if defined (L_fractsfhq)
1566 + .global __fractsfhq
1569 + subi r25, -15 ; adjust exponent
1573 +#endif /* defined (L_fractsfhq) */
1575 +#if defined (L_fractsfuhq)
1576 + .global __fractsfuhq
1577 + .func __fractsfuhq
1579 + subi r25, -16 ; adjust exponent
1582 +#endif /* defined (L_fractsfuhq) */
1584 +#if defined (L_fractsfha)
1585 + .global __fractsfha
1589 + .global __fractsfuha
1590 + .func __fractsfuha
1592 + subi r25, -12 ; adjust exponent
1595 +#endif /* defined (L_fractsfha) */
1597 +#if defined (L_fractsfsa)
1598 + .global __fractsfsa
1602 + .global __fractsfusa
1603 + .func __fractsfusa
1605 + subi r25, -8 ; adjust exponent
1608 +#endif /* defined (L_fractsfsa) */
1610 +/* For multiplication the functions here are called directly from
1611 + avr-fixed.md patterns, instead of using the standard libcall mechanisms.
1612 + This can make better code because GCC knows exactly which
1613 + of the call-used registers (not all of them) are clobbered. */
1615 +/* mulqq and muluqq open coded on the enhanced core */
1616 +#if !defined (__AVR_HAVE_MUL__)
1617 +/*******************************************************
1618 + Fractional Multiplication 8 x 8
1619 +*******************************************************/
1620 +#define r_arg2 r22 /* multiplicand */
1621 +#define r_arg1 r24 /* multiplier */
1622 +#define r_res __tmp_reg__ /* result */
1624 +#if defined (L_mulqq3)
1633 + brcc __mulqq3_skipneg
1638 + brtc __mulqq3_exit
1644 +#endif /* defined (L_mulqq3) */
1646 +#if defined (L_muluqq3)
1650 + clr r_res ; clear result
1652 + lsr r_arg2 ; shift multiplicand
1655 + breq __muluqq3_exit ; while multiplicand != 0
1657 + brne __muluqq3_loop ; exit if multiplier = 0
1659 + mov r_arg1,r_res ; result to return register
1666 +#endif /* defined (L_muluqq3) */
1667 +#endif /* !defined (__AVR_HAVE_MUL__) */
1669 +/*******************************************************
1670 + Fractional Multiplication 16 x 16
1671 +*******************************************************/
1673 +#if defined (__AVR_HAVE_MUL__)
1674 +#define r_arg1L r22 /* multiplier Low */
1675 +#define r_arg1H r23 /* multiplier High */
1676 +#define r_arg2L r20 /* multiplicand Low */
1677 +#define r_arg2H r21 /* multiplicand High */
1678 +#define r_resL r18 /* result Low */
1679 +#define r_resH r19 /* result High */
1681 +#if defined (L_mulhq3)
1685 + fmuls r_arg1H, r_arg2H
1687 + fmulsu r_arg2H, r_arg1L
1689 + sbc r_resH, r_arg1L
1691 + adc r_resH, r_arg1L
1692 + fmulsu r_arg1H, r_arg2L
1693 + sbc r_resH, r_arg1L
1695 + adc r_resH, r_arg1L
1699 +#endif /* defined (L_mulhq3) */
1701 +#if defined (L_muluhq3)
1705 + mul r_arg1H, r_arg2H
1707 + mul r_arg1H, r_arg2L
1710 + adc r_resH, __zero_reg__
1711 + mul r_arg1L, r_arg2H
1714 + adc r_resH, __zero_reg__
1717 +#endif /* defined (L_muluhq3) */
1720 +#define r_arg1L r24 /* multiplier Low */
1721 +#define r_arg1H r25 /* multiplier High */
1722 +#define r_arg2L r22 /* multiplicand Low */
1723 +#define r_arg2H r23 /* multiplicand High */
1724 +#define r_resL __tmp_reg__ /* result Low */
1725 +#define r_resH __zero_reg__ /* result High */
1727 +#if defined (L_mulhq3)
1731 + mov r_resL, r_arg1H
1732 + eor r_resL, r_arg2H
1738 + brcc mulhq3_skipneg
1753 +#endif /* defined (L_mulhq3) */
1755 +#if defined (L_muluhq3)
1759 + clr r_resL ; clear result
1761 + lsr r_arg2H ; shift multiplicand
1764 + rjmp __muluhq3_skip
1765 + add r_resL,r_arg2L ; result + multiplicand
1766 + adc r_resH,r_arg2H
1768 + lsl r_arg1L ; shift multiplier
1770 + brne __muluhq3_loop
1772 + brne __muluhq3_loop ; exit multiplier = 0
1773 + mov_l r_arg1L,r_resL
1774 + mov_h r_arg1H,r_resH ; result to return register
1775 + clr __zero_reg__ ; zero the zero reg
1778 +#endif /* defined (L_muluhq3) */
1780 +#endif /* defined (__AVR_HAVE_MUL__) */
1789 +/*******************************************************
1790 + Fixed Multiplication 8.8 x 8.8
1791 +*******************************************************/
1793 +#if defined (__AVR_HAVE_MUL__)
1794 +#define r_arg1L r22 /* multiplier Low */
1795 +#define r_arg1H r23 /* multiplier High */
1796 +#define r_arg2L r20 /* multiplicand Low */
1797 +#define r_arg2H r21 /* multiplicand High */
1798 +#define r_resL r18 /* result Low */
1799 +#define r_resH r19 /* result High */
1801 +#if defined (L_mulha3)
1805 + mul r_arg1L, r_arg2L
1807 + muls r_arg1H, r_arg2H
1809 + mulsu r_arg1H, r_arg2L
1812 + mulsu r_arg2H, r_arg1L
1818 +#endif /* defined (L_mulha3) */
1820 +#if defined (L_muluha3)
1824 + mul r_arg1L, r_arg2L
1826 + mul r_arg1H, r_arg2H
1828 + mul r_arg1H, r_arg2L
1831 + mul r_arg1L, r_arg2H
1837 +#endif /* defined (L_muluha3) */
1841 +#define r_arg1L r24 /* multiplier Low */
1842 +#define r_arg1H r25 /* multiplier High */
1843 +#define r_arg2L r22 /* multiplicand Low */
1844 +#define r_arg2H r23 /* multiplicand High */
1845 +#define r_resL r18 /* result Low */
1846 +#define r_resH r19 /* result High */
1847 +#define r_scratchL r0 /* scratch Low */
1848 +#define r_scratchH r1
1850 +#if defined (L_mulha3)
1854 + mov r_resL, r_arg1H
1855 + eor r_resL, r_arg2H
1858 + rjmp __mulha3_arg1pos
1864 + rjmp __mulha3_arg2pos
1870 + brtc __mulha3_exit
1877 +#endif /* defined (L_mulha3) */
1879 +#if defined (L_muluha3)
1883 + clr r_resL ; clear result
1885 + mov_l r0, r_arg1L ; save multiplicand
1889 + rjmp __muluha3_skip1
1890 + add r_resL,r_arg1L ; result + multiplicand
1891 + adc r_resH,r_arg1H
1893 + lsl r_arg1L ; shift multiplicand
1896 + breq __muluha3_loop1_done ; exit multiplicand = 0
1898 + brne __muluha3_loop1 ; exit multiplier = 0
1899 +__muluha3_loop1_done:
1900 + mov_l r_arg1L, r_scratchL ; restore multiplicand
1901 + mov_h r_arg1H, r_scratchH
1903 + lsr r_arg1H ; shift multiplicand
1906 + breq __muluha3_exit ; exit if multiplicand = 0
1908 + rjmp __muluha3_skip2
1909 + add r_resL,r_arg1L ; result + multiplicand
1910 + adc r_resH,r_arg1H
1913 + brne __muluha3_loop2 ; exit if multiplier = 0
1915 + clr __zero_reg__ ; got clobbered
1918 +#endif /* defined (L_muluha3) */
1920 +#endif /* defined (__AVR_HAVE_MUL__) */
1929 +/*******************************************************
1930 + Fixed Multiplication 16.16 x 16.16
1931 +*******************************************************/
1933 +#if defined (__AVR_HAVE_MUL__)
1934 +/* uses nonstandard registers because mulus only works from 16-23 */
1937 +#define r_arg1L r16 /* multiplier Low */
1938 +#define r_arg1H r17
1939 +#define r_arg1HL r18
1940 +#define r_arg1HH r19 /* multiplier High */
1942 +#define r_arg2L r20 /* multiplicand Low */
1943 +#define r_arg2H r21
1944 +#define r_arg2HL r22
1945 +#define r_arg2HH r23 /* multiplicand High */
1947 +#define r_resL r24 /* result Low */
1949 +#define r_resHL r26
1950 +#define r_resHH r27 /* result High */
1952 +#if defined (L_mulsa3)
1960 + mul r_arg1H, r_arg2L
1962 + mul r_arg1L, r_arg2H
1965 + mul r_arg1L, r_arg2HL
1968 + adc r_resHL, r_clr
1969 + mul r_arg1H, r_arg2H
1972 + adc r_resHL, r_clr
1973 + mul r_arg1HL, r_arg2L
1976 + adc r_resHL, r_clr
1977 + mulsu r_arg2HH, r_arg1L
1978 + sbc r_resHH, r_clr
1981 + adc r_resHH, r_clr
1982 + mul r_arg1H, r_arg2HL
1985 + adc r_resHH, r_clr
1986 + mul r_arg1HL, r_arg2H
1989 + adc r_resHH, r_clr
1990 + mulsu r_arg1HH, r_arg2L
1991 + sbc r_resHH, r_clr
1994 + adc r_resHH, r_clr
1995 + mulsu r_arg2HH, r_arg1H
1998 + mul r_arg1HL, r_arg2HL
2001 + mulsu r_arg1HH, r_arg2H
2004 + mulsu r_arg2HH, r_arg1HL
2006 + mulsu r_arg1HH, r_arg2HL
2013 +#if defined (L_mulusa3)
2021 + mul r_arg1H, r_arg2L
2023 + mul r_arg1L, r_arg2H
2026 + mul r_arg1L, r_arg2HL
2029 + adc r_resHL, r_clr
2030 + mul r_arg1H, r_arg2H
2033 + adc r_resHL, r_clr
2034 + mul r_arg1HL, r_arg2L
2037 + adc r_resHL, r_clr
2038 + mul r_arg1L, r_arg2HH
2041 + adc r_resHH, r_clr
2042 + mul r_arg1H, r_arg2HL
2045 + adc r_resHH, r_clr
2046 + mul r_arg1HL, r_arg2H
2049 + adc r_resHH, r_clr
2050 + mul r_arg1HH, r_arg2L
2053 + adc r_resHH, r_clr
2054 + mul r_arg1H, r_arg2HH
2057 + mul r_arg1HL, r_arg2HL
2060 + mul r_arg1HH, r_arg2H
2063 + mul r_arg1HL, r_arg2HH
2065 + mul r_arg1HH, r_arg2HL
2074 +#define r_arg1L r18 /* multiplier Low */
2075 +#define r_arg1H r19
2076 +#define r_arg1HL r20
2077 +#define r_arg1HH r21 /* multiplier High */
2079 +/* these registers needed for sbiw */
2080 +#define r_arg2L r24 /* multiplicand Low */
2081 +#define r_arg2H r25
2082 +#define r_arg2HL r26
2083 +#define r_arg2HH r27 /* multiplicand High */
2085 +#define r_resL r14 /* result Low */
2087 +#define r_resHL r16
2088 +#define r_resHH r17 /* result High */
2090 +#define r_scratchL r0 /* scratch Low */
2091 +#define r_scratchH r1
2092 +#define r_scratchHL r22
2093 +#define r_scratchHH r23 /* scratch High */
2095 +#if defined (L_mulsa3)
2099 + mov r_resL, r_arg1HH
2100 + eor r_resL, r_arg2HH
2103 + rjmp __mulsa3_arg1pos
2113 + rjmp __mulsa3_arg2pos
2123 + brtc __mulsa3_exit
2128 + adc r_resL,__zero_reg__
2129 + adc r_resH,__zero_reg__
2130 + adc r_resHL,__zero_reg__
2131 + adc r_resHH,__zero_reg__
2135 +#endif /* defined (L_mulsa3) */
2137 +#if defined (L_mulusa3)
2141 + clr r_resL ; clear result
2143 + mov_l r_resHL, r_resL
2144 + mov_h r_resHH, r_resH
2145 + mov_l r_scratchL, r_arg1L ; save multiplicand
2146 + mov_h r_scratchH, r_arg1H
2147 + mov_l r_scratchHL, r_arg1HL
2148 + mov_h r_scratchHH, r_arg1HH
2151 + rjmp __mulusa3_skip1
2152 + add r_resL,r_arg1L ; result + multiplicand
2153 + adc r_resH,r_arg1H
2154 + adc r_resHL,r_arg1HL
2155 + adc r_resHH,r_arg1HH
2157 + lsl r_arg1L ; shift multiplicand
2164 + brne __mulusa3_loop1 ; exit multiplier = 0
2165 +__mulusa3_loop1_done:
2166 + mov_l r_arg1L, r_scratchL ; restore multiplicand
2167 + mov_h r_arg1H, r_scratchH
2168 + mov_l r_arg1HL, r_scratchHL
2169 + mov_h r_arg1HH, r_scratchHH
2171 + lsr r_arg1HH ; shift multiplicand
2176 + rjmp __mulusa3_skip2
2177 + add r_resL,r_arg1L ; result + multiplicand
2178 + adc r_resH,r_arg1H
2179 + adc r_resHL,r_arg1HL
2180 + adc r_resHH,r_arg1HH
2185 + brne __mulusa3_loop2 ; exit if multiplier = 0
2187 + clr __zero_reg__ ; got clobbered
2190 +#endif /* defined (L_mulusa3) */
2214 +/*******************************************************
2215 + Fractional Division 8 / 8
2216 +*******************************************************/
2217 +#define r_divd r25 /* dividend */
2218 +#define r_quo r24 /* quotient */
2219 +#define r_div r22 /* divisor */
2220 +#define r_cnt r23 /* loop count */
2222 +#if defined (L_divqq3)
2233 + breq __divqq3_minus1 ; if equal return -1
2236 + sbrc r0, 7 ; negate result if needed
2243 +#endif /* defined (L_divqq3) */
2245 +#if defined (L_udivuqq3)
2246 + .global __udivuqq3
2249 + clr r_quo ; clear quotient
2250 + ldi r_cnt,8 ; init loop counter
2252 + lsl r_divd ; shift dividend
2253 + brcs __udivuqq3_ep ; dividend overflow
2254 + cp r_divd,r_div ; compare dividend & divisor
2255 + brcc __udivuqq3_ep ; dividend >= divisor
2256 + rol r_quo ; shift quotient (with CARRY)
2257 + rjmp __udivuqq3_cont
2259 + sub r_divd,r_div ; restore dividend
2260 + lsl r_quo ; shift quotient (without CARRY)
2262 + dec r_cnt ; decrement loop counter
2263 + brne __udivuqq3_loop
2264 + com r_quo ; complement result
2265 + ; because C flag was complemented in loop
2268 +#endif /* defined (L_udivuqq3) */
2276 +/*******************************************************
2277 + Fractional Division 16 / 16
2278 +*******************************************************/
2279 +#define r_divdL r26 /* dividend Low */
2280 +#define r_divdH r27 /* dividend High */
2281 +#define r_quoL r24 /* quotient Low */
2282 +#define r_quoH r25 /* quotient High */
2283 +#define r_divL r22 /* divisor */
2284 +#define r_divH r23 /* divisor */
2287 +#if defined (L_divhq3)
2294 + rjmp __divhq3_divpos
2300 + rjmp __divhq3_divdpos
2305 + cp r_divdL, r_divL
2306 + cpc r_divdH, r_divH
2307 + breq __divhq3_minus1 ; if equal return -1
2311 + sbrs r0, 7 ; negate result if needed
2322 +#endif /* defined (L_divhq3) */
2324 +#if defined (L_udivuhq3)
2325 + .global __udivuhq3
2328 + sub r_quoH,r_quoH ; clear quotient and carry
2329 + .global __udivuha3_entry
2331 + clr r_quoL ; clear quotient
2332 + ldi r_cnt,16 ; init loop counter
2334 + rol r_divdL ; shift dividend (with CARRY)
2336 + brcs __udivuhq3_ep ; dividend overflow
2337 + cp r_divdL,r_divL ; compare dividend & divisor
2338 + cpc r_divdH,r_divH
2339 + brcc __udivuhq3_ep ; dividend >= divisor
2340 + rol r_quoL ; shift quotient (with CARRY)
2341 + rjmp __udivuhq3_cont
2343 + sub r_divdL,r_divL ; restore dividend
2344 + sbc r_divdH,r_divH
2345 + lsl r_quoL ; shift quotient (without CARRY)
2347 + rol r_quoH ; shift quotient
2348 + dec r_cnt ; decrement loop counter
2349 + brne __udivuhq3_loop
2350 + com r_quoL ; complement result
2351 + com r_quoH ; because C flag was complemented in loop
2354 +#endif /* defined (L_udivuhq3) */
2356 +/*******************************************************
2357 + Fixed Division 8.8 / 8.8
2358 +*******************************************************/
2359 +#if defined (L_divha3)
2366 + rjmp __divha3_divpos
2372 + rjmp __divha3_divdpos
2378 + sbrs r0, 7 ; negate result if needed
2385 +#endif /* defined (L_divha3) */
2387 +#if defined (L_udivuha3)
2388 + .global __udivuha3
2391 + mov r_quoH, r_divdL
2392 + mov r_divdL, r_divdH
2394 + lsl r_quoH ; shift quotient into carry
2395 + rjmp __udivuha3_entry ; same as fractional after rearrange
2397 +#endif /* defined (L_udivuha3) */
2407 +/*******************************************************
2408 + Fixed Division 16.16 / 16.16
2409 +*******************************************************/
2410 +#define r_arg1L r24 /* arg1 gets passed already in place */
2411 +#define r_arg1H r25
2412 +#define r_arg1HL r26
2413 +#define r_arg1HH r27
2414 +#define r_divdL r26 /* dividend Low */
2415 +#define r_divdH r27
2416 +#define r_divdHL r30
2417 +#define r_divdHH r31 /* dividend High */
2418 +#define r_quoL r22 /* quotient Low */
2420 +#define r_quoHL r24
2421 +#define r_quoHH r25 /* quotient High */
2422 +#define r_divL r18 /* divisor Low */
2424 +#define r_divHL r20
2425 +#define r_divHH r21 /* divisor High */
2426 +#define r_cnt __zero_reg__ /* loop count (0 after the loop!) */
2428 +#if defined (L_divsa3)
2435 + rjmp __divsa3_divpos
2445 + rjmp __divsa3_arg1pos
2455 + sbrs r0, 7 ; negate result if needed
2466 +#endif /* defined (L_divsa3) */
2468 +#if defined (L_udivusa3)
2469 + .global __udivusa3
2472 + ldi r_divdHL, 32 ; init loop counter
2473 + mov r_cnt, r_divdHL
2476 + mov_l r_quoL, r_divdHL
2477 + mov_h r_quoH, r_divdHH
2478 + lsl r_quoHL ; shift quotient into carry
2481 + rol r_divdL ; shift dividend (with CARRY)
2485 + brcs __udivusa3_ep ; dividend overflow
2486 + cp r_divdL,r_divL ; compare dividend & divisor
2487 + cpc r_divdH,r_divH
2488 + cpc r_divdHL,r_divHL
2489 + cpc r_divdHH,r_divHH
2490 + brcc __udivusa3_ep ; dividend >= divisor
2491 + rol r_quoL ; shift quotient (with CARRY)
2492 + rjmp __udivusa3_cont
2494 + sub r_divdL,r_divL ; restore dividend
2495 + sbc r_divdH,r_divH
2496 + sbc r_divdHL,r_divHL
2497 + sbc r_divdHH,r_divHH
2498 + lsl r_quoL ; shift quotient (without CARRY)
2500 + rol r_quoH ; shift quotient
2503 + dec r_cnt ; decrement loop counter
2504 + brne __udivusa3_loop
2505 + com r_quoL ; complement result
2506 + com r_quoH ; because C flag was complemented in loop
2511 +#endif /* defined (L_udivusa3) */
2526 diff -Naurp gcc/config/avr/libgcc.S gcc/config/avr/libgcc.S
2527 --- gcc/config/avr/libgcc.S 2011-10-27 16:45:17.000000000 +0530
2528 +++ gcc/config/avr/libgcc.S 2011-10-27 16:55:55.000000000 +0530
2529 @@ -163,6 +163,23 @@ __mulhi3_exit:
2533 +#if defined (__AVR_HAVE_MUL__)
2553 @@ -174,6 +191,7 @@ __mulhisi3:
2557 +#endif /* defined (__AVR_HAVE_MUL__) */
2559 #endif /* defined (L_mulhisi3) */
2561 @@ -181,13 +199,31 @@ __mulhisi3:
2567 +#if defined (__AVR_HAVE_MUL__)
2593 #endif /* defined (L_umulhisi3) */
2595 @@ -200,7 +236,6 @@ __umulhisi3:
2596 #define r_arg1HL r24
2597 #define r_arg1HH r25 /* multiplier High */
2600 #define r_arg2L r18 /* multiplicand Low */
2602 #define r_arg2HL r20
2603 @@ -556,6 +591,23 @@ __divmodsi4_neg1:
2605 #endif /* defined (L_divmodsi4) */
2624 /**********************************
2625 * This is a prologue subroutine
2626 **********************************/
2627 @@ -899,3 +951,4 @@ __tablejump_elpm__:
2629 #endif /* defined (L_tablejump_elpm) */
2631 +#include "libgcc-fixed.S"
2632 diff -Naurp gcc/config/avr/t-avr gcc/config/avr/t-avr
2633 --- gcc/config/avr/t-avr 2011-10-27 16:45:17.000000000 +0530
2634 +++ gcc/config/avr/t-avr 2011-10-27 16:55:55.000000000 +0530
2635 @@ -36,6 +36,8 @@ LIB1ASMSRC = avr/libgcc.S
2644 @@ -54,6 +56,39 @@ LIB1ASMFUNCS = \
2648 +# Fixed point routines
2681 # We do not have the DF type.
2682 # Most of the C functions in libgcc2 use almost all registers,
2683 # so use -mcall-prologues for smaller code size.
2684 diff -Naurp gcc/cse.c gcc/cse.c
2685 --- gcc/cse.c 2011-10-27 16:45:17.000000000 +0530
2686 +++ gcc/cse.c 2011-10-27 16:55:55.000000000 +0530
2687 @@ -3702,9 +3702,10 @@ fold_rtx (rtx x, rtx insn)
2688 && exact_log2 (- INTVAL (const_arg1)) >= 0)))
2691 - /* ??? Vector mode shifts by scalar
2692 + /* ??? Vector and Fixed Point shifts by scalar
2693 shift operand are not supported yet. */
2694 - if (is_shift && VECTOR_MODE_P (mode))
2695 + if (is_shift && (VECTOR_MODE_P (mode)
2696 + || ALL_FIXED_POINT_MODE_P (mode)))
2700 diff -Naurp gcc/dwarf2out.c gcc/dwarf2out.c
2701 --- gcc/dwarf2out.c 2011-10-27 16:45:17.000000000 +0530
2702 +++ gcc/dwarf2out.c 2011-10-27 16:55:55.000000000 +0530
2703 @@ -12790,6 +12790,12 @@ base_type_die (tree type)
2705 add_AT_unsigned (base_type_result, DW_AT_byte_size,
2706 int_size_in_bytes (type));
2708 + /* DWARF version 3 specifies that for fixed-point types DW_AT_binary_scale
2709 + describes the location of the binary point. */
2710 + if (TREE_CODE (type) == FIXED_POINT_TYPE)
2711 + add_AT_int (base_type_result, DW_AT_binary_scale, -TYPE_FBIT (type));
2713 add_AT_unsigned (base_type_result, DW_AT_encoding, encoding);
2715 return base_type_result;
2716 @@ -16561,7 +16567,11 @@ add_const_value_attribute (dw_die_ref di
2722 + add_AT_double (die, DW_AT_const_value,
2723 + CONST_FIXED_VALUE_HIGH (rtl), CONST_FIXED_VALUE_LOW (rtl));
2728 if (GET_CODE (XEXP (rtl, 0)) == CONST_STRING
2729 diff -Naurp gcc/fold-const.c gcc/fold-const.c
2730 --- gcc/fold-const.c 2011-10-27 16:45:17.000000000 +0530
2731 +++ gcc/fold-const.c 2011-10-27 16:55:55.000000000 +0530
2732 @@ -11782,6 +11782,11 @@ fold_binary_loc (location_t loc,
2733 if (TREE_CODE (arg1) == INTEGER_CST && tree_int_cst_sgn (arg1) < 0)
2736 + /* Since fixed point types cannot perform bitwise and, or, etc..
2737 + don't try to convert to an expression with them. */
2738 + if (TREE_CODE(type) == FIXED_POINT_TYPE)
2741 /* Turn (a OP c1) OP c2 into a OP (c1+c2). */
2742 if (TREE_CODE (op0) == code && host_integerp (arg1, false)
2743 && TREE_INT_CST_LOW (arg1) < TYPE_PRECISION (type)
2744 diff -Naurp gcc/varasm.c gcc/varasm.c
2745 --- gcc/varasm.c 2011-10-27 16:45:17.000000000 +0530
2746 +++ gcc/varasm.c 2011-10-27 16:55:55.000000000 +0530
2747 @@ -2504,7 +2504,7 @@ assemble_integer (rtx x, unsigned int si
2751 - omode = mode_for_size (subsize * BITS_PER_UNIT, mclass, 0);
2752 + omode = mode_for_size (subsize * BITS_PER_UNIT, MODE_INT, 0);
2753 imode = mode_for_size (size * BITS_PER_UNIT, mclass, 0);
2755 for (i = 0; i < size; i += subsize)