* bitmap.h (BITMAP_XMALLOC): New macro.
[official-gcc.git] / gcc / simplify-rtx.c
blob6e7862794b6a3adcbc219c287e80392eb25875e6
1 /* Common subexpression elimination for GNU compiler.
2 Copyright (C) 1987, 88, 89, 92-7, 1998, 1999 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
22 #include "config.h"
23 /* stdio.h must precede rtl.h for FFS. */
24 #include "system.h"
25 #include <setjmp.h>
27 #include "rtl.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "flags.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "toplev.h"
38 #include "output.h"
40 /* Simplification and canonicalization of RTL. */
42 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
43 virtual regs here because the simplify_*_operation routines are called
44 by integrate.c, which is called before virtual register instantiation.
   ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
   a header file so that their definitions can be shared with the
   simplification routines in cse.c.  Until then, do not change these
   macros without also changing the copy in cse.c.  */
/* In the (plus ... const_int) arm we must look at the first operand of
   the PLUS; the old code tested (X) itself against arg_pointer_rtx,
   which can never match once we know GET_CODE (X) == PLUS.  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
65 /* Similar, but also allows reference to the stack pointer.
67 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
68 arg_pointer_rtx by itself is nonzero, because on at least one machine,
69 the i960, the arg pointer is zero when it is unused. */
/* As in FIXED_BASE_PLUS_P, the arg-pointer test inside the first
   (plus ... const_int) arm must inspect XEXP (X, 0), not X itself;
   testing (X) there was dead code since GET_CODE (X) == PLUS.  */
#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
92 static rtx simplify_plus_minus PROTO((enum rtx_code, enum machine_mode,
93 rtx, rtx));
94 static void check_fold_consts PROTO((PTR));
96 /* Make a binary operation by properly ordering the operands and
97 seeing if the expression folds. */
99 rtx
100 simplify_gen_binary (code, mode, op0, op1)
101 enum rtx_code code;
102 enum machine_mode mode;
103 rtx op0, op1;
105 rtx tem;
107 /* Put complex operands first and constants second if commutative. */
108 if (GET_RTX_CLASS (code) == 'c'
109 && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
110 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
111 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
112 || (GET_CODE (op0) == SUBREG
113 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
114 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
115 tem = op0, op0 = op1, op1 = tem;
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
120 if (tem)
121 return tem;
123 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
124 just form the operation. */
126 if (code == PLUS && GET_CODE (op1) == CONST_INT
127 && GET_MODE (op0) != VOIDmode)
128 return plus_constant (op0, INTVAL (op1));
129 else if (code == MINUS && GET_CODE (op1) == CONST_INT
130 && GET_MODE (op0) != VOIDmode)
131 return plus_constant (op0, - INTVAL (op1));
132 else
133 return gen_rtx_fmt_ee (code, mode, op0, op1);
136 /* Try to simplify a unary operation CODE whose output mode is to be
137 MODE with input operand OP whose mode was originally OP_MODE.
138 Return zero if no simplification can be made. */
141 simplify_unary_operation (code, mode, op, op_mode)
142 enum rtx_code code;
143 enum machine_mode mode;
144 rtx op;
145 enum machine_mode op_mode;
147 register int width = GET_MODE_BITSIZE (mode);
149 /* The order of these tests is critical so that, for example, we don't
150 check the wrong mode (input vs. output) for a conversion operation,
151 such as FIX. At some point, this should be simplified. */
153 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
155 if (code == FLOAT && GET_MODE (op) == VOIDmode
156 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
158 HOST_WIDE_INT hv, lv;
159 REAL_VALUE_TYPE d;
161 if (GET_CODE (op) == CONST_INT)
162 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
163 else
164 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
166 #ifdef REAL_ARITHMETIC
167 REAL_VALUE_FROM_INT (d, lv, hv, mode);
168 #else
169 if (hv < 0)
171 d = (double) (~ hv);
172 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
173 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
174 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
175 d = (- d - 1.0);
177 else
179 d = (double) hv;
180 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
181 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
182 d += (double) (unsigned HOST_WIDE_INT) lv;
184 #endif /* REAL_ARITHMETIC */
185 d = real_value_truncate (mode, d);
186 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
188 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
189 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
191 HOST_WIDE_INT hv, lv;
192 REAL_VALUE_TYPE d;
194 if (GET_CODE (op) == CONST_INT)
195 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
196 else
197 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
199 if (op_mode == VOIDmode)
201 /* We don't know how to interpret negative-looking numbers in
202 this case, so don't try to fold those. */
203 if (hv < 0)
204 return 0;
206 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
208 else
209 hv = 0, lv &= GET_MODE_MASK (op_mode);
211 #ifdef REAL_ARITHMETIC
212 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
213 #else
215 d = (double) (unsigned HOST_WIDE_INT) hv;
216 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
217 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
218 d += (double) (unsigned HOST_WIDE_INT) lv;
219 #endif /* REAL_ARITHMETIC */
220 d = real_value_truncate (mode, d);
221 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
223 #endif
225 if (GET_CODE (op) == CONST_INT
226 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
228 register HOST_WIDE_INT arg0 = INTVAL (op);
229 register HOST_WIDE_INT val;
231 switch (code)
233 case NOT:
234 val = ~ arg0;
235 break;
237 case NEG:
238 val = - arg0;
239 break;
241 case ABS:
242 val = (arg0 >= 0 ? arg0 : - arg0);
243 break;
245 case FFS:
246 /* Don't use ffs here. Instead, get low order bit and then its
247 number. If arg0 is zero, this will return 0, as desired. */
248 arg0 &= GET_MODE_MASK (mode);
249 val = exact_log2 (arg0 & (- arg0)) + 1;
250 break;
252 case TRUNCATE:
253 val = arg0;
254 break;
256 case ZERO_EXTEND:
257 if (op_mode == VOIDmode)
258 op_mode = mode;
259 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
261 /* If we were really extending the mode,
262 we would have to distinguish between zero-extension
263 and sign-extension. */
264 if (width != GET_MODE_BITSIZE (op_mode))
265 abort ();
266 val = arg0;
268 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
269 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
270 else
271 return 0;
272 break;
274 case SIGN_EXTEND:
275 if (op_mode == VOIDmode)
276 op_mode = mode;
277 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
279 /* If we were really extending the mode,
280 we would have to distinguish between zero-extension
281 and sign-extension. */
282 if (width != GET_MODE_BITSIZE (op_mode))
283 abort ();
284 val = arg0;
286 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
289 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
290 if (val
291 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
292 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
294 else
295 return 0;
296 break;
298 case SQRT:
299 return 0;
301 default:
302 abort ();
305 val = trunc_int_for_mode (val, mode);
307 return GEN_INT (val);
310 /* We can do some operations on integer CONST_DOUBLEs. Also allow
311 for a DImode operation on a CONST_INT. */
312 else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
313 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
315 HOST_WIDE_INT l1, h1, lv, hv;
317 if (GET_CODE (op) == CONST_DOUBLE)
318 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
319 else
320 l1 = INTVAL (op), h1 = l1 < 0 ? -1 : 0;
322 switch (code)
324 case NOT:
325 lv = ~ l1;
326 hv = ~ h1;
327 break;
329 case NEG:
330 neg_double (l1, h1, &lv, &hv);
331 break;
333 case ABS:
334 if (h1 < 0)
335 neg_double (l1, h1, &lv, &hv);
336 else
337 lv = l1, hv = h1;
338 break;
340 case FFS:
341 hv = 0;
342 if (l1 == 0)
343 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
344 else
345 lv = exact_log2 (l1 & (-l1)) + 1;
346 break;
348 case TRUNCATE:
349 /* This is just a change-of-mode, so do nothing. */
350 lv = l1, hv = h1;
351 break;
353 case ZERO_EXTEND:
354 if (op_mode == VOIDmode
355 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
356 return 0;
358 hv = 0;
359 lv = l1 & GET_MODE_MASK (op_mode);
360 break;
362 case SIGN_EXTEND:
363 if (op_mode == VOIDmode
364 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
365 return 0;
366 else
368 lv = l1 & GET_MODE_MASK (op_mode);
369 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
370 && (lv & ((HOST_WIDE_INT) 1
371 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
372 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
374 hv = (lv < 0) ? ~ (HOST_WIDE_INT) 0 : 0;
376 break;
378 case SQRT:
379 return 0;
381 default:
382 return 0;
385 return immed_double_const (lv, hv, mode);
388 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
389 else if (GET_CODE (op) == CONST_DOUBLE
390 && GET_MODE_CLASS (mode) == MODE_FLOAT)
392 REAL_VALUE_TYPE d;
393 jmp_buf handler;
394 rtx x;
396 if (setjmp (handler))
397 /* There used to be a warning here, but that is inadvisable.
398 People may want to cause traps, and the natural way
399 to do it should not get a warning. */
400 return 0;
402 set_float_handler (handler);
404 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
406 switch (code)
408 case NEG:
409 d = REAL_VALUE_NEGATE (d);
410 break;
412 case ABS:
413 if (REAL_VALUE_NEGATIVE (d))
414 d = REAL_VALUE_NEGATE (d);
415 break;
417 case FLOAT_TRUNCATE:
418 d = real_value_truncate (mode, d);
419 break;
421 case FLOAT_EXTEND:
422 /* All this does is change the mode. */
423 break;
425 case FIX:
426 d = REAL_VALUE_RNDZINT (d);
427 break;
429 case UNSIGNED_FIX:
430 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
431 break;
433 case SQRT:
434 return 0;
436 default:
437 abort ();
440 x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
441 set_float_handler (NULL_PTR);
442 return x;
445 else if (GET_CODE (op) == CONST_DOUBLE
446 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
447 && GET_MODE_CLASS (mode) == MODE_INT
448 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
450 REAL_VALUE_TYPE d;
451 jmp_buf handler;
452 HOST_WIDE_INT val;
454 if (setjmp (handler))
455 return 0;
457 set_float_handler (handler);
459 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
461 switch (code)
463 case FIX:
464 val = REAL_VALUE_FIX (d);
465 break;
467 case UNSIGNED_FIX:
468 val = REAL_VALUE_UNSIGNED_FIX (d);
469 break;
471 default:
472 abort ();
475 set_float_handler (NULL_PTR);
477 val = trunc_int_for_mode (val, mode);
479 return GEN_INT (val);
481 #endif
482 /* This was formerly used only for non-IEEE float.
483 eggert@twinsun.com says it is safe for IEEE also. */
484 else
486 /* There are some simplifications we can do even if the operands
487 aren't constant. */
488 switch (code)
490 case NEG:
491 case NOT:
492 /* (not (not X)) == X, similarly for NEG. */
493 if (GET_CODE (op) == code)
494 return XEXP (op, 0);
495 break;
497 case SIGN_EXTEND:
498 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
499 becomes just the MINUS if its mode is MODE. This allows
500 folding switch statements on machines using casesi (such as
501 the Vax). */
502 if (GET_CODE (op) == TRUNCATE
503 && GET_MODE (XEXP (op, 0)) == mode
504 && GET_CODE (XEXP (op, 0)) == MINUS
505 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
506 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
507 return XEXP (op, 0);
509 #ifdef POINTERS_EXTEND_UNSIGNED
510 if (! POINTERS_EXTEND_UNSIGNED
511 && mode == Pmode && GET_MODE (op) == ptr_mode
512 && CONSTANT_P (op))
513 return convert_memory_address (Pmode, op);
514 #endif
515 break;
517 #ifdef POINTERS_EXTEND_UNSIGNED
518 case ZERO_EXTEND:
519 if (POINTERS_EXTEND_UNSIGNED
520 && mode == Pmode && GET_MODE (op) == ptr_mode
521 && CONSTANT_P (op))
522 return convert_memory_address (Pmode, op);
523 break;
524 #endif
526 default:
527 break;
530 return 0;
534 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
535 and OP1. Return 0 if no simplification is possible.
537 Don't use this for relational operations such as EQ or LT.
538 Use simplify_relational_operation instead. */
541 simplify_binary_operation (code, mode, op0, op1)
542 enum rtx_code code;
543 enum machine_mode mode;
544 rtx op0, op1;
546 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
547 HOST_WIDE_INT val;
548 int width = GET_MODE_BITSIZE (mode);
549 rtx tem;
551 /* Relational operations don't work here. We must know the mode
552 of the operands in order to do the comparison correctly.
553 Assuming a full word can give incorrect results.
554 Consider comparing 128 with -128 in QImode. */
556 if (GET_RTX_CLASS (code) == '<')
557 abort ();
559 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
560 if (GET_MODE_CLASS (mode) == MODE_FLOAT
561 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
562 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
564 REAL_VALUE_TYPE f0, f1, value;
565 jmp_buf handler;
567 if (setjmp (handler))
568 return 0;
570 set_float_handler (handler);
572 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
573 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
574 f0 = real_value_truncate (mode, f0);
575 f1 = real_value_truncate (mode, f1);
577 #ifdef REAL_ARITHMETIC
578 #ifndef REAL_INFINITY
579 if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
580 return 0;
581 #endif
582 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
583 #else
584 switch (code)
586 case PLUS:
587 value = f0 + f1;
588 break;
589 case MINUS:
590 value = f0 - f1;
591 break;
592 case MULT:
593 value = f0 * f1;
594 break;
595 case DIV:
596 #ifndef REAL_INFINITY
597 if (f1 == 0)
598 return 0;
599 #endif
600 value = f0 / f1;
601 break;
602 case SMIN:
603 value = MIN (f0, f1);
604 break;
605 case SMAX:
606 value = MAX (f0, f1);
607 break;
608 default:
609 abort ();
611 #endif
613 value = real_value_truncate (mode, value);
614 set_float_handler (NULL_PTR);
615 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
617 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
619 /* We can fold some multi-word operations. */
620 if (GET_MODE_CLASS (mode) == MODE_INT
621 && width == HOST_BITS_PER_WIDE_INT * 2
622 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
623 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
625 HOST_WIDE_INT l1, l2, h1, h2, lv, hv;
627 if (GET_CODE (op0) == CONST_DOUBLE)
628 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
629 else
630 l1 = INTVAL (op0), h1 = l1 < 0 ? -1 : 0;
632 if (GET_CODE (op1) == CONST_DOUBLE)
633 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
634 else
635 l2 = INTVAL (op1), h2 = l2 < 0 ? -1 : 0;
637 switch (code)
639 case MINUS:
640 /* A - B == A + (-B). */
641 neg_double (l2, h2, &lv, &hv);
642 l2 = lv, h2 = hv;
644 /* .. fall through ... */
646 case PLUS:
647 add_double (l1, h1, l2, h2, &lv, &hv);
648 break;
650 case MULT:
651 mul_double (l1, h1, l2, h2, &lv, &hv);
652 break;
654 case DIV: case MOD: case UDIV: case UMOD:
655 /* We'd need to include tree.h to do this and it doesn't seem worth
656 it. */
657 return 0;
659 case AND:
660 lv = l1 & l2, hv = h1 & h2;
661 break;
663 case IOR:
664 lv = l1 | l2, hv = h1 | h2;
665 break;
667 case XOR:
668 lv = l1 ^ l2, hv = h1 ^ h2;
669 break;
671 case SMIN:
672 if (h1 < h2
673 || (h1 == h2
674 && ((unsigned HOST_WIDE_INT) l1
675 < (unsigned HOST_WIDE_INT) l2)))
676 lv = l1, hv = h1;
677 else
678 lv = l2, hv = h2;
679 break;
681 case SMAX:
682 if (h1 > h2
683 || (h1 == h2
684 && ((unsigned HOST_WIDE_INT) l1
685 > (unsigned HOST_WIDE_INT) l2)))
686 lv = l1, hv = h1;
687 else
688 lv = l2, hv = h2;
689 break;
691 case UMIN:
692 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
693 || (h1 == h2
694 && ((unsigned HOST_WIDE_INT) l1
695 < (unsigned HOST_WIDE_INT) l2)))
696 lv = l1, hv = h1;
697 else
698 lv = l2, hv = h2;
699 break;
701 case UMAX:
702 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
703 || (h1 == h2
704 && ((unsigned HOST_WIDE_INT) l1
705 > (unsigned HOST_WIDE_INT) l2)))
706 lv = l1, hv = h1;
707 else
708 lv = l2, hv = h2;
709 break;
711 case LSHIFTRT: case ASHIFTRT:
712 case ASHIFT:
713 case ROTATE: case ROTATERT:
714 #ifdef SHIFT_COUNT_TRUNCATED
715 if (SHIFT_COUNT_TRUNCATED)
716 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
717 #endif
719 if (h2 != 0 || l2 < 0 || l2 >= GET_MODE_BITSIZE (mode))
720 return 0;
722 if (code == LSHIFTRT || code == ASHIFTRT)
723 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
724 code == ASHIFTRT);
725 else if (code == ASHIFT)
726 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
727 else if (code == ROTATE)
728 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
729 else /* code == ROTATERT */
730 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
731 break;
733 default:
734 return 0;
737 return immed_double_const (lv, hv, mode);
740 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
741 || width > HOST_BITS_PER_WIDE_INT || width == 0)
743 /* Even if we can't compute a constant result,
744 there are some cases worth simplifying. */
746 switch (code)
748 case PLUS:
749 /* In IEEE floating point, x+0 is not the same as x. Similarly
750 for the other optimizations below. */
751 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
752 && FLOAT_MODE_P (mode) && ! flag_fast_math)
753 break;
755 if (op1 == CONST0_RTX (mode))
756 return op0;
758 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
759 if (GET_CODE (op0) == NEG)
760 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
761 else if (GET_CODE (op1) == NEG)
762 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
764 /* Handle both-operands-constant cases. We can only add
765 CONST_INTs to constants since the sum of relocatable symbols
766 can't be handled by most assemblers. Don't add CONST_INT
767 to CONST_INT since overflow won't be computed properly if wider
768 than HOST_BITS_PER_WIDE_INT. */
770 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
771 && GET_CODE (op1) == CONST_INT)
772 return plus_constant (op0, INTVAL (op1));
773 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
774 && GET_CODE (op0) == CONST_INT)
775 return plus_constant (op1, INTVAL (op0));
777 /* See if this is something like X * C - X or vice versa or
778 if the multiplication is written as a shift. If so, we can
779 distribute and make a new multiply, shift, or maybe just
780 have X (if C is 2 in the example above). But don't make
781 real multiply if we didn't have one before. */
783 if (! FLOAT_MODE_P (mode))
785 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
786 rtx lhs = op0, rhs = op1;
787 int had_mult = 0;
789 if (GET_CODE (lhs) == NEG)
790 coeff0 = -1, lhs = XEXP (lhs, 0);
791 else if (GET_CODE (lhs) == MULT
792 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
794 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
795 had_mult = 1;
797 else if (GET_CODE (lhs) == ASHIFT
798 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
799 && INTVAL (XEXP (lhs, 1)) >= 0
800 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
802 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
803 lhs = XEXP (lhs, 0);
806 if (GET_CODE (rhs) == NEG)
807 coeff1 = -1, rhs = XEXP (rhs, 0);
808 else if (GET_CODE (rhs) == MULT
809 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
811 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
812 had_mult = 1;
814 else if (GET_CODE (rhs) == ASHIFT
815 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
816 && INTVAL (XEXP (rhs, 1)) >= 0
817 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
819 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
820 rhs = XEXP (rhs, 0);
823 if (rtx_equal_p (lhs, rhs))
825 tem = simplify_gen_binary (MULT, mode, lhs,
826 GEN_INT (coeff0 + coeff1));
827 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
831 /* If one of the operands is a PLUS or a MINUS, see if we can
832 simplify this by the associative law.
833 Don't use the associative law for floating point.
834 The inaccuracy makes it nonassociative,
835 and subtle programs can break if operations are associated. */
837 if (INTEGRAL_MODE_P (mode)
838 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
839 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
840 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
841 return tem;
842 break;
844 case COMPARE:
845 #ifdef HAVE_cc0
846 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
847 using cc0, in which case we want to leave it as a COMPARE
848 so we can distinguish it from a register-register-copy.
850 In IEEE floating point, x-0 is not the same as x. */
852 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
853 || ! FLOAT_MODE_P (mode) || flag_fast_math)
854 && op1 == CONST0_RTX (mode))
855 return op0;
856 #else
857 /* Do nothing here. */
858 #endif
859 break;
861 case MINUS:
862 /* None of these optimizations can be done for IEEE
863 floating point. */
864 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
865 && FLOAT_MODE_P (mode) && ! flag_fast_math)
866 break;
868 /* We can't assume x-x is 0 even with non-IEEE floating point,
869 but since it is zero except in very strange circumstances, we
870 will treat it as zero with -ffast-math. */
871 if (rtx_equal_p (op0, op1)
872 && ! side_effects_p (op0)
873 && (! FLOAT_MODE_P (mode) || flag_fast_math))
874 return CONST0_RTX (mode);
876 /* Change subtraction from zero into negation. */
877 if (op0 == CONST0_RTX (mode))
878 return gen_rtx_NEG (mode, op1);
880 /* (-1 - a) is ~a. */
881 if (op0 == constm1_rtx)
882 return gen_rtx_NOT (mode, op1);
884 /* Subtracting 0 has no effect. */
885 if (op1 == CONST0_RTX (mode))
886 return op0;
888 /* See if this is something like X * C - X or vice versa or
889 if the multiplication is written as a shift. If so, we can
890 distribute and make a new multiply, shift, or maybe just
891 have X (if C is 2 in the example above). But don't make
892 real multiply if we didn't have one before. */
894 if (! FLOAT_MODE_P (mode))
896 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
897 rtx lhs = op0, rhs = op1;
898 int had_mult = 0;
900 if (GET_CODE (lhs) == NEG)
901 coeff0 = -1, lhs = XEXP (lhs, 0);
902 else if (GET_CODE (lhs) == MULT
903 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
905 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
906 had_mult = 1;
908 else if (GET_CODE (lhs) == ASHIFT
909 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
910 && INTVAL (XEXP (lhs, 1)) >= 0
911 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
913 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
914 lhs = XEXP (lhs, 0);
917 if (GET_CODE (rhs) == NEG)
918 coeff1 = - 1, rhs = XEXP (rhs, 0);
919 else if (GET_CODE (rhs) == MULT
920 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
922 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
923 had_mult = 1;
925 else if (GET_CODE (rhs) == ASHIFT
926 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
927 && INTVAL (XEXP (rhs, 1)) >= 0
928 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
930 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
931 rhs = XEXP (rhs, 0);
934 if (rtx_equal_p (lhs, rhs))
936 tem = simplify_gen_binary (MULT, mode, lhs,
937 GEN_INT (coeff0 - coeff1));
938 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
942 /* (a - (-b)) -> (a + b). */
943 if (GET_CODE (op1) == NEG)
944 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
946 /* If one of the operands is a PLUS or a MINUS, see if we can
947 simplify this by the associative law.
948 Don't use the associative law for floating point.
949 The inaccuracy makes it nonassociative,
950 and subtle programs can break if operations are associated. */
952 if (INTEGRAL_MODE_P (mode)
953 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
954 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
955 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
956 return tem;
958 /* Don't let a relocatable value get a negative coeff. */
959 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
960 return plus_constant (op0, - INTVAL (op1));
962 /* (x - (x & y)) -> (x & ~y) */
963 if (GET_CODE (op1) == AND)
965 if (rtx_equal_p (op0, XEXP (op1, 0)))
966 return simplify_gen_binary (AND, mode, op0,
967 gen_rtx_NOT (mode, XEXP (op1, 1)));
968 if (rtx_equal_p (op0, XEXP (op1, 1)))
969 return simplify_gen_binary (AND, mode, op0,
970 gen_rtx_NOT (mode, XEXP (op1, 0)));
972 break;
974 case MULT:
975 if (op1 == constm1_rtx)
977 tem = simplify_unary_operation (NEG, mode, op0, mode);
979 return tem ? tem : gen_rtx_NEG (mode, op0);
982 /* In IEEE floating point, x*0 is not always 0. */
983 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
984 || ! FLOAT_MODE_P (mode) || flag_fast_math)
985 && op1 == CONST0_RTX (mode)
986 && ! side_effects_p (op0))
987 return op1;
989 /* In IEEE floating point, x*1 is not equivalent to x for nans.
990 However, ANSI says we can drop signals,
991 so we can do this anyway. */
992 if (op1 == CONST1_RTX (mode))
993 return op0;
995 /* Convert multiply by constant power of two into shift unless
996 we are still generating RTL. This test is a kludge. */
997 if (GET_CODE (op1) == CONST_INT
998 && (val = exact_log2 (INTVAL (op1))) >= 0
999 /* If the mode is larger than the host word size, and the
1000 uppermost bit is set, then this isn't a power of two due
1001 to implicit sign extension. */
1002 && (width <= HOST_BITS_PER_WIDE_INT
1003 || val != HOST_BITS_PER_WIDE_INT - 1)
1004 && ! rtx_equal_function_value_matters)
1005 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1007 if (GET_CODE (op1) == CONST_DOUBLE
1008 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
1010 REAL_VALUE_TYPE d;
1011 jmp_buf handler;
1012 int op1is2, op1ism1;
1014 if (setjmp (handler))
1015 return 0;
1017 set_float_handler (handler);
1018 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1019 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
1020 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
1021 set_float_handler (NULL_PTR);
1023 /* x*2 is x+x and x*(-1) is -x */
1024 if (op1is2 && GET_MODE (op0) == mode)
1025 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1027 else if (op1ism1 && GET_MODE (op0) == mode)
1028 return gen_rtx_NEG (mode, op0);
1030 break;
1032 case IOR:
1033 if (op1 == const0_rtx)
1034 return op0;
1035 if (GET_CODE (op1) == CONST_INT
1036 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1037 return op1;
1038 if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1039 return op0;
1040 /* A | (~A) -> -1 */
1041 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1042 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1043 && ! side_effects_p (op0)
1044 && GET_MODE_CLASS (mode) != MODE_CC)
1045 return constm1_rtx;
1046 break;
1048 case XOR:
1049 if (op1 == const0_rtx)
1050 return op0;
1051 if (GET_CODE (op1) == CONST_INT
1052 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1053 return gen_rtx_NOT (mode, op0);
1054 if (op0 == op1 && ! side_effects_p (op0)
1055 && GET_MODE_CLASS (mode) != MODE_CC)
1056 return const0_rtx;
1057 break;
1059 case AND:
1060 if (op1 == const0_rtx && ! side_effects_p (op0))
1061 return const0_rtx;
1062 if (GET_CODE (op1) == CONST_INT
1063 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1064 return op0;
1065 if (op0 == op1 && ! side_effects_p (op0)
1066 && GET_MODE_CLASS (mode) != MODE_CC)
1067 return op0;
1068 /* A & (~A) -> 0 */
1069 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1070 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1071 && ! side_effects_p (op0)
1072 && GET_MODE_CLASS (mode) != MODE_CC)
1073 return const0_rtx;
1074 break;
1076 case UDIV:
1077 /* Convert divide by power of two into shift (divide by 1 handled
1078 below). */
1079 if (GET_CODE (op1) == CONST_INT
1080 && (arg1 = exact_log2 (INTVAL (op1))) > 0)
1081 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1083 /* ... fall through ... */
1085 case DIV:
1086 if (op1 == CONST1_RTX (mode))
1087 return op0;
1089 /* In IEEE floating point, 0/x is not always 0. */
1090 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1091 || ! FLOAT_MODE_P (mode) || flag_fast_math)
1092 && op0 == CONST0_RTX (mode)
1093 && ! side_effects_p (op1))
1094 return op0;
1096 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1097 /* Change division by a constant into multiplication. Only do
1098 this with -ffast-math until an expert says it is safe in
1099 general. */
1100 else if (GET_CODE (op1) == CONST_DOUBLE
1101 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
1102 && op1 != CONST0_RTX (mode)
1103 && flag_fast_math)
1105 REAL_VALUE_TYPE d;
1106 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1108 if (! REAL_VALUES_EQUAL (d, dconst0))
1110 #if defined (REAL_ARITHMETIC)
1111 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1112 return gen_rtx_MULT (mode, op0,
1113 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1114 #else
1115 return
1116 gen_rtx_MULT (mode, op0,
1117 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
1118 #endif
1121 #endif
1122 break;
1124 case UMOD:
1125 /* Handle modulus by power of two (mod with 1 handled below). */
1126 if (GET_CODE (op1) == CONST_INT
1127 && exact_log2 (INTVAL (op1)) > 0)
1128 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1130 /* ... fall through ... */
1132 case MOD:
1133 if ((op0 == const0_rtx || op1 == const1_rtx)
1134 && ! side_effects_p (op0) && ! side_effects_p (op1))
1135 return const0_rtx;
1136 break;
1138 case ROTATERT:
1139 case ROTATE:
1140 /* Rotating ~0 always results in ~0. */
1141 if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1142 && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
1143 && ! side_effects_p (op1))
1144 return op0;
1146 /* ... fall through ... */
1148 case ASHIFT:
1149 case ASHIFTRT:
1150 case LSHIFTRT:
1151 if (op1 == const0_rtx)
1152 return op0;
1153 if (op0 == const0_rtx && ! side_effects_p (op1))
1154 return op0;
1155 break;
1157 case SMIN:
1158 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1159 && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width -1)
1160 && ! side_effects_p (op0))
1161 return op1;
1162 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1163 return op0;
1164 break;
1166 case SMAX:
1167 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1168 && ((unsigned HOST_WIDE_INT) INTVAL (op1)
1169 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1170 && ! side_effects_p (op0))
1171 return op1;
1172 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1173 return op0;
1174 break;
1176 case UMIN:
1177 if (op1 == const0_rtx && ! side_effects_p (op0))
1178 return op1;
1179 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1180 return op0;
1181 break;
1183 case UMAX:
1184 if (op1 == constm1_rtx && ! side_effects_p (op0))
1185 return op1;
1186 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1187 return op0;
1188 break;
1190 default:
1191 abort ();
1194 return 0;
1197 /* Get the integer argument values in two forms:
1198 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1200 arg0 = INTVAL (op0);
1201 arg1 = INTVAL (op1);
1203 if (width < HOST_BITS_PER_WIDE_INT)
1205 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1206 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1208 arg0s = arg0;
1209 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1210 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1212 arg1s = arg1;
1213 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1214 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1216 else
1218 arg0s = arg0;
1219 arg1s = arg1;
1222 /* Compute the value of the arithmetic. */
1224 switch (code)
1226 case PLUS:
1227 val = arg0s + arg1s;
1228 break;
1230 case MINUS:
1231 val = arg0s - arg1s;
1232 break;
1234 case MULT:
1235 val = arg0s * arg1s;
1236 break;
1238 case DIV:
1239 if (arg1s == 0)
1240 return 0;
1241 val = arg0s / arg1s;
1242 break;
1244 case MOD:
1245 if (arg1s == 0)
1246 return 0;
1247 val = arg0s % arg1s;
1248 break;
1250 case UDIV:
1251 if (arg1 == 0)
1252 return 0;
1253 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1254 break;
1256 case UMOD:
1257 if (arg1 == 0)
1258 return 0;
1259 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1260 break;
1262 case AND:
1263 val = arg0 & arg1;
1264 break;
1266 case IOR:
1267 val = arg0 | arg1;
1268 break;
1270 case XOR:
1271 val = arg0 ^ arg1;
1272 break;
1274 case LSHIFTRT:
1275 /* If shift count is undefined, don't fold it; let the machine do
1276 what it wants. But truncate it if the machine will do that. */
1277 if (arg1 < 0)
1278 return 0;
1280 #ifdef SHIFT_COUNT_TRUNCATED
1281 if (SHIFT_COUNT_TRUNCATED)
1282 arg1 %= width;
1283 #endif
1285 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1286 break;
1288 case ASHIFT:
1289 if (arg1 < 0)
1290 return 0;
1292 #ifdef SHIFT_COUNT_TRUNCATED
1293 if (SHIFT_COUNT_TRUNCATED)
1294 arg1 %= width;
1295 #endif
1297 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1298 break;
1300 case ASHIFTRT:
1301 if (arg1 < 0)
1302 return 0;
1304 #ifdef SHIFT_COUNT_TRUNCATED
1305 if (SHIFT_COUNT_TRUNCATED)
1306 arg1 %= width;
1307 #endif
1309 val = arg0s >> arg1;
1311 /* Bootstrap compiler may not have sign extended the right shift.
1312 Manually extend the sign to ensure bootstrap cc matches gcc. */
1313 if (arg0s < 0 && arg1 > 0)
1314 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1316 break;
1318 case ROTATERT:
1319 if (arg1 < 0)
1320 return 0;
1322 arg1 %= width;
1323 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1324 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1325 break;
1327 case ROTATE:
1328 if (arg1 < 0)
1329 return 0;
1331 arg1 %= width;
1332 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1333 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1334 break;
1336 case COMPARE:
1337 /* Do nothing here. */
1338 return 0;
1340 case SMIN:
1341 val = arg0s <= arg1s ? arg0s : arg1s;
1342 break;
1344 case UMIN:
1345 val = ((unsigned HOST_WIDE_INT) arg0
1346 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1347 break;
1349 case SMAX:
1350 val = arg0s > arg1s ? arg0s : arg1s;
1351 break;
1353 case UMAX:
1354 val = ((unsigned HOST_WIDE_INT) arg0
1355 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1356 break;
1358 default:
1359 abort ();
1362 val = trunc_int_for_mode (val, mode);
1364 return GEN_INT (val);
1367 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1368 PLUS or MINUS.
1370 Rather than test for specific cases, we do this by a brute-force method
1371 and do all possible simplifications until no more changes occur. Then
1372 we rebuild the operation. */
1374 static rtx
1375 simplify_plus_minus (code, mode, op0, op1)
1376 enum rtx_code code;
1377 enum machine_mode mode;
1378 rtx op0, op1;
/* Flatten (OP0 CODE OP1) into a list of at most 8 addends in OPS,
   where NEGS[I] nonzero means OPS[I] is subtracted rather than added;
   simplify the addends pairwise, then rebuild the sum.  Returns 0
   when no overall improvement was found.  */
1380 rtx ops[8];
1381 int negs[8];
1382 rtx result, tem;
1383 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
1384 int first = 1, negate = 0, changed;
1385 int i, j;
1387 bzero ((char *) ops, sizeof ops);
1389 /* Set up the two operands and then expand them until nothing has been
1390 changed. If we run out of room in our array, give up; this should
1391 almost never happen. */
1393 ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
1395 changed = 1;
1396 while (changed)
1398 changed = 0;
1400 for (i = 0; i < n_ops; i++)
1401 switch (GET_CODE (ops[i]))
1403 case PLUS:
1404 case MINUS:
/* Split a nested sum: the second operand gets a new slot, the first
   replaces the current slot.  A MINUS flips the sign of its second
   operand.  */
1405 if (n_ops == 7)
1406 return 0;
1408 ops[n_ops] = XEXP (ops[i], 1);
1409 negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
1410 ops[i] = XEXP (ops[i], 0);
1411 input_ops++;
1412 changed = 1;
1413 break;
1415 case NEG:
/* -(a): absorb the negation into the sign flag.  */
1416 ops[i] = XEXP (ops[i], 0);
1417 negs[i] = ! negs[i];
1418 changed = 1;
1419 break;
1421 case CONST:
/* Strip the CONST wrapper; count it so we can tell later whether we
   produced more CONSTs than we consumed.  */
1422 ops[i] = XEXP (ops[i], 0);
1423 input_consts++;
1424 changed = 1;
1425 break;
1427 case NOT:
1428 /* ~a -> (-a - 1) */
/* Left alone (no failure) when the array is already full.  */
1429 if (n_ops != 7)
1431 ops[n_ops] = constm1_rtx;
1432 negs[n_ops++] = negs[i];
1433 ops[i] = XEXP (ops[i], 0);
1434 negs[i] = ! negs[i];
1435 changed = 1;
1437 break;
1439 case CONST_INT:
/* Fold a pending subtraction directly into the constant.  */
1440 if (negs[i])
1441 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
1442 break;
1444 default:
1445 break;
1449 /* If we only have two operands, we can't do anything. */
1450 if (n_ops <= 2)
1451 return 0;
1453 /* Now simplify each pair of operands until nothing changes. The first
1454 time through just simplify constants against each other. */
1456 changed = 1;
1457 while (changed)
1459 changed = first;
1461 for (i = 0; i < n_ops - 1; i++)
1462 for (j = i + 1; j < n_ops; j++)
1463 if (ops[i] != 0 && ops[j] != 0
1464 && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
/* Try to combine OPS[I] with OPS[J]; the two sign flags decide
   whether this is an addition or a subtraction, and in which
   order.  */
1466 rtx lhs = ops[i], rhs = ops[j];
1467 enum rtx_code ncode = PLUS;
1469 if (negs[i] && ! negs[j])
1470 lhs = ops[j], rhs = ops[i], ncode = MINUS;
1471 else if (! negs[i] && negs[j])
1472 ncode = MINUS;
1474 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1475 if (tem)
/* The pair collapsed into TEM; slot J becomes empty.  The combined
   result is negated only when both inputs were.  */
1477 ops[i] = tem, ops[j] = 0;
1478 negs[i] = negs[i] && negs[j];
1479 if (GET_CODE (tem) == NEG)
1480 ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
1482 if (GET_CODE (ops[i]) == CONST_INT && negs[i])
1483 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
1484 changed = 1;
1488 first = 0;
1491 /* Pack all the operands to the lower-numbered entries and give up if
1492 we didn't reduce the number of operands we had. Make sure we
1493 count a CONST as two operands. If we have the same number of
1494 operands, but have made more CONSTs than we had, this is also
1495 an improvement, so accept it. */
1497 for (i = 0, j = 0; j < n_ops; j++)
1498 if (ops[j] != 0)
1500 ops[i] = ops[j], negs[i++] = negs[j];
1501 if (GET_CODE (ops[j]) == CONST)
1502 n_consts++;
1505 if (i + n_consts > input_ops
1506 || (i + n_consts == input_ops && n_consts <= input_consts))
1507 return 0;
1509 n_ops = i;
1511 /* If we have a CONST_INT, put it last. */
1512 for (i = 0; i < n_ops - 1; i++)
1513 if (GET_CODE (ops[i]) == CONST_INT)
1515 tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i] , ops[i] = tem;
1516 j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
1519 /* Put a non-negated operand first. If there aren't any, make all
1520 operands positive and negate the whole thing later. */
1521 for (i = 0; i < n_ops && negs[i]; i++)
1524 if (i == n_ops)
1526 for (i = 0; i < n_ops; i++)
1527 negs[i] = 0;
1528 negate = 1;
1530 else if (i != 0)
1532 tem = ops[0], ops[0] = ops[i], ops[i] = tem;
1533 j = negs[0], negs[0] = negs[i], negs[i] = j;
1536 /* Now make the result by performing the requested operations. */
1537 result = ops[0];
1538 for (i = 1; i < n_ops; i++)
1539 result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
1541 return negate ? gen_rtx_NEG (mode, result) : result;
/* Argument block used to pass the two CONST_DOUBLE operands into, and
   the comparison results out of, check_fold_consts () via
   do_float_handler ().  */
1544 struct cfc_args
1546 rtx op0, op1; /* Input */
1547 int equal, op0lt, op1lt; /* Output */
/* Compare the two CONST_DOUBLE operands in *DATA (a struct cfc_args)
   and store the equality/less-than results back into it.  Invoked
   through do_float_handler () so that a floating point trap during the
   comparison aborts the fold instead of crashing the compiler.  */
1550 static void
1551 check_fold_consts (data)
1552 PTR data;
1554 struct cfc_args *args = (struct cfc_args *) data;
1555 REAL_VALUE_TYPE d0, d1;
1557 REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
1558 REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
1559 args->equal = REAL_VALUES_EQUAL (d0, d1);
1560 args->op0lt = REAL_VALUES_LESS (d0, d1);
1561 args->op1lt = REAL_VALUES_LESS (d1, d0);
1564 /* Like simplify_binary_operation except used for relational operators.
1565 MODE is the mode of the operands, not that of the result. If MODE
1566 is VOIDmode, both operands must also be VOIDmode and we compare the
1567 operands in "infinite precision".
1569 If no simplification is possible, this function returns zero. Otherwise,
1570 it returns either const_true_rtx or const0_rtx. */
1573 simplify_relational_operation (code, mode, op0, op1)
1574 enum rtx_code code;
1575 enum machine_mode mode;
1576 rtx op0, op1;
/* The five flags record, once known, the equality of the operands plus
   the signed (op0lt/op1lt) and unsigned (op0ltu/op1ltu) orderings; the
   final switch turns them into the answer for CODE.  */
1578 int equal, op0lt, op0ltu, op1lt, op1ltu;
1579 rtx tem;
1581 /* If op0 is a compare, extract the comparison arguments from it. */
1582 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1583 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1585 /* We can't simplify MODE_CC values since we don't know what the
1586 actual comparison is. */
1587 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1588 #ifdef HAVE_cc0
1589 || op0 == cc0_rtx
1590 #endif
1592 return 0;
1594 /* For integer comparisons of A and B maybe we can simplify A - B and can
1595 then simplify a comparison of that with zero. If A and B are both either
1596 a register or a CONST_INT, this can't help; testing for these cases will
1597 prevent infinite recursion here and speed things up.
1599 If CODE is an unsigned comparison, then we can never do this optimization,
1600 because it gives an incorrect result if the subtraction wraps around zero.
1601 ANSI C defines unsigned operations such that they never overflow, and
1602 thus such cases can not be ignored. */
1604 if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
1605 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
1606 && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
1607 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
1608 && code != GTU && code != GEU && code != LTU && code != LEU)
1609 return simplify_relational_operation (signed_condition (code),
1610 mode, tem, const0_rtx)
1612 /* For non-IEEE floating-point, if the two operands are equal, we know the
1613 result. */
1614 if (rtx_equal_p (op0, op1)
1615 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1616 || ! FLOAT_MODE_P (GET_MODE (op0)) || flag_fast_math)
1617 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
1619 /* If the operands are floating-point constants, see if we can fold
1620 the result. */
1621 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1622 else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
1623 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
1625 struct cfc_args args;
1627 /* Setup input for check_fold_consts() */
1628 args.op0 = op0;
1629 args.op1 = op1;
1631 if (do_float_handler(check_fold_consts, (PTR) &args) == 0)
1632 /* We got an exception from check_fold_consts() */
1633 return 0;
1635 /* Receive output from check_fold_consts() */
1636 equal = args.equal;
1637 op0lt = op0ltu = args.op0lt;
1638 op1lt = op1ltu = args.op1lt;
1640 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1642 /* Otherwise, see if the operands are both integers. */
1643 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
1644 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
1645 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
/* Constants are handled as double-word (high/low) pairs so that modes
   wider than a host word still compare correctly.  */
1647 int width = GET_MODE_BITSIZE (mode);
1648 HOST_WIDE_INT l0s, h0s, l1s, h1s;
1649 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
1651 /* Get the two words comprising each integer constant. */
1652 if (GET_CODE (op0) == CONST_DOUBLE)
1654 l0u = l0s = CONST_DOUBLE_LOW (op0);
1655 h0u = h0s = CONST_DOUBLE_HIGH (op0);
1657 else
1659 l0u = l0s = INTVAL (op0);
1660 h0u = h0s = l0s < 0 ? -1 : 0;
1663 if (GET_CODE (op1) == CONST_DOUBLE)
1665 l1u = l1s = CONST_DOUBLE_LOW (op1);
1666 h1u = h1s = CONST_DOUBLE_HIGH (op1);
1668 else
1670 l1u = l1s = INTVAL (op1);
1671 h1u = h1s = l1s < 0 ? -1 : 0;
1674 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1675 we have to sign or zero-extend the values. */
1676 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
1677 h0u = h1u = 0, h0s = l0s < 0 ? -1 : 0, h1s = l1s < 0 ? -1 : 0;
1679 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
1681 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
1682 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
1684 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1685 l0s |= ((HOST_WIDE_INT) (-1) << width);
1687 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1688 l1s |= ((HOST_WIDE_INT) (-1) << width);
/* All five orderings follow from lexicographic comparison of the
   (high, low) word pairs.  */
1691 equal = (h0u == h1u && l0u == l1u);
1692 op0lt = (h0s < h1s || (h0s == h1s && l0s < l1s));
1693 op1lt = (h1s < h0s || (h1s == h0s && l1s < l0s));
1694 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
1695 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
1698 /* Otherwise, there are some code-specific tests we can make. */
1699 else
1701 switch (code)
1703 case EQ:
1704 /* References to the frame plus a constant or labels cannot
1705 be zero, but a SYMBOL_REF can due to #pragma weak. */
1706 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
1707 || GET_CODE (op0) == LABEL_REF)
1708 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1709 /* On some machines, the ap reg can be 0 sometimes. */
1710 && op0 != arg_pointer_rtx
1711 #endif
1713 return const0_rtx;
1714 break;
1716 case NE:
1717 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
1718 || GET_CODE (op0) == LABEL_REF)
1719 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1720 && op0 != arg_pointer_rtx
1721 #endif
1723 return const_true_rtx;
1724 break;
1726 case GEU:
1727 /* Unsigned values are never negative. */
1728 if (op1 == const0_rtx)
1729 return const_true_rtx;
1730 break;
1732 case LTU:
1733 if (op1 == const0_rtx)
1734 return const0_rtx;
1735 break;
1737 case LEU:
1738 /* Unsigned values are never greater than the largest
1739 unsigned value. */
1740 if (GET_CODE (op1) == CONST_INT
1741 && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
1742 && INTEGRAL_MODE_P (mode))
1743 return const_true_rtx;
1744 break;
1746 case GTU:
1747 if (GET_CODE (op1) == CONST_INT
1748 && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
1749 && INTEGRAL_MODE_P (mode))
1750 return const0_rtx;
1751 break;
1753 default:
1754 break;
1757 return 0;
1760 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
1761 as appropriate. */
1762 switch (code)
1764 case EQ:
1765 return equal ? const_true_rtx : const0_rtx;
1766 case NE:
1767 return ! equal ? const_true_rtx : const0_rtx;
1768 case LT:
1769 return op0lt ? const_true_rtx : const0_rtx;
1770 case GT:
1771 return op1lt ? const_true_rtx : const0_rtx;
1772 case LTU:
1773 return op0ltu ? const_true_rtx : const0_rtx;
1774 case GTU:
1775 return op1ltu ? const_true_rtx : const0_rtx;
1776 case LE:
1777 return equal || op0lt ? const_true_rtx : const0_rtx;
1778 case GE:
1779 return equal || op1lt ? const_true_rtx : const0_rtx;
1780 case LEU:
1781 return equal || op0ltu ? const_true_rtx : const0_rtx;
1782 case GEU:
1783 return equal || op1ltu ? const_true_rtx : const0_rtx;
1784 default:
1785 abort ();
1789 /* Simplify CODE, an operation with result mode MODE and three operands,
1790 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
1791 a constant. Return 0 if no simplifications is possible. */
1794 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
1795 enum rtx_code code;
1796 enum machine_mode mode, op0_mode;
1797 rtx op0, op1, op2;
1799 int width = GET_MODE_BITSIZE (mode);
1801 /* VOIDmode means "infinite" precision. */
1802 if (width == 0)
1803 width = HOST_BITS_PER_WIDE_INT;
1805 switch (code)
1807 case SIGN_EXTRACT:
1808 case ZERO_EXTRACT:
1809 if (GET_CODE (op0) == CONST_INT
1810 && GET_CODE (op1) == CONST_INT
1811 && GET_CODE (op2) == CONST_INT
1812 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_BITSIZE (op0_mode)
1813 && width <= HOST_BITS_PER_WIDE_INT)
1815 /* Extracting a bit-field from a constant */
1816 HOST_WIDE_INT val = INTVAL (op0);
/* Shift the field down to bit 0; which end OP2 counts the start
   bit from depends on BITS_BIG_ENDIAN.  */
1818 if (BITS_BIG_ENDIAN)
1819 val >>= (GET_MODE_BITSIZE (op0_mode)
1820 - INTVAL (op2) - INTVAL (op1));
1821 else
1822 val >>= INTVAL (op2);
/* Skip masking when the field is a full host word; the shift by
   INTVAL (op1) below would be out of range in that case.  */
1824 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
1826 /* First zero-extend. */
1827 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
1828 /* If desired, propagate sign bit. */
1829 if (code == SIGN_EXTRACT
1830 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
1831 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
1834 /* Clear the bits that don't belong in our mode,
1835 unless they and our sign bit are all one.
1836 So we get either a reasonable negative value or a reasonable
1837 unsigned value for this mode. */
1838 if (width < HOST_BITS_PER_WIDE_INT
1839 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
1840 != ((HOST_WIDE_INT) (-1) << (width - 1))))
1841 val &= ((HOST_WIDE_INT) 1 << width) - 1;
1843 return GEN_INT (val);
1845 break;
1847 case IF_THEN_ELSE:
/* A constant condition selects one arm outright.  */
1848 if (GET_CODE (op0) == CONST_INT)
1849 return op0 != const0_rtx ? op1 : op2;
1851 /* Convert a == b ? b : a to "a". */
1852 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
1853 && rtx_equal_p (XEXP (op0, 0), op1)
1854 && rtx_equal_p (XEXP (op0, 1), op2))
1855 return op1;
1856 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
1857 && rtx_equal_p (XEXP (op0, 1), op1)
1858 && rtx_equal_p (XEXP (op0, 0), op2))
1859 return op2;
/* A comparison condition may itself fold to a constant.
   NOTE(review): this checks for const1_rtx although
   simplify_relational_operation is documented to return
   const_true_rtx; the two coincide only when STORE_FLAG_VALUE is 1
   -- confirm for targets where it is not.  */
1860 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
1862 rtx temp;
1863 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
1864 XEXP (op0, 0), XEXP (op0, 1));
1865 /* See if any simplifications were possible. */
1866 if (temp == const0_rtx)
1867 return op2;
1868 else if (temp == const1_rtx)
1869 return op1;
1871 break;
1873 default:
1874 abort ();
1877 return 0;
1880 /* Simplify X, an rtx expression.
1882 Return the simplified expression or NULL if no simplifications
1883 were possible.
1885 This is the preferred entry point into the simplification routines;
1886 however, we still allow passes to call the more specific routines.
1888 Right now GCC has three (yes, three) major bodies of RTL simplification
1889 code that need to be unified.
1891 1. fold_rtx in cse.c. This code uses various CSE specific
1892 information to aid in RTL simplification.
1894 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
1895 it uses combine specific information to aid in RTL
1896 simplification.
1898 3. The routines in this file.
1901 Long term we want to only have one body of simplification code; to
1902 get to that state I recommend the following steps:
1904 1. Pore over fold_rtx & simplify_rtx and move any simplifications
1905 which are not pass dependent state into these routines.
1907 2. As code is moved by #1, change fold_rtx & simplify_rtx to
1908 use this routine whenever possible.
1910 3. Allow for pass dependent state to be provided to these
1911 routines and add simplifications based on the pass dependent
1912 state. Remove code from cse.c & combine.c that becomes
1913 redundant/dead.
1915 It will take time, but ultimately the compiler will be easier to
1916 maintain and improve. It's totally silly that when we add a
1917 simplification that it needs to be added to 4 places (3 for RTL
1918 simplification and 1 for tree simplification). */
/* Dispatch X to the simplifier matching its rtx class: unary ('1'),
   binary or commutative ('2'/'c'), ternary or bitfield ('3'/'b'), or
   relational ('<').  Any other class yields NULL (no simplification).  */
1921 simplify_rtx (x)
1922 rtx x;
1924 enum rtx_code code;
1925 enum machine_mode mode;
/* NOTE(review): `new' is declared but never used in this function (and
   collides with the C++ keyword); consider removing it.  */
1926 rtx new;
1928 mode = GET_MODE (x);
1929 code = GET_CODE (x);
1931 switch (GET_RTX_CLASS (code))
1933 case '1':
1934 return simplify_unary_operation (code, mode,
1935 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
1936 case '2':
1937 case 'c':
1938 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
1940 case '3':
1941 case 'b':
/* Ternary/bitfield ops also receive the mode of operand 0.  */
1942 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
1943 XEXP (x, 0), XEXP (x, 1), XEXP (x, 2));
1945 case '<':
/* For comparisons, pass the mode of the operands, not the result.  */
1946 return simplify_relational_operation (code, GET_MODE (XEXP (x, 0)),
1947 XEXP (x, 0), XEXP (x, 1));
1948 default:
1949 return NULL;