Add config support for s390
[official-gcc.git] / gcc / simplify-rtx.c
blobfa3dfe17f0f4e1cfd0f0936cbbef7219d110340a
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include <setjmp.h>
27 #include "rtl.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "flags.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "toplev.h"
38 #include "output.h"
39 #include "ggc.h"
41 /* Simplification and canonicalization of RTL. */
43 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
44 virtual regs here because the simplify_*_operation routines are called
45 by integrate.c, which is called before virtual register instantiation.
47 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
a header file so that their definitions can be shared with the
simplification routines in cse.c.  Until then, do not
change these macros without also changing the copy in cse.c.  */
/* Nonzero if X is a fixed base register (frame pointer, fixed argument
   pointer, or the virtual registers that instantiate to them), possibly
   plus a CONST_INT offset, or an ADDRESSOF.
   Fix: inside the PLUS arm, the old test `(X) == arg_pointer_rtx' could
   never be true (X is known to be a PLUS there); test XEXP (X, 0) like
   every other alternative in that arm.  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
66 /* Similar, but also allows reference to the stack pointer.
68 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
69 arg_pointer_rtx by itself is nonzero, because on at least one machine,
70 the i960, the arg pointer is zero when it is unused. */
/* Nonzero if X is a base register known to be nonzero (frame pointer,
   stack pointer, fixed argument pointer, or the corresponding virtual
   registers), possibly plus a CONST_INT offset, or an ADDRESSOF.
   Unlike FIXED_BASE_PLUS_P this also admits the stack pointer; it does
   not admit a bare arg_pointer_rtx, which can be zero on some machines
   (e.g. the i960) when it is unused.
   Fix: inside the first PLUS arm, the old test `(X) == arg_pointer_rtx'
   could never be true (X is known to be a PLUS there); test
   XEXP (X, 0) like every other alternative in that arm.  */
#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.
   Fix: parenthesize the macro argument so that an expression argument
   (e.g. HWI_SIGN_EXTEND (a + b)) is cast as a whole rather than the
   cast binding only to its first operand.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
99 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
100 enum machine_mode, rtx, rtx));
101 static void check_fold_consts PARAMS ((PTR));
103 /* Make a binary operation by properly ordering the operands and
104 seeing if the expression folds. */
107 simplify_gen_binary (code, mode, op0, op1)
108 enum rtx_code code;
109 enum machine_mode mode;
110 rtx op0, op1;
112 rtx tem;
114 /* Put complex operands first and constants second if commutative. */
115 if (GET_RTX_CLASS (code) == 'c'
116 && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
117 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
118 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
119 || (GET_CODE (op0) == SUBREG
120 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
121 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
122 tem = op0, op0 = op1, op1 = tem;
124 /* If this simplifies, do it. */
125 tem = simplify_binary_operation (code, mode, op0, op1);
127 if (tem)
128 return tem;
130 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
131 just form the operation. */
133 if (code == PLUS && GET_CODE (op1) == CONST_INT
134 && GET_MODE (op0) != VOIDmode)
135 return plus_constant (op0, INTVAL (op1));
136 else if (code == MINUS && GET_CODE (op1) == CONST_INT
137 && GET_MODE (op0) != VOIDmode)
138 return plus_constant (op0, - INTVAL (op1));
139 else
140 return gen_rtx_fmt_ee (code, mode, op0, op1);
143 /* Try to simplify a unary operation CODE whose output mode is to be
144 MODE with input operand OP whose mode was originally OP_MODE.
145 Return zero if no simplification can be made. */
148 simplify_unary_operation (code, mode, op, op_mode)
149 enum rtx_code code;
150 enum machine_mode mode;
151 rtx op;
152 enum machine_mode op_mode;
154 unsigned int width = GET_MODE_BITSIZE (mode);
156 /* The order of these tests is critical so that, for example, we don't
157 check the wrong mode (input vs. output) for a conversion operation,
158 such as FIX. At some point, this should be simplified. */
160 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
162 if (code == FLOAT && GET_MODE (op) == VOIDmode
163 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
165 HOST_WIDE_INT hv, lv;
166 REAL_VALUE_TYPE d;
168 if (GET_CODE (op) == CONST_INT)
169 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
170 else
171 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
173 #ifdef REAL_ARITHMETIC
174 REAL_VALUE_FROM_INT (d, lv, hv, mode);
175 #else
176 if (hv < 0)
178 d = (double) (~ hv);
179 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
180 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
181 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
182 d = (- d - 1.0);
184 else
186 d = (double) hv;
187 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
188 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
189 d += (double) (unsigned HOST_WIDE_INT) lv;
191 #endif /* REAL_ARITHMETIC */
192 d = real_value_truncate (mode, d);
193 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
195 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
196 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
198 HOST_WIDE_INT hv, lv;
199 REAL_VALUE_TYPE d;
201 if (GET_CODE (op) == CONST_INT)
202 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
203 else
204 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
206 if (op_mode == VOIDmode)
208 /* We don't know how to interpret negative-looking numbers in
209 this case, so don't try to fold those. */
210 if (hv < 0)
211 return 0;
213 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
215 else
216 hv = 0, lv &= GET_MODE_MASK (op_mode);
218 #ifdef REAL_ARITHMETIC
219 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
220 #else
222 d = (double) (unsigned HOST_WIDE_INT) hv;
223 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
224 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
225 d += (double) (unsigned HOST_WIDE_INT) lv;
226 #endif /* REAL_ARITHMETIC */
227 d = real_value_truncate (mode, d);
228 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
230 #endif
232 if (GET_CODE (op) == CONST_INT
233 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
235 register HOST_WIDE_INT arg0 = INTVAL (op);
236 register HOST_WIDE_INT val;
238 switch (code)
240 case NOT:
241 val = ~ arg0;
242 break;
244 case NEG:
245 val = - arg0;
246 break;
248 case ABS:
249 val = (arg0 >= 0 ? arg0 : - arg0);
250 break;
252 case FFS:
253 /* Don't use ffs here. Instead, get low order bit and then its
254 number. If arg0 is zero, this will return 0, as desired. */
255 arg0 &= GET_MODE_MASK (mode);
256 val = exact_log2 (arg0 & (- arg0)) + 1;
257 break;
259 case TRUNCATE:
260 val = arg0;
261 break;
263 case ZERO_EXTEND:
264 if (op_mode == VOIDmode)
265 op_mode = mode;
266 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
268 /* If we were really extending the mode,
269 we would have to distinguish between zero-extension
270 and sign-extension. */
271 if (width != GET_MODE_BITSIZE (op_mode))
272 abort ();
273 val = arg0;
275 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
276 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
277 else
278 return 0;
279 break;
281 case SIGN_EXTEND:
282 if (op_mode == VOIDmode)
283 op_mode = mode;
284 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
286 /* If we were really extending the mode,
287 we would have to distinguish between zero-extension
288 and sign-extension. */
289 if (width != GET_MODE_BITSIZE (op_mode))
290 abort ();
291 val = arg0;
293 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
296 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
297 if (val
298 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
299 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
301 else
302 return 0;
303 break;
305 case SQRT:
306 case FLOAT_EXTEND:
307 case FLOAT_TRUNCATE:
308 return 0;
310 default:
311 abort ();
314 val = trunc_int_for_mode (val, mode);
316 return GEN_INT (val);
319 /* We can do some operations on integer CONST_DOUBLEs. Also allow
320 for a DImode operation on a CONST_INT. */
321 else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
322 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
324 unsigned HOST_WIDE_INT l1, lv;
325 HOST_WIDE_INT h1, hv;
327 if (GET_CODE (op) == CONST_DOUBLE)
328 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
329 else
330 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
332 switch (code)
334 case NOT:
335 lv = ~ l1;
336 hv = ~ h1;
337 break;
339 case NEG:
340 neg_double (l1, h1, &lv, &hv);
341 break;
343 case ABS:
344 if (h1 < 0)
345 neg_double (l1, h1, &lv, &hv);
346 else
347 lv = l1, hv = h1;
348 break;
350 case FFS:
351 hv = 0;
352 if (l1 == 0)
353 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
354 else
355 lv = exact_log2 (l1 & (-l1)) + 1;
356 break;
358 case TRUNCATE:
359 /* This is just a change-of-mode, so do nothing. */
360 lv = l1, hv = h1;
361 break;
363 case ZERO_EXTEND:
364 if (op_mode == VOIDmode
365 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
366 return 0;
368 hv = 0;
369 lv = l1 & GET_MODE_MASK (op_mode);
370 break;
372 case SIGN_EXTEND:
373 if (op_mode == VOIDmode
374 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
375 return 0;
376 else
378 lv = l1 & GET_MODE_MASK (op_mode);
379 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
380 && (lv & ((HOST_WIDE_INT) 1
381 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
382 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
384 hv = HWI_SIGN_EXTEND (lv);
386 break;
388 case SQRT:
389 return 0;
391 default:
392 return 0;
395 return immed_double_const (lv, hv, mode);
398 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
399 else if (GET_CODE (op) == CONST_DOUBLE
400 && GET_MODE_CLASS (mode) == MODE_FLOAT)
402 REAL_VALUE_TYPE d;
403 jmp_buf handler;
404 rtx x;
406 if (setjmp (handler))
407 /* There used to be a warning here, but that is inadvisable.
408 People may want to cause traps, and the natural way
409 to do it should not get a warning. */
410 return 0;
412 set_float_handler (handler);
414 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
416 switch (code)
418 case NEG:
419 d = REAL_VALUE_NEGATE (d);
420 break;
422 case ABS:
423 if (REAL_VALUE_NEGATIVE (d))
424 d = REAL_VALUE_NEGATE (d);
425 break;
427 case FLOAT_TRUNCATE:
428 d = real_value_truncate (mode, d);
429 break;
431 case FLOAT_EXTEND:
432 /* All this does is change the mode. */
433 break;
435 case FIX:
436 d = REAL_VALUE_RNDZINT (d);
437 break;
439 case UNSIGNED_FIX:
440 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
441 break;
443 case SQRT:
444 return 0;
446 default:
447 abort ();
450 x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
451 set_float_handler (NULL_PTR);
452 return x;
455 else if (GET_CODE (op) == CONST_DOUBLE
456 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
457 && GET_MODE_CLASS (mode) == MODE_INT
458 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
460 REAL_VALUE_TYPE d;
461 jmp_buf handler;
462 HOST_WIDE_INT val;
464 if (setjmp (handler))
465 return 0;
467 set_float_handler (handler);
469 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
471 switch (code)
473 case FIX:
474 val = REAL_VALUE_FIX (d);
475 break;
477 case UNSIGNED_FIX:
478 val = REAL_VALUE_UNSIGNED_FIX (d);
479 break;
481 default:
482 abort ();
485 set_float_handler (NULL_PTR);
487 val = trunc_int_for_mode (val, mode);
489 return GEN_INT (val);
491 #endif
492 /* This was formerly used only for non-IEEE float.
493 eggert@twinsun.com says it is safe for IEEE also. */
494 else
496 enum rtx_code reversed;
497 /* There are some simplifications we can do even if the operands
498 aren't constant. */
499 switch (code)
501 case NOT:
502 /* (not (not X)) == X. */
503 if (GET_CODE (op) == NOT)
504 return XEXP (op, 0);
506 /* (not (eq X Y)) == (ne X Y), etc. */
507 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
508 && ((reversed = reversed_comparison_code (op, NULL_RTX))
509 != UNKNOWN))
510 return gen_rtx_fmt_ee (reversed,
511 op_mode, XEXP (op, 0), XEXP (op, 1));
512 break;
514 case NEG:
515 /* (neg (neg X)) == X. */
516 if (GET_CODE (op) == NEG)
517 return XEXP (op, 0);
518 break;
520 case SIGN_EXTEND:
521 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
522 becomes just the MINUS if its mode is MODE. This allows
523 folding switch statements on machines using casesi (such as
524 the Vax). */
525 if (GET_CODE (op) == TRUNCATE
526 && GET_MODE (XEXP (op, 0)) == mode
527 && GET_CODE (XEXP (op, 0)) == MINUS
528 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
529 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
530 return XEXP (op, 0);
532 #ifdef POINTERS_EXTEND_UNSIGNED
533 if (! POINTERS_EXTEND_UNSIGNED
534 && mode == Pmode && GET_MODE (op) == ptr_mode
535 && (CONSTANT_P (op)
536 || (GET_CODE (op) == SUBREG
537 && GET_CODE (SUBREG_REG (op)) == REG
538 && REG_POINTER (SUBREG_REG (op))
539 && GET_MODE (SUBREG_REG (op)) == Pmode)))
540 return convert_memory_address (Pmode, op);
541 #endif
542 break;
544 #ifdef POINTERS_EXTEND_UNSIGNED
545 case ZERO_EXTEND:
546 if (POINTERS_EXTEND_UNSIGNED
547 && mode == Pmode && GET_MODE (op) == ptr_mode
548 && (CONSTANT_P (op)
549 || (GET_CODE (op) == SUBREG
550 && GET_CODE (SUBREG_REG (op)) == REG
551 && REG_POINTER (SUBREG_REG (op))
552 && GET_MODE (SUBREG_REG (op)) == Pmode)))
553 return convert_memory_address (Pmode, op);
554 break;
555 #endif
557 default:
558 break;
561 return 0;
565 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
566 and OP1. Return 0 if no simplification is possible.
568 Don't use this for relational operations such as EQ or LT.
569 Use simplify_relational_operation instead. */
572 simplify_binary_operation (code, mode, op0, op1)
573 enum rtx_code code;
574 enum machine_mode mode;
575 rtx op0, op1;
577 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
578 HOST_WIDE_INT val;
579 unsigned int width = GET_MODE_BITSIZE (mode);
580 rtx tem;
582 /* Relational operations don't work here. We must know the mode
583 of the operands in order to do the comparison correctly.
584 Assuming a full word can give incorrect results.
585 Consider comparing 128 with -128 in QImode. */
587 if (GET_RTX_CLASS (code) == '<')
588 abort ();
590 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
591 if (GET_MODE_CLASS (mode) == MODE_FLOAT
592 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
593 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
595 REAL_VALUE_TYPE f0, f1, value;
596 jmp_buf handler;
598 if (setjmp (handler))
599 return 0;
601 set_float_handler (handler);
603 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
604 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
605 f0 = real_value_truncate (mode, f0);
606 f1 = real_value_truncate (mode, f1);
608 #ifdef REAL_ARITHMETIC
609 #ifndef REAL_INFINITY
610 if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
611 return 0;
612 #endif
613 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
614 #else
615 switch (code)
617 case PLUS:
618 value = f0 + f1;
619 break;
620 case MINUS:
621 value = f0 - f1;
622 break;
623 case MULT:
624 value = f0 * f1;
625 break;
626 case DIV:
627 #ifndef REAL_INFINITY
628 if (f1 == 0)
629 return 0;
630 #endif
631 value = f0 / f1;
632 break;
633 case SMIN:
634 value = MIN (f0, f1);
635 break;
636 case SMAX:
637 value = MAX (f0, f1);
638 break;
639 default:
640 abort ();
642 #endif
644 value = real_value_truncate (mode, value);
645 set_float_handler (NULL_PTR);
646 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
648 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
650 /* We can fold some multi-word operations. */
651 if (GET_MODE_CLASS (mode) == MODE_INT
652 && width == HOST_BITS_PER_WIDE_INT * 2
653 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
654 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
656 unsigned HOST_WIDE_INT l1, l2, lv;
657 HOST_WIDE_INT h1, h2, hv;
659 if (GET_CODE (op0) == CONST_DOUBLE)
660 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
661 else
662 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
664 if (GET_CODE (op1) == CONST_DOUBLE)
665 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
666 else
667 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
669 switch (code)
671 case MINUS:
672 /* A - B == A + (-B). */
673 neg_double (l2, h2, &lv, &hv);
674 l2 = lv, h2 = hv;
676 /* .. fall through ... */
678 case PLUS:
679 add_double (l1, h1, l2, h2, &lv, &hv);
680 break;
682 case MULT:
683 mul_double (l1, h1, l2, h2, &lv, &hv);
684 break;
686 case DIV: case MOD: case UDIV: case UMOD:
687 /* We'd need to include tree.h to do this and it doesn't seem worth
688 it. */
689 return 0;
691 case AND:
692 lv = l1 & l2, hv = h1 & h2;
693 break;
695 case IOR:
696 lv = l1 | l2, hv = h1 | h2;
697 break;
699 case XOR:
700 lv = l1 ^ l2, hv = h1 ^ h2;
701 break;
703 case SMIN:
704 if (h1 < h2
705 || (h1 == h2
706 && ((unsigned HOST_WIDE_INT) l1
707 < (unsigned HOST_WIDE_INT) l2)))
708 lv = l1, hv = h1;
709 else
710 lv = l2, hv = h2;
711 break;
713 case SMAX:
714 if (h1 > h2
715 || (h1 == h2
716 && ((unsigned HOST_WIDE_INT) l1
717 > (unsigned HOST_WIDE_INT) l2)))
718 lv = l1, hv = h1;
719 else
720 lv = l2, hv = h2;
721 break;
723 case UMIN:
724 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
725 || (h1 == h2
726 && ((unsigned HOST_WIDE_INT) l1
727 < (unsigned HOST_WIDE_INT) l2)))
728 lv = l1, hv = h1;
729 else
730 lv = l2, hv = h2;
731 break;
733 case UMAX:
734 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
735 || (h1 == h2
736 && ((unsigned HOST_WIDE_INT) l1
737 > (unsigned HOST_WIDE_INT) l2)))
738 lv = l1, hv = h1;
739 else
740 lv = l2, hv = h2;
741 break;
743 case LSHIFTRT: case ASHIFTRT:
744 case ASHIFT:
745 case ROTATE: case ROTATERT:
746 #ifdef SHIFT_COUNT_TRUNCATED
747 if (SHIFT_COUNT_TRUNCATED)
748 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
749 #endif
751 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
752 return 0;
754 if (code == LSHIFTRT || code == ASHIFTRT)
755 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
756 code == ASHIFTRT);
757 else if (code == ASHIFT)
758 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
759 else if (code == ROTATE)
760 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
761 else /* code == ROTATERT */
762 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
763 break;
765 default:
766 return 0;
769 return immed_double_const (lv, hv, mode);
772 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
773 || width > HOST_BITS_PER_WIDE_INT || width == 0)
775 /* Even if we can't compute a constant result,
776 there are some cases worth simplifying. */
778 switch (code)
780 case PLUS:
781 /* In IEEE floating point, x+0 is not the same as x. Similarly
782 for the other optimizations below. */
783 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
784 && FLOAT_MODE_P (mode) && ! flag_fast_math)
785 break;
787 if (op1 == CONST0_RTX (mode))
788 return op0;
790 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
791 if (GET_CODE (op0) == NEG)
792 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
793 else if (GET_CODE (op1) == NEG)
794 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
796 /* Handle both-operands-constant cases. We can only add
797 CONST_INTs to constants since the sum of relocatable symbols
798 can't be handled by most assemblers. Don't add CONST_INT
799 to CONST_INT since overflow won't be computed properly if wider
800 than HOST_BITS_PER_WIDE_INT. */
802 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
803 && GET_CODE (op1) == CONST_INT)
804 return plus_constant (op0, INTVAL (op1));
805 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
806 && GET_CODE (op0) == CONST_INT)
807 return plus_constant (op1, INTVAL (op0));
809 /* See if this is something like X * C - X or vice versa or
810 if the multiplication is written as a shift. If so, we can
811 distribute and make a new multiply, shift, or maybe just
812 have X (if C is 2 in the example above). But don't make
813 real multiply if we didn't have one before. */
815 if (! FLOAT_MODE_P (mode))
817 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
818 rtx lhs = op0, rhs = op1;
819 int had_mult = 0;
821 if (GET_CODE (lhs) == NEG)
822 coeff0 = -1, lhs = XEXP (lhs, 0);
823 else if (GET_CODE (lhs) == MULT
824 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
826 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
827 had_mult = 1;
829 else if (GET_CODE (lhs) == ASHIFT
830 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
831 && INTVAL (XEXP (lhs, 1)) >= 0
832 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
834 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
835 lhs = XEXP (lhs, 0);
838 if (GET_CODE (rhs) == NEG)
839 coeff1 = -1, rhs = XEXP (rhs, 0);
840 else if (GET_CODE (rhs) == MULT
841 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
843 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
844 had_mult = 1;
846 else if (GET_CODE (rhs) == ASHIFT
847 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
848 && INTVAL (XEXP (rhs, 1)) >= 0
849 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
851 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
852 rhs = XEXP (rhs, 0);
855 if (rtx_equal_p (lhs, rhs))
857 tem = simplify_gen_binary (MULT, mode, lhs,
858 GEN_INT (coeff0 + coeff1));
859 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
863 /* If one of the operands is a PLUS or a MINUS, see if we can
864 simplify this by the associative law.
865 Don't use the associative law for floating point.
866 The inaccuracy makes it nonassociative,
867 and subtle programs can break if operations are associated. */
869 if (INTEGRAL_MODE_P (mode)
870 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
871 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
872 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
873 return tem;
874 break;
876 case COMPARE:
877 #ifdef HAVE_cc0
878 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
879 using cc0, in which case we want to leave it as a COMPARE
880 so we can distinguish it from a register-register-copy.
882 In IEEE floating point, x-0 is not the same as x. */
884 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
885 || ! FLOAT_MODE_P (mode) || flag_fast_math)
886 && op1 == CONST0_RTX (mode))
887 return op0;
888 #endif
890 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
891 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
892 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
893 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
895 rtx xop00 = XEXP (op0, 0);
896 rtx xop10 = XEXP (op1, 0);
898 #ifdef HAVE_cc0
899 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
900 #else
901 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
902 && GET_MODE (xop00) == GET_MODE (xop10)
903 && REGNO (xop00) == REGNO (xop10)
904 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
905 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
906 #endif
907 return xop00;
910 break;
911 case MINUS:
912 /* None of these optimizations can be done for IEEE
913 floating point. */
914 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
915 && FLOAT_MODE_P (mode) && ! flag_fast_math)
916 break;
918 /* We can't assume x-x is 0 even with non-IEEE floating point,
919 but since it is zero except in very strange circumstances, we
920 will treat it as zero with -ffast-math. */
921 if (rtx_equal_p (op0, op1)
922 && ! side_effects_p (op0)
923 && (! FLOAT_MODE_P (mode) || flag_fast_math))
924 return CONST0_RTX (mode);
926 /* Change subtraction from zero into negation. */
927 if (op0 == CONST0_RTX (mode))
928 return gen_rtx_NEG (mode, op1);
930 /* (-1 - a) is ~a. */
931 if (op0 == constm1_rtx)
932 return gen_rtx_NOT (mode, op1);
934 /* Subtracting 0 has no effect. */
935 if (op1 == CONST0_RTX (mode))
936 return op0;
938 /* See if this is something like X * C - X or vice versa or
939 if the multiplication is written as a shift. If so, we can
940 distribute and make a new multiply, shift, or maybe just
941 have X (if C is 2 in the example above). But don't make
942 real multiply if we didn't have one before. */
944 if (! FLOAT_MODE_P (mode))
946 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
947 rtx lhs = op0, rhs = op1;
948 int had_mult = 0;
950 if (GET_CODE (lhs) == NEG)
951 coeff0 = -1, lhs = XEXP (lhs, 0);
952 else if (GET_CODE (lhs) == MULT
953 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
955 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
956 had_mult = 1;
958 else if (GET_CODE (lhs) == ASHIFT
959 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
960 && INTVAL (XEXP (lhs, 1)) >= 0
961 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
963 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
964 lhs = XEXP (lhs, 0);
967 if (GET_CODE (rhs) == NEG)
968 coeff1 = - 1, rhs = XEXP (rhs, 0);
969 else if (GET_CODE (rhs) == MULT
970 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
972 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
973 had_mult = 1;
975 else if (GET_CODE (rhs) == ASHIFT
976 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
977 && INTVAL (XEXP (rhs, 1)) >= 0
978 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
980 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
981 rhs = XEXP (rhs, 0);
984 if (rtx_equal_p (lhs, rhs))
986 tem = simplify_gen_binary (MULT, mode, lhs,
987 GEN_INT (coeff0 - coeff1));
988 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
992 /* (a - (-b)) -> (a + b). */
993 if (GET_CODE (op1) == NEG)
994 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
996 /* If one of the operands is a PLUS or a MINUS, see if we can
997 simplify this by the associative law.
998 Don't use the associative law for floating point.
999 The inaccuracy makes it nonassociative,
1000 and subtle programs can break if operations are associated. */
1002 if (INTEGRAL_MODE_P (mode)
1003 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1004 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
1005 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1006 return tem;
1008 /* Don't let a relocatable value get a negative coeff. */
1009 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1010 return plus_constant (op0, - INTVAL (op1));
1012 /* (x - (x & y)) -> (x & ~y) */
1013 if (GET_CODE (op1) == AND)
1015 if (rtx_equal_p (op0, XEXP (op1, 0)))
1016 return simplify_gen_binary (AND, mode, op0,
1017 gen_rtx_NOT (mode, XEXP (op1, 1)));
1018 if (rtx_equal_p (op0, XEXP (op1, 1)))
1019 return simplify_gen_binary (AND, mode, op0,
1020 gen_rtx_NOT (mode, XEXP (op1, 0)));
1022 break;
1024 case MULT:
1025 if (op1 == constm1_rtx)
1027 tem = simplify_unary_operation (NEG, mode, op0, mode);
1029 return tem ? tem : gen_rtx_NEG (mode, op0);
1032 /* In IEEE floating point, x*0 is not always 0. */
1033 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1034 || ! FLOAT_MODE_P (mode) || flag_fast_math)
1035 && op1 == CONST0_RTX (mode)
1036 && ! side_effects_p (op0))
1037 return op1;
1039 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1040 However, ANSI says we can drop signals,
1041 so we can do this anyway. */
1042 if (op1 == CONST1_RTX (mode))
1043 return op0;
1045 /* Convert multiply by constant power of two into shift unless
1046 we are still generating RTL. This test is a kludge. */
1047 if (GET_CODE (op1) == CONST_INT
1048 && (val = exact_log2 (INTVAL (op1))) >= 0
1049 /* If the mode is larger than the host word size, and the
1050 uppermost bit is set, then this isn't a power of two due
1051 to implicit sign extension. */
1052 && (width <= HOST_BITS_PER_WIDE_INT
1053 || val != HOST_BITS_PER_WIDE_INT - 1)
1054 && ! rtx_equal_function_value_matters)
1055 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1057 if (GET_CODE (op1) == CONST_DOUBLE
1058 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
1060 REAL_VALUE_TYPE d;
1061 jmp_buf handler;
1062 int op1is2, op1ism1;
1064 if (setjmp (handler))
1065 return 0;
1067 set_float_handler (handler);
1068 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1069 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
1070 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
1071 set_float_handler (NULL_PTR);
1073 /* x*2 is x+x and x*(-1) is -x */
1074 if (op1is2 && GET_MODE (op0) == mode)
1075 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1077 else if (op1ism1 && GET_MODE (op0) == mode)
1078 return gen_rtx_NEG (mode, op0);
1080 break;
1082 case IOR:
1083 if (op1 == const0_rtx)
1084 return op0;
1085 if (GET_CODE (op1) == CONST_INT
1086 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1087 return op1;
1088 if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1089 return op0;
1090 /* A | (~A) -> -1 */
1091 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1092 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1093 && ! side_effects_p (op0)
1094 && GET_MODE_CLASS (mode) != MODE_CC)
1095 return constm1_rtx;
1096 break;
1098 case XOR:
1099 if (op1 == const0_rtx)
1100 return op0;
1101 if (GET_CODE (op1) == CONST_INT
1102 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1103 return gen_rtx_NOT (mode, op0);
1104 if (op0 == op1 && ! side_effects_p (op0)
1105 && GET_MODE_CLASS (mode) != MODE_CC)
1106 return const0_rtx;
1107 break;
1109 case AND:
1110 if (op1 == const0_rtx && ! side_effects_p (op0))
1111 return const0_rtx;
1112 if (GET_CODE (op1) == CONST_INT
1113 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1114 return op0;
1115 if (op0 == op1 && ! side_effects_p (op0)
1116 && GET_MODE_CLASS (mode) != MODE_CC)
1117 return op0;
1118 /* A & (~A) -> 0 */
1119 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1120 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1121 && ! side_effects_p (op0)
1122 && GET_MODE_CLASS (mode) != MODE_CC)
1123 return const0_rtx;
1124 break;
1126 case UDIV:
1127 /* Convert divide by power of two into shift (divide by 1 handled
1128 below). */
1129 if (GET_CODE (op1) == CONST_INT
1130 && (arg1 = exact_log2 (INTVAL (op1))) > 0)
1131 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1133 /* ... fall through ... */
1135 case DIV:
1136 if (op1 == CONST1_RTX (mode))
1137 return op0;
1139 /* In IEEE floating point, 0/x is not always 0. */
1140 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1141 || ! FLOAT_MODE_P (mode) || flag_fast_math)
1142 && op0 == CONST0_RTX (mode)
1143 && ! side_effects_p (op1))
1144 return op0;
1146 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1147 /* Change division by a constant into multiplication. Only do
1148 this with -ffast-math until an expert says it is safe in
1149 general. */
1150 else if (GET_CODE (op1) == CONST_DOUBLE
1151 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
1152 && op1 != CONST0_RTX (mode)
1153 && flag_fast_math)
1155 REAL_VALUE_TYPE d;
1156 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1158 if (! REAL_VALUES_EQUAL (d, dconst0))
1160 #if defined (REAL_ARITHMETIC)
1161 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1162 return gen_rtx_MULT (mode, op0,
1163 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1164 #else
1165 return
1166 gen_rtx_MULT (mode, op0,
1167 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
1168 #endif
1171 #endif
1172 break;
1174 case UMOD:
1175 /* Handle modulus by power of two (mod with 1 handled below). */
1176 if (GET_CODE (op1) == CONST_INT
1177 && exact_log2 (INTVAL (op1)) > 0)
1178 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1180 /* ... fall through ... */
1182 case MOD:
1183 if ((op0 == const0_rtx || op1 == const1_rtx)
1184 && ! side_effects_p (op0) && ! side_effects_p (op1))
1185 return const0_rtx;
1186 break;
1188 case ROTATERT:
1189 case ROTATE:
1190 /* Rotating ~0 always results in ~0. */
1191 if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1192 && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
1193 && ! side_effects_p (op1))
1194 return op0;
1196 /* ... fall through ... */
1198 case ASHIFT:
1199 case ASHIFTRT:
1200 case LSHIFTRT:
1201 if (op1 == const0_rtx)
1202 return op0;
1203 if (op0 == const0_rtx && ! side_effects_p (op1))
1204 return op0;
1205 break;
1207 case SMIN:
1208 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1209 && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width -1)
1210 && ! side_effects_p (op0))
1211 return op1;
1212 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1213 return op0;
1214 break;
1216 case SMAX:
1217 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1218 && ((unsigned HOST_WIDE_INT) INTVAL (op1)
1219 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1220 && ! side_effects_p (op0))
1221 return op1;
1222 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1223 return op0;
1224 break;
1226 case UMIN:
1227 if (op1 == const0_rtx && ! side_effects_p (op0))
1228 return op1;
1229 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1230 return op0;
1231 break;
1233 case UMAX:
1234 if (op1 == constm1_rtx && ! side_effects_p (op0))
1235 return op1;
1236 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1237 return op0;
1238 break;
1240 default:
1241 abort ();
1244 return 0;
1247 /* Get the integer argument values in two forms:
1248 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1250 arg0 = INTVAL (op0);
1251 arg1 = INTVAL (op1);
1253 if (width < HOST_BITS_PER_WIDE_INT)
1255 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1256 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1258 arg0s = arg0;
1259 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1260 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1262 arg1s = arg1;
1263 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1264 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1266 else
1268 arg0s = arg0;
1269 arg1s = arg1;
1272 /* Compute the value of the arithmetic. */
1274 switch (code)
1276 case PLUS:
1277 val = arg0s + arg1s;
1278 break;
1280 case MINUS:
1281 val = arg0s - arg1s;
1282 break;
1284 case MULT:
1285 val = arg0s * arg1s;
1286 break;
1288 case DIV:
1289 if (arg1s == 0)
1290 return 0;
1291 val = arg0s / arg1s;
1292 break;
1294 case MOD:
1295 if (arg1s == 0)
1296 return 0;
1297 val = arg0s % arg1s;
1298 break;
1300 case UDIV:
1301 if (arg1 == 0)
1302 return 0;
1303 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1304 break;
1306 case UMOD:
1307 if (arg1 == 0)
1308 return 0;
1309 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1310 break;
1312 case AND:
1313 val = arg0 & arg1;
1314 break;
1316 case IOR:
1317 val = arg0 | arg1;
1318 break;
1320 case XOR:
1321 val = arg0 ^ arg1;
1322 break;
1324 case LSHIFTRT:
1325 /* If shift count is undefined, don't fold it; let the machine do
1326 what it wants. But truncate it if the machine will do that. */
1327 if (arg1 < 0)
1328 return 0;
1330 #ifdef SHIFT_COUNT_TRUNCATED
1331 if (SHIFT_COUNT_TRUNCATED)
1332 arg1 %= width;
1333 #endif
1335 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1336 break;
1338 case ASHIFT:
1339 if (arg1 < 0)
1340 return 0;
1342 #ifdef SHIFT_COUNT_TRUNCATED
1343 if (SHIFT_COUNT_TRUNCATED)
1344 arg1 %= width;
1345 #endif
1347 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1348 break;
1350 case ASHIFTRT:
1351 if (arg1 < 0)
1352 return 0;
1354 #ifdef SHIFT_COUNT_TRUNCATED
1355 if (SHIFT_COUNT_TRUNCATED)
1356 arg1 %= width;
1357 #endif
1359 val = arg0s >> arg1;
1361 /* Bootstrap compiler may not have sign extended the right shift.
1362 Manually extend the sign to insure bootstrap cc matches gcc. */
1363 if (arg0s < 0 && arg1 > 0)
1364 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1366 break;
1368 case ROTATERT:
1369 if (arg1 < 0)
1370 return 0;
1372 arg1 %= width;
1373 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1374 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1375 break;
1377 case ROTATE:
1378 if (arg1 < 0)
1379 return 0;
1381 arg1 %= width;
1382 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1383 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1384 break;
1386 case COMPARE:
1387 /* Do nothing here. */
1388 return 0;
1390 case SMIN:
1391 val = arg0s <= arg1s ? arg0s : arg1s;
1392 break;
1394 case UMIN:
1395 val = ((unsigned HOST_WIDE_INT) arg0
1396 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1397 break;
1399 case SMAX:
1400 val = arg0s > arg1s ? arg0s : arg1s;
1401 break;
1403 case UMAX:
1404 val = ((unsigned HOST_WIDE_INT) arg0
1405 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1406 break;
1408 default:
1409 abort ();
1412 val = trunc_int_for_mode (val, mode);
1414 return GEN_INT (val);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   CODE is PLUS or MINUS, MODE the mode of the result, and OP0/OP1 the
   two operands.  Returns the simplified rtx, or 0 if no net
   simplification was found.  */

static rtx
simplify_plus_minus (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  /* Parallel arrays: ops[i] is the i-th leaf operand of the sum, and
     negs[i] is nonzero when that operand is subtracted.  */
  rtx ops[8];
  int negs[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
  int first = 1, negate = 0, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);

  changed = 1;
  while (changed)
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	switch (GET_CODE (ops[i]))
	  {
	  case PLUS:
	  case MINUS:
	    /* Need room for one more operand; 7 keeps a free slot.  */
	    if (n_ops == 7)
	      return 0;

	    ops[n_ops] = XEXP (ops[i], 1);
	    negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
	    ops[i] = XEXP (ops[i], 0);
	    input_ops++;
	    changed = 1;
	    break;

	  case NEG:
	    ops[i] = XEXP (ops[i], 0);
	    negs[i] = ! negs[i];
	    changed = 1;
	    break;

	  case CONST:
	    ops[i] = XEXP (ops[i], 0);
	    input_consts++;
	    changed = 1;
	    break;

	  case NOT:
	    /* ~a -> (-a - 1) */
	    if (n_ops != 7)
	      {
		ops[n_ops] = constm1_rtx;
		negs[n_ops++] = negs[i];
		ops[i] = XEXP (ops[i], 0);
		negs[i] = ! negs[i];
		changed = 1;
	      }
	    break;

	  case CONST_INT:
	    /* Fold the negation into the constant itself.  */
	    if (negs[i])
	      ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
	    break;

	  default:
	    break;
	  }
    }

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2)
    return 0;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  changed = 1;
  while (changed)
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  if (ops[i] != 0 && ops[j] != 0
	      && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
	    {
	      rtx lhs = ops[i], rhs = ops[j];
	      enum rtx_code ncode = PLUS;

	      /* Orient the pair so the combination is a PLUS or a MINUS
		 with the subtracted operand second.  */
	      if (negs[i] && ! negs[j])
		lhs = ops[j], rhs = ops[i], ncode = MINUS;
	      else if (! negs[i] && negs[j])
		ncode = MINUS;

	      tem = simplify_binary_operation (ncode, mode, lhs, rhs);
	      if (tem)
		{
		  /* Combined ops[i] and ops[j] into one entry; mark
		     slot j empty.  */
		  ops[i] = tem, ops[j] = 0;
		  negs[i] = negs[i] && negs[j];
		  if (GET_CODE (tem) == NEG)
		    ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];

		  if (GET_CODE (ops[i]) == CONST_INT && negs[i])
		    ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
		  changed = 1;
		}
	    }

      first = 0;
    }

  /* Pack all the operands to the lower-numbered entries and give up if
     we didn't reduce the number of operands we had.  Make sure we
     count a CONST as two operands.  If we have the same number of
     operands, but have made more CONSTs than we had, this is also
     an improvement, so accept it.  */

  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j] != 0)
      {
	ops[i] = ops[j], negs[i++] = negs[j];
	if (GET_CODE (ops[j]) == CONST)
	  n_consts++;
      }

  if (i + n_consts > input_ops
      || (i + n_consts == input_ops && n_consts <= input_consts))
    return 0;

  n_ops = i;

  /* If we have a CONST_INT, put it last.  */
  for (i = 0; i < n_ops - 1; i++)
    if (GET_CODE (ops[i]) == CONST_INT)
      {
	tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i] , ops[i] = tem;
	j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
      }

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */
  for (i = 0; i < n_ops && negs[i]; i++)
    ;

  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
	negs[i] = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0], ops[0] = ops[i], ops[i] = tem;
      j = negs[0], negs[0] = negs[i], negs[i] = j;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0];
  for (i = 1; i < n_ops; i++)
    result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
/* Communication block for check_fold_consts, passed through
   do_float_handler as an opaque pointer.  */
struct cfc_args
{
  rtx op0, op1;			/* Input: the two CONST_DOUBLEs to compare.  */
  int equal, op0lt, op1lt;	/* Output: comparison results.  */
  int unordered;		/* Output: nonzero if comparison is
				   unordered (a NaN operand or a trap
				   while reading the values).  */
};
1601 static void
1602 check_fold_consts (data)
1603 PTR data;
1605 struct cfc_args *args = (struct cfc_args *) data;
1606 REAL_VALUE_TYPE d0, d1;
1608 /* We may possibly raise an exception while reading the value. */
1609 args->unordered = 1;
1610 REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
1611 REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
1613 /* Comparisons of Inf versus Inf are ordered. */
1614 if (REAL_VALUE_ISNAN (d0)
1615 || REAL_VALUE_ISNAN (d1))
1616 return;
1617 args->equal = REAL_VALUES_EQUAL (d0, d1);
1618 args->op0lt = REAL_VALUES_LESS (d0, d1);
1619 args->op1lt = REAL_VALUES_LESS (d1, d0);
1620 args->unordered = 0;
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
#ifdef HAVE_cc0
      || op0 == cc0_rtx
#endif
      )
    return 0;

  /* Make sure the constant is second.  */
  if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
      || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */

  if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);

  /* Under -ffast-math NaNs are assumed absent, so ORDERED/UNORDERED
     fold to constants outright.  */
  if (flag_fast_math && code == ORDERED)
    return const_true_rtx;

  if (flag_fast_math && code == UNORDERED)
    return const0_rtx;

  /* For non-IEEE floating-point, if the two operands are equal, we know the
     result.  */
  if (rtx_equal_p (op0, op1)
      && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	  || ! FLOAT_MODE_P (GET_MODE (op0)) || flag_fast_math))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    {
      struct cfc_args args;

      /* Setup input for check_fold_consts() */
      args.op0 = op0;
      args.op1 = op1;

      /* A trap while reading the constants means the comparison is
	 unordered.  */
      if (!do_float_handler (check_fold_consts, (PTR) &args))
	args.unordered = 1;

      if (args.unordered)
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      /* Receive output from check_fold_consts() */
      equal = args.equal;
      op0lt = op0ltu = args.op0lt;
      op1lt = op1ltu = args.op1lt;
    }
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
	   && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (op0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (op0);
	  h0u = h0s = CONST_DOUBLE_HIGH (op0);
	}
      else
	{
	  l0u = l0s = INTVAL (op0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (op1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (op1);
	  h1u = h1s = CONST_DOUBLE_HIGH (op1);
	}
      else
	{
	  l1u = l1s = INTVAL (op1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      /* Double-word compare: high words decide, low words break ties.  */
      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }

  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  /* References to the frame plus a constant or labels cannot
	     be zero, but a SYMBOL_REF can due to #pragma weak.  */
	  if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
	       || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      /* On some machines, the ap reg can be 0 sometimes.  */
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const0_rtx;
	  break;

	case NE:
	  if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
	       || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (op1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (op1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (op1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (op1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	default:
	  break;
	}

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplifications is possible.  */

rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      /* For extracts, OP1 is the field width in bits and OP2 its
	 starting position.  */
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;

    case IF_THEN_ELSE:
      /* A constant condition selects one arm outright.  */
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_fast_math)
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && rtx_equal_p (XEXP (op0, 1), op2))
	return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_fast_math)
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && rtx_equal_p (XEXP (op0, 0), op2))
	return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;
	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
						XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const1_rtx)
	    return op1;
	  else if (temp)
	    op0 = temp;

	  /* Look for happy constants in op1 and op2, i.e. arms that
	     make the IF_THEN_ELSE equivalent to the comparison itself
	     (or its reverse).  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  /* Can't safely reverse the comparison; give up on
		     this transformation.  */
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
2022 /* Simplify X, an rtx expression.
2024 Return the simplified expression or NULL if no simplifications
2025 were possible.
2027 This is the preferred entry point into the simplification routines;
2028 however, we still allow passes to call the more specific routines.
2030 Right now GCC has three (yes, three) major bodies of RTL simplficiation
2031 code that need to be unified.
2033 1. fold_rtx in cse.c. This code uses various CSE specific
2034 information to aid in RTL simplification.
2036 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2037 it uses combine specific information to aid in RTL
2038 simplification.
2040 3. The routines in this file.
2043 Long term we want to only have one body of simplification code; to
2044 get to that state I recommend the following steps:
2046 1. Pour over fold_rtx & simplify_rtx and move any simplifications
2047 which are not pass dependent state into these routines.
2049 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2050 use this routine whenever possible.
2052 3. Allow for pass dependent state to be provided to these
2053 routines and add simplifications based on the pass dependent
2054 state. Remove code from cse.c & combine.c that becomes
2055 redundant/dead.
2057 It will take time, but ultimately the compiler will be easier to
2058 maintain and improve. It's totally silly that when we add a
2059 simplification that it needs to be added to 4 places (3 for RTL
2060 simplification and 1 for tree simplification. */
2063 simplify_rtx (x)
2064 rtx x;
2066 enum rtx_code code;
2067 enum machine_mode mode;
2069 mode = GET_MODE (x);
2070 code = GET_CODE (x);
2072 switch (GET_RTX_CLASS (code))
2074 case '1':
2075 return simplify_unary_operation (code, mode,
2076 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2077 case '2':
2078 case 'c':
2079 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2081 case '3':
2082 case 'b':
2083 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2084 XEXP (x, 0), XEXP (x, 1), XEXP (x, 2));
2086 case '<':
2087 return simplify_relational_operation (code,
2088 (GET_MODE (XEXP (x, 0)) != VOIDmode
2089 ? GET_MODE (XEXP (x, 0))
2090 : GET_MODE (XEXP (x, 1))),
2091 XEXP (x, 0), XEXP (x, 1));
2092 default:
2093 return NULL;