/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include <setjmp.h>

#include "rtl.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"

/* Simplification and canonicalization of RTL.  */

/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.

   ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
   a header file so that their definitions can be shared with the
   copy in cse.c.  Until then, do not change these macros without
   also changing the copy in cse.c.  */

#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)

/* Similar, but also allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P, however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */

#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))

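/* For example, on a host with a 64-bit HOST_WIDE_INT, a low word of
   0xfffffffffffffffb looks negative, so HWI_SIGN_EXTEND gives a high
   word of -1 and the pair represents -5 in the doubled precision; a
   low word with the top bit clear gets a high word of 0.  */
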
static rtx simplify_plus_minus	PARAMS ((enum rtx_code,
					 enum machine_mode, rtx, rtx));
static void check_fold_consts	PARAMS ((PTR));

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);

  if (tem)
    return tem;

  /* Handle addition and subtraction of CONST_INT specially.  Otherwise,
     just form the operation.  */

  if (code == PLUS && GET_CODE (op1) == CONST_INT
      && GET_MODE (op0) != VOIDmode)
    return plus_constant (op0, INTVAL (op1));
  else if (code == MINUS && GET_CODE (op1) == CONST_INT
	   && GET_MODE (op0) != VOIDmode)
    return plus_constant (op0, - INTVAL (op1));
  else
    return gen_rtx_fmt_ee (code, mode, op0, op1);
}

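/* For example, simplify_gen_binary (PLUS, SImode, GEN_INT (4), reg)
   first swaps the operands, since PLUS is commutative and constants
   belong second, and then folds through plus_constant, giving
   (plus:SI reg (const_int 4)).  */
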
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode the comparison is done in.  */

rtx
simplify_gen_relational (code, mode, cmp_mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     enum machine_mode cmp_mode;
     rtx op0, op1;
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (x, old, new)
     rtx x;
     rtx old;
     rtx new;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op = (XEXP (x, 0) == old
		  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

	return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
	simplify_gen_binary (code, mode,
			     simplify_replace_rtx (XEXP (x, 0), old, new),
			     simplify_replace_rtx (XEXP (x, 1), old, new));
    case '<':
      return
	simplify_gen_relational (code, mode,
				 (GET_MODE (XEXP (x, 0)) != VOIDmode
				  ? GET_MODE (XEXP (x, 0))
				  : GET_MODE (XEXP (x, 1))),
				 simplify_replace_rtx (XEXP (x, 0), old, new),
				 simplify_replace_rtx (XEXP (x, 1), old, new));

    case '3':
    case 'b':
      return
	simplify_gen_ternary (code, mode, GET_MODE (XEXP (x, 0)),
			      simplify_replace_rtx (XEXP (x, 0), old, new),
			      simplify_replace_rtx (XEXP (x, 1), old, new),
			      simplify_replace_rtx (XEXP (x, 2), old, new));

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  rtx exp;
	  exp = simplify_gen_subreg (GET_MODE (x),
				     simplify_replace_rtx (SUBREG_REG (x),
							   old, new),
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  if (exp)
	    x = exp;
	}
      return x;

    default:
      if (GET_CODE (x) == MEM)
	{
	  /* We can't use change_address here, since it verifies memory
	     addresses for correctness.  We don't want such a check, since
	     we may handle addresses that were previously invalid (such as
	     those in push instructions), and it is the caller's job to
	     verify whether the resulting insn matches.  */
	  rtx addr = simplify_replace_rtx (XEXP (x, 0), old, new);
	  rtx mem;

	  if (XEXP (x, 0) != addr)
	    {
	      mem = gen_rtx_MEM (GET_MODE (x), addr);
	      MEM_COPY_ATTRIBUTES (mem, x);
	    }
	  else
	    mem = x;

	  return mem;
	}

      return x;
    }
  return x;
}

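/* Usage sketch (the register numbers are hypothetical): given
   X = (mem:SI (plus:SI (reg:SI 100) (const_int 4))), calling
   simplify_replace_rtx (X, (reg:SI 100), (const_int 12)) rebuilds the
   address recursively, the inner PLUS folds to (const_int 16) via
   simplify_gen_binary, and a fresh MEM is built around it with no
   validity check, as the comment above explains.  */
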
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

#if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_INT (d, lv, hv, mode);
#else
      if (hv < 0)
	{
	  d = (double) (~ hv);
	  d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
		* (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
	  d += (double) (unsigned HOST_WIDE_INT) (~ lv);
	  d = (- d - 1.0);
	}
      else
	{
	  d = (double) hv;
	  d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
		* (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
	  d += (double) (unsigned HOST_WIDE_INT) lv;
	}
#endif  /* REAL_ARITHMETIC */

      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
#else
      d = (double) (unsigned HOST_WIDE_INT) hv;
      d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
	    * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
      d += (double) (unsigned HOST_WIDE_INT) lv;
#endif  /* REAL_ARITHMETIC */

      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
#endif

  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      register HOST_WIDE_INT arg0 = INTVAL (op);
      register HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

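	/* Illustration: for arg0 == 12 (binary 1100), arg0 & -arg0
	   isolates the lowest set bit, giving 4; exact_log2 (4) is 2,
	   so val is 3, matching ffs.  For arg0 == 0, exact_log2
	   returns -1 and val is 0.  */
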
	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

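	/* Illustration: sign-extending the QImode value 0x80 first
	   masks to val == 0x80, then sees bit 7 set and subtracts
	   0x100, giving -128; 0x7f stays 0x7f because its sign bit
	   is clear.  */
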
	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	  return 0;

	default:
	  abort ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
	  else
	    lv = exact_log2 (l1 & (-l1)) + 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (op) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d;
      jmp_buf handler;
      rtx x;

      if (setjmp (handler))
	/* There used to be a warning here, but that is inadvisable.
	   People may want to cause traps, and the natural way
	   to do it should not get a warning.  */
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;

	case ABS:
	  if (REAL_VALUE_NEGATIVE (d))
	    d = REAL_VALUE_NEGATE (d);
	  break;

	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;

	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;

	case FIX:
	  d = REAL_VALUE_RNDZINT (d);
	  break;

	case UNSIGNED_FIX:
	  d = REAL_VALUE_UNSIGNED_RNDZINT (d);
	  break;

	case SQRT:
	  return 0;

	default:
	  abort ();
	}

      x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
      set_float_handler (NULL);
      return x;
    }

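  /* The setjmp/set_float_handler pairing above is this file's standard
     guard for host floating-point traps: if evaluating D signals,
     control returns through the handler with setjmp yielding nonzero,
     and we simply decline to fold rather than crash.  */
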
  else if (GET_CODE (op) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      REAL_VALUE_TYPE d;
      jmp_buf handler;
      HOST_WIDE_INT val;

      if (setjmp (handler))
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case FIX:
	  val = REAL_VALUE_FIX (d);
	  break;

	case UNSIGNED_FIX:
	  val = REAL_VALUE_UNSIGNED_FIX (d);
	  break;

	default:
	  abort ();
	}

      set_float_handler (NULL);

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
#endif
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return gen_rtx_fmt_ee (reversed,
				   op_mode, XEXP (op, 0), XEXP (op, 1));
	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);
	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the Vax).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

#ifdef POINTERS_EXTEND_UNSIGNED
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

#ifdef POINTERS_EXTEND_UNSIGNED
	case ZERO_EXTEND:
	  if (POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
	  break;
#endif

	default:
	  break;
	}

      return 0;
    }
}

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;
      jmp_buf handler;

      if (setjmp (handler))
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

#ifdef REAL_ARITHMETIC
#ifndef REAL_INFINITY
      if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
	return 0;
#endif
      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
#else
      switch (code)
	{
	case PLUS:
	  value = f0 + f1;
	  break;
	case MINUS:
	  value = f0 - f1;
	  break;
	case MULT:
	  value = f0 * f1;
	  break;
	case DIV:
#ifndef REAL_INFINITY
	  if (f1 == 0)
	    return 0;
#endif
	  value = f0 / f1;
	  break;
	case SMIN:
	  value = MIN (f0, f1);
	  break;
	case SMAX:
	  value = MAX (f0, f1);
	  break;
	default:
	  abort ();
	}
#endif

      value = real_value_truncate (mode, value);
      set_float_handler (NULL);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
      && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (op0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
      else
	l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (op1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
      else
	l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* .. fall through ...  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:  case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:  case ASHIFTRT:
	case ASHIFT:
	case ROTATE:  case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

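  /* The double-word helpers used above (add_double and friends, defined
     in fold-const.c) follow the usual carry scheme; in outline:

	lv = l1 + l2;
	hv = h1 + h2 + ((unsigned HOST_WIDE_INT) lv < l1);

     so, e.g., (h1,l1) == (0, ~0) plus (h2,l2) == (0, 1) carries into
     the high word and yields (1, 0).  */
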
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* In IEEE floating point, x+0 is not the same as x.  Similarly
	     for the other optimizations below.  */
	  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
	      && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
	    break;

	  if (op1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && GET_CODE (op1) == CONST_INT
	      && INTVAL (op1) == 1)
	    return gen_rtx_NEG (mode, XEXP (op0, 0));

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     a real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

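	  /* Two instances of the distribution above: (plus (mult x 2) x)
	     becomes (mult x 3), since a multiply was already present,
	     while (plus (ashift x 2) x) is left alone: it would fold to
	     (mult x 5), and we refuse to introduce a multiply that was
	     not there before.  */
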
	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
	      && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	    return tem;
	  break;

	case COMPARE:
#ifdef HAVE_cc0
	  /* When using cc0, convert (compare FOO (const_int 0)) to FOO;
	     without cc0 we want to leave it as a COMPARE so we can
	     distinguish it from a register-register copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && op1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }

	  break;

	case MINUS:
	  /* None of these optimizations can be done for IEEE
	     floating point.  */
	  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
	      && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
	    break;

	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (op0, op1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  */
	  if (op0 == CONST0_RTX (mode))
	    return gen_rtx_NEG (mode, op1);

	  /* (-1 - a) is ~a.  */
	  if (op0 == constm1_rtx)
	    return gen_rtx_NOT (mode, op1);

	  /* Subtracting 0 has no effect.  */
	  if (op1 == CONST0_RTX (mode))
	    return op0;

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     a real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* (a - (-b)) -> (a + b).  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
	      && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return plus_constant (op0, - INTVAL (op1));

	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 1)));
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 0)));
	    }
	  break;

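	  /* The last identity in bits: X & Y is a submask of X, so the
	     subtraction borrows nowhere and just clears those bits,
	     e.g. x == 1101, y == 1011 gives x - (x & y) == 1101 - 1001
	     == 0100, which is exactly x & ~y.  */
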
	case MULT:
	  if (op1 == constm1_rtx)
	    {
	      tem = simplify_unary_operation (NEG, mode, op0, mode);

	      return tem ? tem : gen_rtx_NEG (mode, op0);
	    }

	  /* In IEEE floating point, x*0 is not always 0.  */
	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && op1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for nans.
	     However, ANSI says we can drop signals,
	     so we can do this anyway.  */
	  if (op1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (op1) == CONST_INT
	      && (val = exact_log2 (INTVAL (op1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));

	  if (GET_CODE (op1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
	    {
	      REAL_VALUE_TYPE d;
	      jmp_buf handler;
	      int op1is2, op1ism1;

	      if (setjmp (handler))
		return 0;

	      set_float_handler (handler);
	      REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
	      op1is2 = REAL_VALUES_EQUAL (d, dconst2);
	      op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
	      set_float_handler (NULL);

	      /* x*2 is x+x and x*(-1) is -x */
	      if (op1is2 && GET_MODE (op0) == mode)
		return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

	      else if (op1ism1 && GET_MODE (op0) == mode)
		return gen_rtx_NEG (mode, op0);
	    }

	  break;

	case IOR:
	  if (op1 == const0_rtx)
	    return op0;
	  if (GET_CODE (op1) == CONST_INT
	      && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
	    return op1;
	  if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  break;

	case XOR:
	  if (op1 == const0_rtx)
	    return op0;
	  if (GET_CODE (op1) == CONST_INT
	      && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
	    return gen_rtx_NOT (mode, op0);
	  if (op0 == op1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;

	case AND:
	  if (op1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  if (GET_CODE (op1) == CONST_INT
	      && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
	    return op0;
	  if (op0 == op1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;

	case UDIV:
	  /* Convert divide by power of two into shift (divide by 1 handled
	     below).  */
	  if (GET_CODE (op1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (op1))) > 0)
	    return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));

	  /* ... fall through ...  */

	case DIV:
	  if (op1 == CONST1_RTX (mode))
	    return op0;

	  /* In IEEE floating point, 0/x is not always 0.  */
	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && op0 == CONST0_RTX (mode)
	      && ! side_effects_p (op1))
	    return op0;

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
	  /* Change division by a constant into multiplication.  Only do
	     this with -funsafe-math-optimizations.  */
	  else if (GET_CODE (op1) == CONST_DOUBLE
		   && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
		   && op1 != CONST0_RTX (mode)
		   && flag_unsafe_math_optimizations)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, op1);

	      if (! REAL_VALUES_EQUAL (d, dconst0))
		{
#if defined (REAL_ARITHMETIC)
		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
		  return gen_rtx_MULT (mode, op0,
				       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
#else
		  return
		    gen_rtx_MULT (mode, op0,
				  CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
#endif
		}
	    }
#endif
	  break;

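	  /* E.g. with -funsafe-math-optimizations, (div:DF x 2.0)
	     becomes (mult:DF x 0.5); the reciprocal is computed at
	     compile time, which is why OP1 must be a nonzero float
	     constant.  */
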
	case UMOD:
	  /* Handle modulus by power of two (mod with 1 handled below).  */
	  if (GET_CODE (op1) == CONST_INT
	      && exact_log2 (INTVAL (op1)) > 0)
	    return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));

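	  /* E.g. an unsigned x % 8 becomes x & 7: for a power of two,
	     the remainder is just the low-order bits.  */
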
	  /* ... fall through ...  */

	case MOD:
	  if ((op0 == const0_rtx || op1 == const1_rtx)
	      && ! side_effects_p (op0) && ! side_effects_p (op1))
	    return const0_rtx;
	  break;

	case ROTATERT:
	case ROTATE:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* ... fall through ...  */

	case ASHIFT:
	case ASHIFTRT:
	case LSHIFTRT:
	  if (op1 == const0_rtx)
	    return op0;
	  if (op0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;

	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
	      && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width - 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (op1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMIN:
	  if (op1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMAX:
	  if (op1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	default:
	  abort ();
	}

      return 0;
    }

  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (op0);
  arg1 = INTVAL (op1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
	 what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
	 Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

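    /* C leaves >> on negative signed values implementation-defined, so
       an older bootstrap compiler may shift in zeros; the explicit OR
       above forces the arithmetic (sign-propagating) result either
       way.  */
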
    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}

/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

static rtx
simplify_plus_minus (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx ops[8];
  int negs[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
  int first = 1, negate = 0, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);

  changed = 1;
  while (changed)
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	switch (GET_CODE (ops[i]))
	  {
	  case PLUS:
	  case MINUS:
	    if (n_ops == 7)
	      return 0;

	    ops[n_ops] = XEXP (ops[i], 1);
	    negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
	    ops[i] = XEXP (ops[i], 0);
	    input_ops++;
	    changed = 1;
	    break;

	  case NEG:
	    ops[i] = XEXP (ops[i], 0);
	    negs[i] = ! negs[i];
	    changed = 1;
	    break;

	  case CONST:
	    ops[i] = XEXP (ops[i], 0);
	    input_consts++;
	    changed = 1;
	    break;

	  case NOT:
	    /* ~a -> (-a - 1) */
	    if (n_ops != 7)
	      {
		ops[n_ops] = constm1_rtx;
		negs[n_ops++] = negs[i];
		ops[i] = XEXP (ops[i], 0);
		negs[i] = ! negs[i];
		changed = 1;
	      }
	    break;

	  case CONST_INT:
	    if (negs[i])
	      ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
	    break;

	  default:
	    break;
	  }
    }

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2)
    return 0;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  changed = 1;
  while (changed)
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  if (ops[i] != 0 && ops[j] != 0
	      && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
	    {
	      rtx lhs = ops[i], rhs = ops[j];
	      enum rtx_code ncode = PLUS;

	      if (negs[i] && ! negs[j])
		lhs = ops[j], rhs = ops[i], ncode = MINUS;
	      else if (! negs[i] && negs[j])
		ncode = MINUS;

	      tem = simplify_binary_operation (ncode, mode, lhs, rhs);
	      if (tem)
		{
		  ops[i] = tem, ops[j] = 0;
		  negs[i] = negs[i] && negs[j];
		  if (GET_CODE (tem) == NEG)
		    ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];

		  if (GET_CODE (ops[i]) == CONST_INT && negs[i])
		    ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
		  changed = 1;
		}
	    }

      first = 0;
    }

  /* Pack all the operands to the lower-numbered entries and give up if
     we didn't reduce the number of operands we had.  Make sure we
     count a CONST as two operands.  If we have the same number of
     operands, but have made more CONSTs than we had, this is also
     an improvement, so accept it.  */

  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j] != 0)
      {
	ops[i] = ops[j], negs[i++] = negs[j];
	if (GET_CODE (ops[j]) == CONST)
	  n_consts++;
      }

  if (i + n_consts > input_ops
      || (i + n_consts == input_ops && n_consts <= input_consts))
    return 0;

  n_ops = i;

  /* If we have a CONST_INT, put it last.  */
  for (i = 0; i < n_ops - 1; i++)
    if (GET_CODE (ops[i]) == CONST_INT)
      {
	tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i], ops[i] = tem;
	j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
      }

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */
  for (i = 0; i < n_ops && negs[i]; i++)
    ;

  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
	negs[i] = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0], ops[0] = ops[i], ops[i] = tem;
      j = negs[0], negs[0] = negs[i], negs[i] = j;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0];
  for (i = 1; i < n_ops; i++)
    result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);

  return negate ? gen_rtx_NEG (mode, result) : result;
}

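/* Worked example: (minus (plus a b) (plus a c)) expands to the operand
   list a, b, -a, -c; the pairwise pass folds a against -a to 0, the
   zero is absorbed, and the rebuild yields (minus b c).  */
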
struct cfc_args
{
  rtx op0, op1;			/* Input */
  int equal, op0lt, op1lt;	/* Output */
  int unordered;
};

static void
check_fold_consts (data)
     PTR data;
{
  struct cfc_args *args = (struct cfc_args *) data;
  REAL_VALUE_TYPE d0, d1;

  /* We may possibly raise an exception while reading the value.  */
  args->unordered = 1;
  REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
  REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);

  /* A NaN operand makes the comparison unordered, so leave the
     ordered results unset in that case.  */
  if (REAL_VALUE_ISNAN (d0)
      || REAL_VALUE_ISNAN (d1))
    return;
  args->equal = REAL_VALUES_EQUAL (d0, d1);
  args->op0lt = REAL_VALUES_LESS (d0, d1);
  args->op1lt = REAL_VALUES_LESS (d1, d0);
  args->unordered = 0;
}

/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
#ifdef HAVE_cc0
      || op0 == cc0_rtx
#endif
      )
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */

  if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);

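  /* The unsigned restriction matters: in QImode, 1 LTU 255 is true,
     yet (minus 1 255) wraps to 2 and the signed test 2 < 0 is false,
     so folding through the subtraction would give the wrong answer.  */
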
  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;

  /* For non-IEEE floating-point, if the two operands are equal, we know the
     result.  */
  if (rtx_equal_p (op0, op1)
      && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	  || ! FLOAT_MODE_P (GET_MODE (op0))
	  || flag_unsafe_math_optimizations))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    {
      struct cfc_args args;

      /* Set up input for check_fold_consts ().  */
      args.op0 = op0;
      args.op1 = op1;

      if (!do_float_handler (check_fold_consts, (PTR) &args))
	args.unordered = 1;

      if (args.unordered)
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      /* Receive output from check_fold_consts ().  */
      equal = args.equal;
      op0lt = op0ltu = args.op0lt;
      op1lt = op1ltu = args.op1lt;
    }
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
	   && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (op0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (op0);
	  h0u = h0s = CONST_DOUBLE_HIGH (op0);
	}
      else
	{
	  l0u = l0s = INTVAL (op0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (op1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (op1);
	  h1u = h1s = CONST_DOUBLE_HIGH (op1);
	}
      else
	{
	  l1u = l1s = INTVAL (op1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }

  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  /* References to the frame plus a constant or labels cannot
	     be zero, but a SYMBOL_REF can due to #pragma weak.  */
	  if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
	       || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      /* On some machines, the ap reg can be 0 sometimes.  */
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const0_rtx;
	  break;

	case NE:
	  if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
	       || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (op1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (op1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (op1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (op1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	default:
	  break;
	}

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}

2060 /* Simplify CODE, an operation with result mode MODE and three operands,
2061 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2062 a constant. Return 0 if no simplifications is possible. */
2065 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2066 enum rtx_code code;
2067 enum machine_mode mode, op0_mode;
2068 rtx op0, op1, op2;
2070 unsigned int width = GET_MODE_BITSIZE (mode);
2072 /* VOIDmode means "infinite" precision. */
2073 if (width == 0)
2074 width = HOST_BITS_PER_WIDE_INT;
2076 switch (code)
2078 case SIGN_EXTRACT:
2079 case ZERO_EXTRACT:
2080 if (GET_CODE (op0) == CONST_INT
2081 && GET_CODE (op1) == CONST_INT
2082 && GET_CODE (op2) == CONST_INT
2083 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2084 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2086 /* Extracting a bit-field from a constant.  */
2087 HOST_WIDE_INT val = INTVAL (op0);
2089 if (BITS_BIG_ENDIAN)
2090 val >>= (GET_MODE_BITSIZE (op0_mode)
2091 - INTVAL (op2) - INTVAL (op1));
2092 else
2093 val >>= INTVAL (op2);
2095 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2097 /* First zero-extend. */
2098 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2099 /* If desired, propagate sign bit. */
2100 if (code == SIGN_EXTRACT
2101 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2102 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2105 /* Clear the bits that don't belong in our mode,
2106 unless they and our sign bit are all one.
2107 So we get either a reasonable negative value or a reasonable
2108 unsigned value for this mode. */
2109 if (width < HOST_BITS_PER_WIDE_INT
2110 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2111 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2112 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2114 return GEN_INT (val);
2116 break;
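/* A worked example of the extraction above (illustrative, assuming
   !BITS_BIG_ENDIAN): (zero_extract:SI (const_int 0xab) (const_int 4)
   (const_int 4)) shifts 0xab right by 4 and masks to four bits,
   giving (const_int 10); SIGN_EXTRACT on the same operands sees bit 3
   of the field set and propagates it, giving (const_int -6).  */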
2118 case IF_THEN_ELSE:
2119 if (GET_CODE (op0) == CONST_INT)
2120 return op0 != const0_rtx ? op1 : op2;
2122 /* Convert a != b ? a : b and a == b ? b : a to "a".  */
2123 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2124 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2125 && rtx_equal_p (XEXP (op0, 0), op1)
2126 && rtx_equal_p (XEXP (op0, 1), op2))
2127 return op1;
2128 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2129 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2130 && rtx_equal_p (XEXP (op0, 1), op1)
2131 && rtx_equal_p (XEXP (op0, 0), op2))
2132 return op2;
2133 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2135 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2136 ? GET_MODE (XEXP (op0, 1))
2137 : GET_MODE (XEXP (op0, 0)));
2138 rtx temp;
2139 if (cmp_mode == VOIDmode)
2140 cmp_mode = op0_mode;
2141 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2142 XEXP (op0, 0), XEXP (op0, 1));
2144 /* See if any simplifications were possible. */
2145 if (temp == const0_rtx)
2146 return op2;
2147 else if (temp == const1_rtx)
2148 return op1;
2149 else if (temp)
2150 op0 = temp;
2152 /* See whether op1 and op2 are constants that let us reduce this to a bare comparison (possibly reversed).  */
2153 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2155 HOST_WIDE_INT t = INTVAL (op1);
2156 HOST_WIDE_INT f = INTVAL (op2);
2158 if (t == STORE_FLAG_VALUE && f == 0)
2159 code = GET_CODE (op0);
2160 else if (t == 0 && f == STORE_FLAG_VALUE)
2162 enum rtx_code tmp;
2163 tmp = reversed_comparison_code (op0, NULL_RTX);
2164 if (tmp == UNKNOWN)
2165 break;
2166 code = tmp;
2168 else
2169 break;
2171 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2174 break;
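/* Illustrative instances of the IF_THEN_ELSE folds above (using
   hypothetical pseudos and placeholders X, Y):
   (if_then_else (ne (reg:SI 60) (reg:SI 61)) (reg:SI 60) (reg:SI 61))
   reduces to (reg:SI 60), and on a target where STORE_FLAG_VALUE is 1,
   (if_then_else (lt x y) (const_int 1) (const_int 0)) reduces to the
   bare comparison (lt x y).  */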
2176 default:
2177 abort ();
2180 return 0;
2183 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2184 Return 0 if no simplification is possible.  */
2185 rtx
2186 simplify_subreg (outermode, op, innermode, byte)
2187 rtx op;
2188 unsigned int byte;
2189 enum machine_mode outermode, innermode;
2191 /* A little sanity checking.  */
2192 if (innermode == VOIDmode || outermode == VOIDmode
2193 || innermode == BLKmode || outermode == BLKmode)
2194 abort ();
2196 if (GET_MODE (op) != innermode
2197 && GET_MODE (op) != VOIDmode)
2198 abort ();
2200 if (byte % GET_MODE_SIZE (outermode)
2201 || byte >= GET_MODE_SIZE (innermode))
2202 abort ();
2204 if (outermode == innermode && !byte)
2205 return op;
2207 /* Attempt to simplify a constant to a non-SUBREG expression.  */
2208 if (CONSTANT_P (op))
2210 int offset, part;
2211 unsigned HOST_WIDE_INT val;
2213 /* ??? This code is partly redundant with code below, but can handle
2214 the subregs of floats and similar corner cases.
2215 Later we should move all simplification code here and rewrite
2216 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2217 using SIMPLIFY_SUBREG. */
2218 if (subreg_lowpart_offset (outermode, innermode) == byte)
2220 rtx new = gen_lowpart_if_possible (outermode, op);
2221 if (new)
2222 return new;
2225 /* The comment above applies here as well.  */
2226 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2227 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2228 && GET_MODE_CLASS (outermode) == MODE_INT)
2230 rtx new = constant_subword (op,
2231 (byte / UNITS_PER_WORD),
2232 innermode);
2233 if (new)
2234 return new;
2237 offset = byte * BITS_PER_UNIT;
2238 switch (GET_CODE (op))
2240 case CONST_DOUBLE:
2241 if (GET_MODE (op) != VOIDmode)
2242 break;
2244 /* We can't handle this case yet. */
2245 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2246 return NULL;
2248 part = offset >= HOST_BITS_PER_WIDE_INT;
2249 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2250 && BYTES_BIG_ENDIAN)
2251 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2252 && WORDS_BIG_ENDIAN))
2253 part = !part;
2254 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2255 offset %= HOST_BITS_PER_WIDE_INT;
2257 /* We've already picked the word we want from a double, so
2258 pretend this is actually an integer. */
2259 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2261 /* FALLTHROUGH */
2262 case CONST_INT:
2263 if (GET_CODE (op) == CONST_INT)
2264 val = INTVAL (op);
2266 /* We don't handle synthesizing non-integral constants yet.  */
2267 if (GET_MODE_CLASS (outermode) != MODE_INT)
2268 return NULL;
2270 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2272 if (WORDS_BIG_ENDIAN)
2273 offset = (GET_MODE_BITSIZE (innermode)
2274 - GET_MODE_BITSIZE (outermode) - offset);
2275 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2276 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2277 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2278 - 2 * (offset % BITS_PER_WORD));
2281 if (offset >= HOST_BITS_PER_WIDE_INT)
2282 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2283 else
2285 val >>= offset;
2286 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2287 val = trunc_int_for_mode (val, outermode);
2288 return GEN_INT (val);
2290 default:
2291 break;
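/* A worked example of the constant path (illustrative, assuming a
   little-endian target with 4-byte words):
   (subreg:QI (const_int 0x1234) 1) with SImode as INNERMODE computes
   offset = 8, shifts the value right by 8 and truncates to QImode,
   yielding (const_int 0x12).  */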
2295 /* Changing mode twice with SUBREG => just change it once,
2296 or not at all if changing back to op's starting mode. */
2297 if (GET_CODE (op) == SUBREG)
2299 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2300 int final_offset = byte + SUBREG_BYTE (op);
2301 rtx new;
2303 if (outermode == innermostmode
2304 && byte == 0 && SUBREG_BYTE (op) == 0)
2305 return SUBREG_REG (op);
2307 /* SUBREG_BYTE represents the offset, as if the value were stored
2308 in memory.  The irritating exception is the paradoxical subreg,
2309 where we define SUBREG_BYTE to be 0; on big-endian machines this
2310 value should really be negative.  For the moment, undo this exception. */
2311 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2313 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2314 if (WORDS_BIG_ENDIAN)
2315 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2316 if (BYTES_BIG_ENDIAN)
2317 final_offset += difference % UNITS_PER_WORD;
2319 if (SUBREG_BYTE (op) == 0
2320 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2322 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2323 if (WORDS_BIG_ENDIAN)
2324 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2325 if (BYTES_BIG_ENDIAN)
2326 final_offset += difference % UNITS_PER_WORD;
2329 /* See whether the resulting subreg will be paradoxical.  */
2330 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2332 /* In nonparadoxical subregs we can't handle negative offsets. */
2333 if (final_offset < 0)
2334 return NULL_RTX;
2335 /* Bail out if the resulting subreg would be invalid.  */
2336 if (final_offset % GET_MODE_SIZE (outermode)
2337 || final_offset >= GET_MODE_SIZE (innermostmode))
2338 return NULL;
2340 else
2342 int offset = 0;
2343 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2345 /* In a paradoxical subreg, see if we are still looking at the lower
2346 part.  If so, our SUBREG_BYTE will be 0.  */
2347 if (WORDS_BIG_ENDIAN)
2348 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2349 if (BYTES_BIG_ENDIAN)
2350 offset += difference % UNITS_PER_WORD;
2351 if (offset == final_offset)
2352 final_offset = 0;
2353 else
2354 return NULL;
2357 /* Recurse for further possible simplifications.  */
2358 new = simplify_subreg (outermode, SUBREG_REG (op),
2359 GET_MODE (SUBREG_REG (op)),
2360 final_offset);
2361 if (new)
2362 return new;
2363 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
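/* For example (illustrative, with a hypothetical pseudo register):
   (subreg:QI (subreg:HI (reg:SI 100) 0) 0) collapses here to the
   single (subreg:QI (reg:SI 100) 0), the two byte offsets (both 0)
   having been added into FINAL_OFFSET.  */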
2366 /* SUBREG of a hard register => just change the register number
2367 and/or mode. If the hard register is not valid in that mode,
2368 suppress this simplification. If the hard register is the stack,
2369 frame, or argument pointer, leave this as a SUBREG. */
2371 if (REG_P (op)
2372 && (! REG_FUNCTION_VALUE_P (op)
2373 || ! rtx_equal_function_value_matters)
2374 #ifdef CLASS_CANNOT_CHANGE_MODE
2375 && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
2376 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2377 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
2378 && (TEST_HARD_REG_BIT
2379 (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
2380 REGNO (op))))
2381 #endif
2382 && REGNO (op) < FIRST_PSEUDO_REGISTER
2383 && ((reload_completed && !frame_pointer_needed)
2384 || (REGNO (op) != FRAME_POINTER_REGNUM
2385 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2386 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2387 #endif
2389 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2390 && REGNO (op) != ARG_POINTER_REGNUM
2391 #endif
2392 && REGNO (op) != STACK_POINTER_REGNUM)
2394 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2395 0);
2397 /* ??? We do allow it if the current REG is not valid for
2398 its mode. This is a kludge to work around how float/complex
2399 arguments are passed on 32-bit Sparc and should be fixed. */
2400 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2401 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2402 return gen_rtx_REG (outermode, final_regno);
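/* Illustrative case, on a hypothetical 32-bit little-endian target
   where a DImode value occupies two consecutive hard registers:
   (subreg:SI (reg:DI 2) 4) becomes (reg:SI 3), provided
   HARD_REGNO_MODE_OK accepts SImode in register 3.  */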
2405 /* If we have a SUBREG of a MEM, make a new MEM in OUTERMODE with
2406 the address adjusted by BYTE, and use it in place of the SUBREG.
2407 Don't do this if the MEM has a mode-dependent address
2408 or if we would be widening it. */
2410 if (GET_CODE (op) == MEM
2411 && ! mode_dependent_address_p (XEXP (op, 0))
2412 /* Allow splitting of volatile memory references in case we don't
2413 have an instruction to move the whole thing. */
2414 && (! MEM_VOLATILE_P (op)
2415 || (mov_optab->handlers[(int) innermode].insn_code
2416 == CODE_FOR_nothing))
2417 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2419 rtx new;
2421 new = gen_rtx_MEM (outermode, plus_constant (XEXP (op, 0), byte));
2422 MEM_COPY_ATTRIBUTES (new, op);
2423 return new;
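/* For example (illustrative): (subreg:SI (mem:DI (reg:SI 101)) 4)
   becomes (mem:SI (plus:SI (reg:SI 101) (const_int 4))), provided
   the address is not mode-dependent and the reference is not
   volatile.  */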
2426 /* Handle complex values represented as a CONCAT
2427 of real and imaginary parts. */
2428 if (GET_CODE (op) == CONCAT)
2430 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2431 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2432 unsigned int final_offset;
2433 rtx res;
2435 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2436 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2437 if (res)
2438 return res;
2439 /* We can at least simplify it by referring directly to the relevant part.  */
2440 return gen_rtx_SUBREG (outermode, part, final_offset);
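/* For example (illustrative, assuming 4-byte SFmode):
   (subreg:SF (concat:SC (reg:SF 102) (reg:SF 103)) 4) selects the
   imaginary half and reduces all the way to (reg:SF 103).  */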
2443 return NULL_RTX;
2445 /* Make a SUBREG operation, or a simpler equivalent rtx if it folds.  */
2447 rtx
2448 simplify_gen_subreg (outermode, op, innermode, byte)
2449 rtx op;
2450 unsigned int byte;
2451 enum machine_mode outermode, innermode;
2453 rtx new;
2454 /* A little sanity checking.  */
2455 if (innermode == VOIDmode || outermode == VOIDmode
2456 || innermode == BLKmode || outermode == BLKmode)
2457 abort ();
2459 if (GET_MODE (op) != innermode
2460 && GET_MODE (op) != VOIDmode)
2461 abort ();
2463 if (byte % GET_MODE_SIZE (outermode)
2464 || byte >= GET_MODE_SIZE (innermode))
2465 abort ();
2467 new = simplify_subreg (outermode, op, innermode, byte);
2468 if (new)
2469 return new;
2471 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2472 return NULL_RTX;
2474 return gen_rtx_SUBREG (outermode, op, byte);
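/* Example use (illustrative, assuming a little-endian target):
   simplify_gen_subreg (QImode, GEN_INT (0x1234), SImode, 0) folds
   the constant through the lowpart path above and returns
   (const_int 0x34), with no SUBREG left behind.  */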
2476 /* Simplify X, an rtx expression.
2478 Return the simplified expression or NULL if no simplifications
2479 were possible.
2481 This is the preferred entry point into the simplification routines;
2482 however, we still allow passes to call the more specific routines.
2484 Right now GCC has three (yes, three) major bodies of RTL simplification
2485 code that need to be unified.
2487 1. fold_rtx in cse.c. This code uses various CSE specific
2488 information to aid in RTL simplification.
2490 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2491 it uses combine specific information to aid in RTL
2492 simplification.
2494 3. The routines in this file.
2497 Long term we want to only have one body of simplification code; to
2498 get to that state I recommend the following steps:
2500 1. Pore over fold_rtx & simplify_rtx and move any simplifications
2501 that do not depend on pass-specific state into these routines.
2503 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2504 use this routine whenever possible.
2506 3. Allow for pass-dependent state to be provided to these
2507 routines and add simplifications based on that pass-dependent
2508 state.  Remove code from cse.c & combine.c that becomes
2509 redundant/dead.
2511 It will take time, but ultimately the compiler will be easier to
2512 maintain and improve.  It's totally silly that when we add a
2513 simplification it needs to be added in four places (three for RTL
2514 simplification and one for tree simplification).  */
2516 rtx
2517 simplify_rtx (x)
2518 rtx x;
2520 enum rtx_code code = GET_CODE (x);
2521 enum machine_mode mode = GET_MODE (x);
2523 switch (GET_RTX_CLASS (code))
2525 case '1':
2526 return simplify_unary_operation (code, mode,
2527 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2528 case 'c':
2529 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
2531 rtx tem;
2533 tem = XEXP (x, 0);
2534 XEXP (x, 0) = XEXP (x, 1);
2535 XEXP (x, 1) = tem;
2536 return simplify_binary_operation (code, mode,
2537 XEXP (x, 0), XEXP (x, 1));
2540 case '2':
2541 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2543 case '3':
2544 case 'b':
2545 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2546 XEXP (x, 0), XEXP (x, 1),
2547 XEXP (x, 2));
2549 case '<':
2550 return simplify_relational_operation (code,
2551 ((GET_MODE (XEXP (x, 0))
2552 != VOIDmode)
2553 ? GET_MODE (XEXP (x, 0))
2554 : GET_MODE (XEXP (x, 1))),
2555 XEXP (x, 0), XEXP (x, 1));
2556 case 'x':
2557 /* The only case we try to handle is a SUBREG. */
2558 if (code == SUBREG)
2559 return simplify_gen_subreg (mode, SUBREG_REG (x),
2560 GET_MODE (SUBREG_REG (x)),
2561 SUBREG_BYTE (x));
2562 return NULL;
2563 default:
2564 return NULL;
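/* Example use (illustrative):
   simplify_rtx (gen_rtx_PLUS (SImode, GEN_INT (2), GEN_INT (3)))
   dispatches through the 'c' arm above to simplify_binary_operation
   and returns (const_int 5).  */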