/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
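
/* A minimal illustration of the macro above (an editorial sketch, not
   part of the original file; the names below are hypothetical).  A
   double-word constant is carried as an unsigned low word plus a
   signed high word, and HWI_SIGN_EXTEND manufactures the high word
   that a sign-extended low word implies.  */
#if 0
static void
hwi_sign_extend_example ()
{
  HOST_WIDE_INT low = (HOST_WIDE_INT) -2;       /* sign bit set */
  HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);   /* yields (HOST_WIDE_INT) -1 */
  HOST_WIDE_INT low2 = 7;                       /* sign bit clear */
  HOST_WIDE_INT high2 = HWI_SIGN_EXTEND (low2); /* yields 0 */
}
#endif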
static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
						    const void *));
static rtx simplify_plus_minus PARAMS ((enum rtx_code,
					enum machine_mode, rtx,
					rtx, int));

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (mode, i)
     enum machine_mode mode;
     rtx i;
{
  return gen_int_mode (- INTVAL (i), mode);
}
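
/* Illustrative sketch (an editorial addition, not in the original
   source): negating the most negative QImode value, -128, overflows
   mathematically; gen_int_mode truncates the result back into QImode,
   so the returned CONST_INT is again -128.  */
#if 0
static void
neg_const_int_example ()
{
  rtx i = GEN_INT (-128);
  rtx n = neg_const_int (QImode, i);	/* INTVAL (n) == -128, truncated */
}
#endif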

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
	return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
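
/* Usage sketch (an editorial addition, illustrative only): commutative
   operands are canonicalized before folding, so both calls below
   produce the same result, and adding zero folds away entirely.  */
#if 0
static void
simplify_gen_binary_example ()
{
  rtx reg = gen_rtx_REG (SImode, 1);
  /* (plus (reg) (const_int 0)) folds to just the register.  */
  rtx x = simplify_gen_binary (PLUS, SImode, reg, const0_rtx);
  /* (plus (const_int 0) (reg)) is first reordered, then folds the same.  */
  rtx y = simplify_gen_binary (PLUS, SImode, const0_rtx, reg);
}
#endif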

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (x)
     rtx x;
{
  rtx c, addr;
  enum machine_mode cmode;

  if (GET_CODE (x) != MEM)
    return x;
  addr = XEXP (x, 0);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
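
/* Illustrative sketch (an editorial addition, not part of the
   original file): given a MEM whose address is a SYMBOL_REF into the
   constant pool, the function hands back the pooled constant.  */
#if 0
static void
avoid_constant_pool_reference_example (x)
     rtx x;		/* e.g. (mem:SF (symbol_ref/u:SI ("*.LC0"))) */
{
  /* If .LC0 holds the SFmode constant 1.0, VALUE is that CONST_DOUBLE;
     any rtx that is not such a MEM comes back unchanged.  */
  rtx value = avoid_constant_pool_reference (x);
}
#endif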

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */

rtx
simplify_gen_relational (code, mode, cmp_mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     enum machine_mode cmp_mode;
     rtx op0, op1;
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* For the following tests, ensure const0_rtx is op1.  */
  if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (code == NE && op1 == const0_rtx
      && GET_RTX_CLASS (GET_CODE (op0)) == '<')
    return op0;
  else if (code == EQ && op1 == const0_rtx)
    {
      /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'.  */
      enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
      if (new != UNKNOWN)
	{
	  code = new;
	  mode = cmp_mode;
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	}
    }

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (x, old, new)
     rtx x;
     rtx old;
     rtx new;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op = (XEXP (x, 0) == old
		  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

	return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
	simplify_gen_binary (code, mode,
			     simplify_replace_rtx (XEXP (x, 0), old, new),
			     simplify_replace_rtx (XEXP (x, 1), old, new));
    case '<':
      {
	enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
				     ? GET_MODE (XEXP (x, 0))
				     : GET_MODE (XEXP (x, 1)));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	return
	  simplify_gen_relational (code, mode,
				   (op_mode != VOIDmode
				    ? op_mode
				    : GET_MODE (op0) != VOIDmode
				    ? GET_MODE (op0)
				    : GET_MODE (op1)),
				   op0, op1);
      }

    case '3':
    case 'b':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

	return
	  simplify_gen_ternary (code, mode,
				(op_mode != VOIDmode
				 ? op_mode
				 : GET_MODE (op0)),
				op0,
				simplify_replace_rtx (XEXP (x, 1), old, new),
				simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  rtx exp;
	  exp = simplify_gen_subreg (GET_MODE (x),
				     simplify_replace_rtx (SUBREG_REG (x),
							   old, new),
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  if (exp)
	    x = exp;
	}
      return x;

    case 'o':
      if (code == MEM)
	return replace_equiv_address_nv (x,
					 simplify_replace_rtx (XEXP (x, 0),
							       old, new));
      else if (code == LO_SUM)
	{
	  rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	  rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (REG_P (old) && REGNO (x) == REGNO (old))
	    return new;
	}

      return x;

    default:
      return x;
    }
  return x;
}
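
/* Usage sketch (an editorial addition, illustrative only):
   substituting one register for another inside an addition re-runs
   the simplifiers on the result.  */
#if 0
static void
simplify_replace_rtx_example ()
{
  rtx r1 = gen_rtx_REG (SImode, 1);
  rtx r2 = gen_rtx_REG (SImode, 2);
  /* (plus (reg 1) (const_int 4)) with reg 1 -> reg 2 becomes
     (plus (reg 2) (const_int 4)); had the replacement made both
     operands constant, the PLUS would have been folded away.  */
  rtx x = gen_rtx_PLUS (SImode, r1, GEN_INT (4));
  rtx y = simplify_replace_rtx (x, r1, r2);
}
#endif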

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  if (op_mode == VOIDmode)
	    abort ();
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	  return 0;

	default:
	  abort ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
	  else
	    lv = exact_log2 (l1 & (-l1)) + 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    abort ();

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;

	default:
	  abort ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT i;
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
      switch (code)
	{
	case FIX:		i = REAL_VALUE_FIX (d);		  break;
	case UNSIGNED_FIX:	i = REAL_VALUE_UNSIGNED_FIX (d);  break;
	default:
	  abort ();
	}
      return gen_int_mode (i, mode);
    }

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return gen_rtx_fmt_ee (reversed,
				   op_mode, XEXP (op, 0), XEXP (op, 1));
	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);
	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	case ZERO_EXTEND:
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
	  break;
#endif

	default:
	  break;
	}

      return 0;
    }
}
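
/* A few concrete foldings handled above (an editorial sketch,
   illustrative only):  */
#if 0
static void
simplify_unary_operation_example ()
{
  /* (neg (const_int 5)) in SImode folds to (const_int -5).  */
  rtx a = simplify_unary_operation (NEG, SImode, GEN_INT (5), SImode);
  /* (not (not X)) folds to X even for non-constant X.  */
  rtx reg = gen_rtx_REG (SImode, 1);
  rtx b = simplify_unary_operation (NOT, SImode,
				    gen_rtx_NOT (SImode, reg), SImode);
  /* Unfoldable requests return 0, e.g. (neg (reg)).  */
  rtx c = simplify_unary_operation (NEG, SImode, reg, SImode);
}
#endif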

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (code == DIV
	  && !MODE_HAS_INFINITIES (mode)
	  && REAL_VALUES_EQUAL (f1, dconst0))
	return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* .. fall through ...  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:  case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:  case ASHIFTRT:
	case ASHIFT:
	case ROTATE:  case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	     when x is NaN, infinite, or finite and nonzero.  They aren't
	     when x is -0 and the rounding mode is not towards -infinity,
	     since (-0) + 0 is then 0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	     transformations are safe even for IEEE.  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && trueop1 == const1_rtx)
	    return gen_rtx_NEG (mode, XEXP (op0, 0));

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;
	  break;

	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }
	  break;

	case MINUS:
	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (trueop0, trueop1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  (0 - x) is the
	     same as -x when x is NaN, infinite, or finite and nonzero.
	     But if the mode has signed zeros, and does not round towards
	     -infinity, then 0 - 0 is 0, not -0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	    return gen_rtx_NEG (mode, op1);

	  /* (-1 - a) is ~a.  */
	  if (trueop0 == constm1_rtx)
	    return gen_rtx_NOT (mode, op1);

	  /* Subtracting 0 has no effect unless the mode has signed zeros
	     and supports rounding towards -infinity.  In such a case,
	     0 - 0 is -0.  */
	  if (!(HONOR_SIGNED_ZEROS (mode)
		&& HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	      && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* (a - (-b)) -> (a + b).  True even for IEEE.  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return simplify_gen_binary (PLUS, mode,
					op0,
					neg_const_int (mode, op1));

	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 1)));
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 0)));
	    }
	  break;

	case MULT:
	  if (trueop1 == constm1_rtx)
	    {
	      tem = simplify_unary_operation (NEG, mode, op0, mode);

	      return tem ? tem : gen_rtx_NEG (mode, op0);
	    }

	  /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	     x is NaN, since x * 0 is then also NaN.  Nor is it valid
	     when the mode has signed zeros, since multiplying a negative
	     number by 0 will give -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for
	     signalling NaNs.  */
	  if (!HONOR_SNANS (mode)
	      && trueop1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));

	  /* x*2 is x+x and x*(-1) is -x */
	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
	      && GET_MODE (op0) == mode)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (REAL_VALUES_EQUAL (d, dconst2))
		return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

	      if (REAL_VALUES_EQUAL (d, dconstm1))
		return gen_rtx_NEG (mode, op0);
	    }
	  break;

	case IOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  break;

	case XOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return gen_rtx_NOT (mode, op0);
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;

	case AND:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op0;
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;

	case UDIV:
	  /* Convert divide by power of two into shift (divide by 1 handled
	     below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
	    return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));

	  /* ... fall through ...  */

	case DIV:
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      /* On some platforms DIV uses narrower mode than its
		 operands.  */
	      rtx x = gen_lowpart_common (mode, op0);
	      if (x)
		return x;
	      else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		return gen_lowpart_SUBREG (mode, op0);
	      else
		return op0;
	    }

	  /* Maybe change 0 / x to 0.  This transformation isn't safe for
	     modes with NaNs, since 0 / 0 will then be NaN rather than 0.
	     Nor is it safe for modes with signed zeros, since dividing
	     0 by a negative number gives -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop0 == CONST0_RTX (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Change division by a constant into multiplication.  Only do
	     this with -funsafe-math-optimizations.  */
	  else if (GET_CODE (trueop1) == CONST_DOUBLE
		   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
		   && trueop1 != CONST0_RTX (mode)
		   && flag_unsafe_math_optimizations)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (! REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
		  return gen_rtx_MULT (mode, op0,
				       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
		}
	    }
	  break;

	case UMOD:
	  /* Handle modulus by power of two (mod with 1 handled below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && exact_log2 (INTVAL (trueop1)) > 0)
	    return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));

	  /* ... fall through ...  */

	case MOD:
	  if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
	      && ! side_effects_p (op0) && ! side_effects_p (op1))
	    return const0_rtx;
	  break;

	case ROTATERT:
	case ROTATE:
	case ASHIFTRT:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* ... fall through ...  */

	case ASHIFT:
	case LSHIFTRT:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (trueop0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;

	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMIN:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMAX:
	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	default:
	  abort ();
	}

      return 0;
    }

  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
	 what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
	 Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
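
/* Concrete foldings handled above (an editorial sketch, illustrative
   only):  */
#if 0
static void
simplify_binary_operation_example ()
{
  /* Two CONST_INTs fold arithmetically: 2 + 3 -> (const_int 5).  */
  rtx a = simplify_binary_operation (PLUS, SImode, GEN_INT (2), GEN_INT (3));
  /* Non-constant cases still simplify structurally:
     (minus (reg) (reg)) -> (const_int 0) for integer modes.  */
  rtx reg = gen_rtx_REG (SImode, 1);
  rtx b = simplify_binary_operation (MINUS, SImode, reg, reg);
  /* When nothing applies, the result is 0 (no rtx at all).  */
  rtx c = simplify_binary_operation (PLUS, SImode, reg,
				     gen_rtx_REG (SImode, 2));
}
#endif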

/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (p1, p2)
     const void *p1;
     const void *p2;
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
	  - commutative_operand_precedence (d1->op));
}

static rtx
simplify_plus_minus (code, mode, op0, op1, force)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
     int force;
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, negate, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  input_consts++;
		  changed = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = constm1_rtx;
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		}
	      break;

	    case CONST_INT:
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  {
	    rtx lhs = ops[i].op, rhs = ops[j].op;
	    int lneg = ops[i].neg, rneg = ops[j].neg;

	    if (lhs != 0 && rhs != 0
		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs)
		    /* Don't allow -x + -1 -> ~x simplifications in the
		       first pass.  This allows us the chance to combine
		       the -1 with other constants.  */
		    && ! (first
			  && GET_CODE (tem) == NOT
			  && XEXP (tem, 0) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (GET_CODE (tem) == CONST_INT && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		  }
	      }
	  }

      first = 0;
    }
  while (changed);

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */

  negate = 0;
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
	ops[i].neg = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
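
/* Worked example of the brute-force flattening above (an editorial
   sketch, illustrative only).  The expression
   (minus (plus (reg) (const_int 8)) (plus (reg) (const_int 3)))
   expands into the operand list { reg, +8, -reg, -3 }; the pairwise
   pass cancels reg against -reg and folds 8 - 3, leaving
   (const_int 5).  */
#if 0
static void
simplify_plus_minus_example ()
{
  rtx reg = gen_rtx_REG (SImode, 1);
  rtx a = gen_rtx_PLUS (SImode, reg, GEN_INT (8));
  rtx b = gen_rtx_PLUS (SImode, reg, GEN_INT (3));
  /* Reaches simplify_plus_minus via simplify_binary_operation.  */
  rtx x = simplify_gen_binary (MINUS, SImode, a, b);	/* (const_int 5) */
}
#endif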

/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
#ifdef HAVE_cc0
      || op0 == cc0_rtx
#endif
      )
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
      code = swap_condition (code);
    }

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);

  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result.  */
  if (!HONOR_NANS (GET_MODE (trueop0)) && rtx_equal_p (trueop0, trueop1))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
	   && GET_CODE (trueop1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (trueop0) == CONST_DOUBLE
	       || GET_CODE (trueop0) == CONST_INT)
	   && (GET_CODE (trueop1) == CONST_DOUBLE
	       || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }

  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const0_rtx;
	  break;

	case NE:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (trueop1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (trueop1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const1_rtx;
	    }
	  break;

	default:
	  break;
	}

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
2114 /* Simplify CODE, an operation with result mode MODE and three operands,
2115 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2116 a constant. Return 0 if no simplification is possible. */
2119 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2120 enum rtx_code code;
2121 enum machine_mode mode, op0_mode;
2122 rtx op0, op1, op2;
2124 unsigned int width = GET_MODE_BITSIZE (mode);
2126 /* VOIDmode means "infinite" precision. */
2127 if (width == 0)
2128 width = HOST_BITS_PER_WIDE_INT;
2130 switch (code)
2132 case SIGN_EXTRACT:
2133 case ZERO_EXTRACT:
2134 if (GET_CODE (op0) == CONST_INT
2135 && GET_CODE (op1) == CONST_INT
2136 && GET_CODE (op2) == CONST_INT
2137 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2138 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2140 /* Extracting a bit-field from a constant.  */
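/* For instance, on a !BITS_BIG_ENDIAN target,
   (zero_extract:SI (const_int 0x1234) (const_int 8) (const_int 4))
   shifts right by 4 and masks to 8 bits, folding to (const_int 0x23).  */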
2141 HOST_WIDE_INT val = INTVAL (op0);
2143 if (BITS_BIG_ENDIAN)
2144 val >>= (GET_MODE_BITSIZE (op0_mode)
2145 - INTVAL (op2) - INTVAL (op1));
2146 else
2147 val >>= INTVAL (op2);
2149 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2151 /* First zero-extend. */
2152 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2153 /* If desired, propagate sign bit. */
2154 if (code == SIGN_EXTRACT
2155 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2156 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2159 /* Clear the bits that don't belong in our mode,
2160 unless they and our sign bit are all one.
2161 So we get either a reasonable negative value or a reasonable
2162 unsigned value for this mode. */
2163 if (width < HOST_BITS_PER_WIDE_INT
2164 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2165 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2166 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2168 return GEN_INT (val);
2170 break;
2172 case IF_THEN_ELSE:
2173 if (GET_CODE (op0) == CONST_INT)
2174 return op0 != const0_rtx ? op1 : op2;
2176 /* Convert a != b ? a : b and a == b ? b : a to "a". */
2177 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2178 && !HONOR_NANS (mode)
2179 && rtx_equal_p (XEXP (op0, 0), op1)
2180 && rtx_equal_p (XEXP (op0, 1), op2))
2181 return op1;
2182 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2183 && !HONOR_NANS (mode)
2184 && rtx_equal_p (XEXP (op0, 1), op1)
2185 && rtx_equal_p (XEXP (op0, 0), op2))
2186 return op2;
2187 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2189 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2190 ? GET_MODE (XEXP (op0, 1))
2191 : GET_MODE (XEXP (op0, 0)));
2192 rtx temp;
2193 if (cmp_mode == VOIDmode)
2194 cmp_mode = op0_mode;
2195 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2196 XEXP (op0, 0), XEXP (op0, 1));
2198 /* See if any simplifications were possible. */
2199 if (temp == const0_rtx)
2200 return op2;
2201 else if (temp == const1_rtx)
2202 return op1;
2203 else if (temp)
2204 op0 = temp;
2206 /* See whether op1 and op2 form a STORE_FLAG_VALUE/zero pair, so the whole IF_THEN_ELSE collapses to a comparison. */
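/* For example, when STORE_FLAG_VALUE is 1,
   (if_then_else (lt x y) (const_int 1) (const_int 0)) becomes plain
   (lt x y), and with the two arms swapped it becomes the reversed
   comparison (ge x y), provided the reversal is known to be valid.  */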
2207 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2209 HOST_WIDE_INT t = INTVAL (op1);
2210 HOST_WIDE_INT f = INTVAL (op2);
2212 if (t == STORE_FLAG_VALUE && f == 0)
2213 code = GET_CODE (op0);
2214 else if (t == 0 && f == STORE_FLAG_VALUE)
2216 enum rtx_code tmp;
2217 tmp = reversed_comparison_code (op0, NULL_RTX);
2218 if (tmp == UNKNOWN)
2219 break;
2220 code = tmp;
2222 else
2223 break;
2225 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2228 break;
2230 default:
2231 abort ();
2234 return 0;
2237 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE).
2238 Return 0 if no simplification is possible. */
2240 simplify_subreg (outermode, op, innermode, byte)
2241 rtx op;
2242 unsigned int byte;
2243 enum machine_mode outermode, innermode;
2245 /* Little bit of sanity checking. */
2246 if (innermode == VOIDmode || outermode == VOIDmode
2247 || innermode == BLKmode || outermode == BLKmode)
2248 abort ();
2250 if (GET_MODE (op) != innermode
2251 && GET_MODE (op) != VOIDmode)
2252 abort ();
2254 if (byte % GET_MODE_SIZE (outermode)
2255 || byte >= GET_MODE_SIZE (innermode))
2256 abort ();
2258 if (outermode == innermode && !byte)
2259 return op;
2261 /* Simplify subregs of vector constants. */
2262 if (GET_CODE (op) == CONST_VECTOR)
2264 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2265 const unsigned int offset = byte / elt_size;
2266 rtx elt;
2268 if (GET_MODE_INNER (innermode) == outermode)
2270 elt = CONST_VECTOR_ELT (op, offset);
2272 /* ?? We probably don't need this copy_rtx because constants
2273 can be shared. ?? */
2275 return copy_rtx (elt);
2277 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2278 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2280 return (gen_rtx_CONST_VECTOR
2281 (outermode,
2282 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2283 &CONST_VECTOR_ELT (op, offset))));
2285 else if (GET_MODE_CLASS (outermode) == MODE_INT
2286 && (GET_MODE_SIZE (outermode) % elt_size == 0))
2288 /* This happens when the target register size is smaller than
2289 the vector mode, and we synthesize operations with vectors
2290 of elements that are smaller than the register size. */
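/* E.g. on a !BYTES_BIG_ENDIAN target, an SImode subreg of the V4QI
   constant vector [1, 2, 3, 4] walks the elements from last to first,
   accumulating (const_int 0x04030201) so that element 0 ends up in
   the low-order byte.  */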
2291 HOST_WIDE_INT sum = 0, high = 0;
2292 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2293 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2294 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2295 int shift = BITS_PER_UNIT * elt_size;
2297 for (; n_elts--; i += step)
2299 elt = CONST_VECTOR_ELT (op, i);
2300 if (GET_CODE (elt) == CONST_DOUBLE
2301 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
2303 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
2304 elt);
2305 if (! elt)
2306 return NULL_RTX;
2308 if (GET_CODE (elt) != CONST_INT)
2309 return NULL_RTX;
2310 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
2311 sum = (sum << shift) + INTVAL (elt);
2313 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
2314 return GEN_INT (trunc_int_for_mode (sum, outermode));
2315 else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
2316 return immed_double_const (sum, high, outermode);
2317 else
2318 return NULL_RTX;
2320 else if (GET_MODE_CLASS (outermode) == MODE_INT
2321 && (elt_size % GET_MODE_SIZE (outermode) == 0))
2323 enum machine_mode new_mode
2324 = int_mode_for_mode (GET_MODE_INNER (innermode));
2325 int subbyte = byte % elt_size;
2327 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
2328 if (! op)
2329 return NULL_RTX;
2330 return simplify_subreg (outermode, op, new_mode, subbyte);
2332 else if (GET_MODE_CLASS (outermode) == MODE_INT)
2333 /* This shouldn't happen, but let's not do anything stupid. */
2334 return NULL_RTX;
2337 /* Attempt to simplify constant to non-SUBREG expression. */
2338 if (CONSTANT_P (op))
2340 int offset, part;
2341 unsigned HOST_WIDE_INT val = 0;
2343 if (GET_MODE_CLASS (outermode) == MODE_VECTOR_INT
2344 || GET_MODE_CLASS (outermode) == MODE_VECTOR_FLOAT)
2346 /* Construct a CONST_VECTOR from individual subregs. */
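/* For instance, a V2SI subreg of a DImode constant is built by
   extracting each SImode half with a recursive simplify_subreg call
   and wrapping the two results in a CONST_VECTOR.  */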
2347 enum machine_mode submode = GET_MODE_INNER (outermode);
2348 int subsize = GET_MODE_UNIT_SIZE (outermode);
2349 int i, elts = GET_MODE_NUNITS (outermode);
2350 rtvec v = rtvec_alloc (elts);
2351 rtx elt;
2353 for (i = 0; i < elts; i++, byte += subsize)
2355 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
2356 /* ??? It would be nice if we could actually make such subregs
2357 on targets that allow such relocations. */
2358 if (byte >= GET_MODE_UNIT_SIZE (innermode))
2359 elt = CONST0_RTX (submode);
2360 else
2361 elt = simplify_subreg (submode, op, innermode, byte);
2362 if (! elt)
2363 return NULL_RTX;
2364 RTVEC_ELT (v, i) = elt;
2366 return gen_rtx_CONST_VECTOR (outermode, v);
2369 /* ??? This code is partly redundant with code below, but can handle
2370 the subregs of floats and similar corner cases.
2371 Later we should move all simplification code here and rewrite
2372 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2373 using SIMPLIFY_SUBREG. */
2374 if (subreg_lowpart_offset (outermode, innermode) == byte
2375 && GET_CODE (op) != CONST_VECTOR)
2377 rtx new = gen_lowpart_if_possible (outermode, op);
2378 if (new)
2379 return new;
2382 /* The comment above applies here as well. */
2383 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2384 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2385 && GET_MODE_CLASS (outermode) == MODE_INT)
2387 rtx new = constant_subword (op,
2388 (byte / UNITS_PER_WORD),
2389 innermode);
2390 if (new)
2391 return new;
2394 if (GET_MODE_CLASS (outermode) != MODE_INT
2395 && GET_MODE_CLASS (outermode) != MODE_CC)
2397 enum machine_mode new_mode = int_mode_for_mode (outermode);
2399 if (new_mode != innermode || byte != 0)
2401 op = simplify_subreg (new_mode, op, innermode, byte);
2402 if (! op)
2403 return NULL_RTX;
2404 return simplify_subreg (outermode, op, new_mode, 0);
2408 offset = byte * BITS_PER_UNIT;
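/* For instance, on a little-endian target, a QImode subreg of
   (const_int 0x1234) at byte 1 shifts the value right by 8 bits and
   truncates, giving (const_int 0x12).  */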
2409 switch (GET_CODE (op))
2411 case CONST_DOUBLE:
2412 if (GET_MODE (op) != VOIDmode)
2413 break;
2415 /* We can't handle this case yet. */
2416 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2417 return NULL_RTX;
2419 part = offset >= HOST_BITS_PER_WIDE_INT;
2420 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2421 && BYTES_BIG_ENDIAN)
2422 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2423 && WORDS_BIG_ENDIAN))
2424 part = !part;
2425 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2426 offset %= HOST_BITS_PER_WIDE_INT;
2428 /* We've already picked the word we want from a double, so
2429 pretend this is actually an integer. */
2430 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2432 /* FALLTHROUGH */
2433 case CONST_INT:
2434 if (GET_CODE (op) == CONST_INT)
2435 val = INTVAL (op);
2437 /* We don't handle synthesizing of non-integral constants yet. */
2438 if (GET_MODE_CLASS (outermode) != MODE_INT)
2439 return NULL_RTX;
2441 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2443 if (WORDS_BIG_ENDIAN)
2444 offset = (GET_MODE_BITSIZE (innermode)
2445 - GET_MODE_BITSIZE (outermode) - offset);
2446 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2447 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2448 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2449 - 2 * (offset % BITS_PER_WORD));
2452 if (offset >= HOST_BITS_PER_WIDE_INT)
2453 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2454 else
2456 val >>= offset;
2457 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2458 val = trunc_int_for_mode (val, outermode);
2459 return GEN_INT (val);
2461 default:
2462 break;
2466 /* Changing mode twice with SUBREG => just change it once,
2467 or not at all if changing back to the starting mode. */
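/* E.g. (subreg:QI (subreg:HI (reg:SI R) 2) 1) combines the two byte
   offsets into one, folding to (subreg:QI (reg:SI R) 3).  */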
2468 if (GET_CODE (op) == SUBREG)
2470 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2471 int final_offset = byte + SUBREG_BYTE (op);
2472 rtx new;
2474 if (outermode == innermostmode
2475 && byte == 0 && SUBREG_BYTE (op) == 0)
2476 return SUBREG_REG (op);
2478 /* The SUBREG_BYTE represents the offset, as if the value were stored
2479 in memory. The irritating exception is a paradoxical subreg, where
2480 we define SUBREG_BYTE to be 0; on big-endian machines this value
2481 should be negative. For a moment, undo this exception. */
2482 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2484 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2485 if (WORDS_BIG_ENDIAN)
2486 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2487 if (BYTES_BIG_ENDIAN)
2488 final_offset += difference % UNITS_PER_WORD;
2490 if (SUBREG_BYTE (op) == 0
2491 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2493 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2494 if (WORDS_BIG_ENDIAN)
2495 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2496 if (BYTES_BIG_ENDIAN)
2497 final_offset += difference % UNITS_PER_WORD;
2500 /* See whether resulting subreg will be paradoxical. */
2501 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2503 /* In nonparadoxical subregs we can't handle negative offsets. */
2504 if (final_offset < 0)
2505 return NULL_RTX;
2506 /* Bail out in case resulting subreg would be incorrect. */
2507 if (final_offset % GET_MODE_SIZE (outermode)
2508 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2509 return NULL_RTX;
2511 else
2513 int offset = 0;
2514 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2516 /* In a paradoxical subreg, see if we are still looking at the lower
2517 part. If so, our SUBREG_BYTE will be 0. */
2518 if (WORDS_BIG_ENDIAN)
2519 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2520 if (BYTES_BIG_ENDIAN)
2521 offset += difference % UNITS_PER_WORD;
2522 if (offset == final_offset)
2523 final_offset = 0;
2524 else
2525 return NULL_RTX;
2528 /* Recurse for further possible simplifications. */
2529 new = simplify_subreg (outermode, SUBREG_REG (op),
2530 GET_MODE (SUBREG_REG (op)),
2531 final_offset);
2532 if (new)
2533 return new;
2534 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2537 /* SUBREG of a hard register => just change the register number
2538 and/or mode. If the hard register is not valid in that mode,
2539 suppress this simplification. If the hard register is the stack,
2540 frame, or argument pointer, leave this as a SUBREG. */
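/* E.g. on a target where a DImode value lives in the SImode hard
   register pair 0/1, (subreg:SI (reg:DI 0) 4) can fold straight to
   (reg:SI 1), with no SUBREG left behind.  */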
2542 if (REG_P (op)
2543 && (! REG_FUNCTION_VALUE_P (op)
2544 || ! rtx_equal_function_value_matters)
2545 && REGNO (op) < FIRST_PSEUDO_REGISTER
2546 #ifdef CANNOT_CHANGE_MODE_CLASS
2547 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), outermode, innermode)
2548 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2549 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
2550 #endif
2551 && ((reload_completed && !frame_pointer_needed)
2552 || (REGNO (op) != FRAME_POINTER_REGNUM
2553 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2554 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2555 #endif
2557 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2558 && REGNO (op) != ARG_POINTER_REGNUM
2559 #endif
2560 && REGNO (op) != STACK_POINTER_REGNUM)
2562 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte), 0);
2565 /* ??? We do allow it if the current REG is not valid for
2566 its mode. This is a kludge to work around how float/complex
2567 arguments are passed on 32-bit SPARC and should be fixed. */
2568 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2569 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2571 rtx x = gen_rtx_REG (outermode, final_regno);
2573 /* Propagate the original regno. We don't have any way to specify
2574 the offset inside the original regno, so do so only for the lowpart.
2575 The information is used only by alias analysis, which cannot
2576 grok partial registers anyway. */
2578 if (subreg_lowpart_offset (outermode, innermode) == byte)
2579 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
2580 return x;
2584 /* If we have a SUBREG of a register that we are replacing and we are
2585 replacing it with a MEM, make a new MEM and try replacing the
2586 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2587 or if we would be widening it. */
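/* For instance, (subreg:QI (mem:SI (reg:SI A)) 3) can become
   (mem:QI (plus:SI (reg:SI A) (const_int 3))), provided the address
   is not mode-dependent and the reference is not being widened.  */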
2589 if (GET_CODE (op) == MEM
2590 && ! mode_dependent_address_p (XEXP (op, 0))
2591 /* Allow splitting of volatile memory references in case we don't
2592 have an instruction to move the whole thing. */
2593 && (! MEM_VOLATILE_P (op)
2594 || ! have_insn_for (SET, innermode))
2595 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2596 return adjust_address_nv (op, outermode, byte);
2598 /* Handle complex values represented as CONCAT
2599 of real and imaginary part. */
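/* E.g. (subreg:SF (concat:SC RE IM) 4) selects the imaginary half
   directly and folds to IM.  */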
2600 if (GET_CODE (op) == CONCAT)
2602 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2603 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2604 unsigned int final_offset;
2605 rtx res;
2607 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2608 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2609 if (res)
2610 return res;
2611 /* We can at least simplify it by referring directly to the relevant part. */
2612 return gen_rtx_SUBREG (outermode, part, final_offset);
2615 return NULL_RTX;
2617 /* Make a SUBREG operation or equivalent if it folds. */
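/* For example, simplify_gen_subreg (QImode, (reg:SI R), SImode, 0)
   first tries simplify_subreg and, when that fails for a pseudo,
   returns (subreg:QI (reg:SI R) 0); when OP is QUEUED, is itself a
   SUBREG, or has VOIDmode, NULL_RTX is returned instead.  */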
2620 simplify_gen_subreg (outermode, op, innermode, byte)
2621 rtx op;
2622 unsigned int byte;
2623 enum machine_mode outermode, innermode;
2625 rtx new;
2626 /* Little bit of sanity checking. */
2627 if (innermode == VOIDmode || outermode == VOIDmode
2628 || innermode == BLKmode || outermode == BLKmode)
2629 abort ();
2631 if (GET_MODE (op) != innermode
2632 && GET_MODE (op) != VOIDmode)
2633 abort ();
2635 if (byte % GET_MODE_SIZE (outermode)
2636 || byte >= GET_MODE_SIZE (innermode))
2637 abort ();
2639 if (GET_CODE (op) == QUEUED)
2640 return NULL_RTX;
2642 new = simplify_subreg (outermode, op, innermode, byte);
2643 if (new)
2644 return new;
2646 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2647 return NULL_RTX;
2649 return gen_rtx_SUBREG (outermode, op, byte);
2651 /* Simplify X, an rtx expression.
2653 Return the simplified expression or NULL if no simplifications
2654 were possible.
2656 This is the preferred entry point into the simplification routines;
2657 however, we still allow passes to call the more specific routines.
2659 Right now GCC has three (yes, three) major bodies of RTL simplification
2660 code that need to be unified.
2662 1. fold_rtx in cse.c. This code uses various CSE specific
2663 information to aid in RTL simplification.
2665 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2666 it uses combine specific information to aid in RTL
2667 simplification.
2669 3. The routines in this file.
2672 Long term we want to only have one body of simplification code; to
2673 get to that state I recommend the following steps:
2675 1. Pore over fold_rtx & simplify_rtx and move any simplifications
2676 that do not depend on pass-specific state into these routines.
2678 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2679 use this routine whenever possible.
2681 3. Allow for pass dependent state to be provided to these
2682 routines and add simplifications based on the pass dependent
2683 state. Remove code from cse.c & combine.c that becomes
2684 redundant/dead.
2686 It will take time, but ultimately the compiler will be easier to
2687 maintain and improve. It's totally silly that when we add a
2688 simplification it needs to be added to four places (three for RTL
2689 simplification and one for tree simplification). */
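/* As an illustration, simplify_rtx on
   (plus:SI (reg:SI A) (const_int 0)) dispatches on the rtx class
   ('c', commutative) and hands the operands to
   simplify_binary_operation, which folds away the zero and returns
   (reg:SI A); a NULL return means no simplification was found.  */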
2692 simplify_rtx (x)
2693 rtx x;
2695 enum rtx_code code = GET_CODE (x);
2696 enum machine_mode mode = GET_MODE (x);
2698 switch (GET_RTX_CLASS (code))
2700 case '1':
2701 return simplify_unary_operation (code, mode,
2702 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2703 case 'c':
2704 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
2706 rtx tem;
2708 tem = XEXP (x, 0);
2709 XEXP (x, 0) = XEXP (x, 1);
2710 XEXP (x, 1) = tem;
2711 return simplify_binary_operation (code, mode,
2712 XEXP (x, 0), XEXP (x, 1));
2715 case '2':
2716 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2718 case '3':
2719 case 'b':
2720 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2721 XEXP (x, 0), XEXP (x, 1),
2722 XEXP (x, 2));
2724 case '<':
2725 return simplify_relational_operation (code,
2726 ((GET_MODE (XEXP (x, 0))
2727 != VOIDmode)
2728 ? GET_MODE (XEXP (x, 0))
2729 : GET_MODE (XEXP (x, 1))),
2730 XEXP (x, 0), XEXP (x, 1));
2731 case 'x':
2732 /* The only case we try to handle is a SUBREG. */
2733 if (code == SUBREG)
2734 return simplify_gen_subreg (mode, SUBREG_REG (x),
2735 GET_MODE (SUBREG_REG (x)),
2736 SUBREG_BYTE (x));
2737 return NULL;
2738 default:
2739 return NULL;