2002-05-04 David S. Miller <davem@redhat.com>
[official-gcc.git] / gcc / simplify-rtx.c
blob8441ea9e985f942c946eed41db67d611b97b491e
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
23 #include "config.h"
24 #include "system.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "recog.h"
34 #include "function.h"
35 #include "expr.h"
36 #include "toplev.h"
37 #include "output.h"
38 #include "ggc.h"
40 /* Simplification and canonicalization of RTL. */
42 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
43 virtual regs here because the simplify_*_operation routines are called
44 by integrate.c, which is called before virtual register instantiation.
46 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
47 a header file so that their definitions can be shared with the
48 simplification routines in simplify-rtx.c. Until then, do not
49 change these macros without also changing the copy in simplify-rtx.c. */
/* Note: in the PLUS arm below, X is already known to be a PLUS, so the
   arg-pointer check must inspect XEXP (X, 0); the old test of X itself
   could never match and silently disabled the arg-pointer case.  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
65 /* Similar, but also allows reference to the stack pointer.
67 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
68 arg_pointer_rtx by itself is nonzero, because on at least one machine,
69 the i960, the arg pointer is zero when it is unused. */
/* Note: as in FIXED_BASE_PLUS_P, the first PLUS arm must check
   XEXP (X, 0) against arg_pointer_rtx; X is a PLUS at that point, so
   comparing X itself could never succeed.  */
#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  The argument is parenthesized so that an
   expression argument (e.g. `a + b') is cast as a whole, not just its
   first operand.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
/* Forward declarations for static helpers defined later in this file
   (old-style PARAMS prototypes, as used throughout GCC of this era).  */
98 static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
99 static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
100 const void *));
101 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
102 enum machine_mode, rtx,
103 rtx, int));
105 /* Negate a CONST_INT rtx, truncating (because a conversion from a
106 maximally negative number can overflow). */
107 static rtx
108 neg_const_int (mode, i)
109 enum machine_mode mode;
110 rtx i;
112 return gen_int_mode (- INTVAL (i), mode);
116 /* Make a binary operation by properly ordering the operands and
117 seeing if the expression folds. */
120 simplify_gen_binary (code, mode, op0, op1)
121 enum rtx_code code;
122 enum machine_mode mode;
123 rtx op0, op1;
125 rtx tem;
127 /* Put complex operands first and constants second if commutative. */
128 if (GET_RTX_CLASS (code) == 'c'
129 && swap_commutative_operands_p (op0, op1))
130 tem = op0, op0 = op1, op1 = tem;
132 /* If this simplifies, do it. */
133 tem = simplify_binary_operation (code, mode, op0, op1);
134 if (tem)
135 return tem;
137 /* Handle addition and subtraction specially. Otherwise, just form
138 the operation. */
140 if (code == PLUS || code == MINUS)
142 tem = simplify_plus_minus (code, mode, op0, op1, 1);
143 if (tem)
144 return tem;
147 return gen_rtx_fmt_ee (code, mode, op0, op1);
150 /* If X is a MEM referencing the constant pool, return the real value.
151 Otherwise return X. */
153 avoid_constant_pool_reference (x)
154 rtx x;
156 rtx c, addr;
157 enum machine_mode cmode;
159 if (GET_CODE (x) != MEM)
160 return x;
161 addr = XEXP (x, 0);
163 if (GET_CODE (addr) != SYMBOL_REF
164 || ! CONSTANT_POOL_ADDRESS_P (addr))
165 return x;
167 c = get_pool_constant (addr);
168 cmode = get_pool_mode (addr);
170 /* If we're accessing the constant in a different mode than it was
171 originally stored, attempt to fix that up via subreg simplifications.
172 If that fails we have no choice but to return the original memory. */
173 if (cmode != GET_MODE (x))
175 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
176 return c ? c : x;
179 return c;
182 /* Make a unary operation by first seeing if it folds and otherwise making
183 the specified operation. */
186 simplify_gen_unary (code, mode, op, op_mode)
187 enum rtx_code code;
188 enum machine_mode mode;
189 rtx op;
190 enum machine_mode op_mode;
192 rtx tem;
194 /* If this simplifies, use it. */
195 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
196 return tem;
198 return gen_rtx_fmt_e (code, mode, op);
201 /* Likewise for ternary operations. */
204 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
205 enum rtx_code code;
206 enum machine_mode mode, op0_mode;
207 rtx op0, op1, op2;
209 rtx tem;
211 /* If this simplifies, use it. */
212 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
213 op0, op1, op2)))
214 return tem;
216 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
219 /* Likewise, for relational operations.
220 CMP_MODE specifies mode comparison is done in.
224 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
225 enum rtx_code code;
226 enum machine_mode mode;
227 enum machine_mode cmp_mode;
228 rtx op0, op1;
230 rtx tem;
232 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
233 return tem;
235 /* Put complex operands first and constants second. */
236 if (swap_commutative_operands_p (op0, op1))
237 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
239 return gen_rtx_fmt_ee (code, mode, op0, op1);
242 /* Replace all occurrences of OLD in X with NEW and try to simplify the
243 resulting RTX. Return a new RTX which is as simplified as possible. */
246 simplify_replace_rtx (x, old, new)
247 rtx x;
248 rtx old;
249 rtx new;
251 enum rtx_code code = GET_CODE (x);
252 enum machine_mode mode = GET_MODE (x);
254 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
255 to build a new expression substituting recursively. If we can't do
256 anything, return our input. */
258 if (x == old)
259 return new;
261 switch (GET_RTX_CLASS (code))
263 case '1':
265 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
266 rtx op = (XEXP (x, 0) == old
267 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
269 return simplify_gen_unary (code, mode, op, op_mode);
272 case '2':
273 case 'c':
274 return
275 simplify_gen_binary (code, mode,
276 simplify_replace_rtx (XEXP (x, 0), old, new),
277 simplify_replace_rtx (XEXP (x, 1), old, new));
278 case '<':
280 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
281 ? GET_MODE (XEXP (x, 0))
282 : GET_MODE (XEXP (x, 1)));
283 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
284 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
286 return
287 simplify_gen_relational (code, mode,
288 (op_mode != VOIDmode
289 ? op_mode
290 : GET_MODE (op0) != VOIDmode
291 ? GET_MODE (op0)
292 : GET_MODE (op1)),
293 op0, op1);
296 case '3':
297 case 'b':
299 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
300 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
302 return
303 simplify_gen_ternary (code, mode,
304 (op_mode != VOIDmode
305 ? op_mode
306 : GET_MODE (op0)),
307 op0,
308 simplify_replace_rtx (XEXP (x, 1), old, new),
309 simplify_replace_rtx (XEXP (x, 2), old, new));
312 case 'x':
313 /* The only case we try to handle is a SUBREG. */
314 if (code == SUBREG)
316 rtx exp;
317 exp = simplify_gen_subreg (GET_MODE (x),
318 simplify_replace_rtx (SUBREG_REG (x),
319 old, new),
320 GET_MODE (SUBREG_REG (x)),
321 SUBREG_BYTE (x));
322 if (exp)
323 x = exp;
325 return x;
327 default:
328 if (GET_CODE (x) == MEM)
329 return
330 replace_equiv_address_nv (x,
331 simplify_replace_rtx (XEXP (x, 0),
332 old, new));
334 return x;
336 return x;
339 /* Try to simplify a unary operation CODE whose output mode is to be
340 MODE with input operand OP whose mode was originally OP_MODE.
341 Return zero if no simplification can be made. */
343 simplify_unary_operation (code, mode, op, op_mode)
344 enum rtx_code code;
345 enum machine_mode mode;
346 rtx op;
347 enum machine_mode op_mode;
349 unsigned int width = GET_MODE_BITSIZE (mode);
350 rtx trueop = avoid_constant_pool_reference (op);
352 /* The order of these tests is critical so that, for example, we don't
353 check the wrong mode (input vs. output) for a conversion operation,
354 such as FIX. At some point, this should be simplified. */
/* Case 1: FLOAT of an integer constant -- build the REAL_VALUE from
   the (low, high) pair and round it to MODE.  */
356 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
357 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
359 HOST_WIDE_INT hv, lv;
360 REAL_VALUE_TYPE d;
362 if (GET_CODE (trueop) == CONST_INT)
363 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
364 else
365 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
367 REAL_VALUE_FROM_INT (d, lv, hv, mode);
368 d = real_value_truncate (mode, d);
369 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* Same for UNSIGNED_FLOAT, with extra care because a set sign bit
   cannot be interpreted without knowing the operand's mode.  */
371 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
372 && (GET_CODE (trueop) == CONST_DOUBLE
373 || GET_CODE (trueop) == CONST_INT))
375 HOST_WIDE_INT hv, lv;
376 REAL_VALUE_TYPE d;
378 if (GET_CODE (trueop) == CONST_INT)
379 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
380 else
381 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
383 if (op_mode == VOIDmode)
385 /* We don't know how to interpret negative-looking numbers in
386 this case, so don't try to fold those. */
387 if (hv < 0)
388 return 0;
390 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
392 else
393 hv = 0, lv &= GET_MODE_MASK (op_mode);
395 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
396 d = real_value_truncate (mode, d);
397 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* Case 2: a CONST_INT operand with a single-word result -- compute the
   result in a HOST_WIDE_INT and truncate it to MODE at the end.  */
400 if (GET_CODE (trueop) == CONST_INT
401 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
403 HOST_WIDE_INT arg0 = INTVAL (trueop);
404 HOST_WIDE_INT val;
406 switch (code)
408 case NOT:
409 val = ~ arg0;
410 break;
412 case NEG:
413 val = - arg0;
414 break;
416 case ABS:
417 val = (arg0 >= 0 ? arg0 : - arg0);
418 break;
420 case FFS:
421 /* Don't use ffs here. Instead, get low order bit and then its
422 number. If arg0 is zero, this will return 0, as desired. */
423 arg0 &= GET_MODE_MASK (mode);
424 val = exact_log2 (arg0 & (- arg0)) + 1;
425 break;
427 case TRUNCATE:
428 val = arg0;
429 break;
431 case ZERO_EXTEND:
432 /* When zero-extending a CONST_INT, we need to know its
433 original mode. */
434 if (op_mode == VOIDmode)
435 abort ();
436 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
438 /* If we were really extending the mode,
439 we would have to distinguish between zero-extension
440 and sign-extension. */
441 if (width != GET_MODE_BITSIZE (op_mode))
442 abort ();
443 val = arg0;
445 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
446 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
447 else
448 return 0;
449 break;
451 case SIGN_EXTEND:
452 if (op_mode == VOIDmode)
453 op_mode = mode;
454 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
456 /* If we were really extending the mode,
457 we would have to distinguish between zero-extension
458 and sign-extension. */
459 if (width != GET_MODE_BITSIZE (op_mode))
460 abort ();
461 val = arg0;
463 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
466 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
467 if (val
468 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
469 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
471 else
472 return 0;
473 break;
475 case SQRT:
476 case FLOAT_EXTEND:
477 case FLOAT_TRUNCATE:
478 case SS_TRUNCATE:
479 case US_TRUNCATE:
480 return 0;
482 default:
483 abort ();
486 val = trunc_int_for_mode (val, mode);
488 return GEN_INT (val);
491 /* We can do some operations on integer CONST_DOUBLEs. Also allow
492 for a DImode operation on a CONST_INT. */
/* Case 3: double-word constants, computed on the (low, high) pair.  */
493 else if (GET_MODE (trueop) == VOIDmode
494 && width <= HOST_BITS_PER_WIDE_INT * 2
495 && (GET_CODE (trueop) == CONST_DOUBLE
496 || GET_CODE (trueop) == CONST_INT))
498 unsigned HOST_WIDE_INT l1, lv;
499 HOST_WIDE_INT h1, hv;
501 if (GET_CODE (trueop) == CONST_DOUBLE)
502 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
503 else
504 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
506 switch (code)
508 case NOT:
509 lv = ~ l1;
510 hv = ~ h1;
511 break;
513 case NEG:
514 neg_double (l1, h1, &lv, &hv);
515 break;
517 case ABS:
518 if (h1 < 0)
519 neg_double (l1, h1, &lv, &hv);
520 else
521 lv = l1, hv = h1;
522 break;
524 case FFS:
525 hv = 0;
526 if (l1 == 0)
527 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
528 else
529 lv = exact_log2 (l1 & (-l1)) + 1;
530 break;
532 case TRUNCATE:
533 /* This is just a change-of-mode, so do nothing. */
534 lv = l1, hv = h1;
535 break;
537 case ZERO_EXTEND:
538 if (op_mode == VOIDmode)
539 abort ();
541 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
542 return 0;
544 hv = 0;
545 lv = l1 & GET_MODE_MASK (op_mode);
546 break;
548 case SIGN_EXTEND:
549 if (op_mode == VOIDmode
550 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
551 return 0;
552 else
554 lv = l1 & GET_MODE_MASK (op_mode);
555 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
556 && (lv & ((HOST_WIDE_INT) 1
557 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
558 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
560 hv = HWI_SIGN_EXTEND (lv);
562 break;
564 case SQRT:
565 return 0;
567 default:
568 return 0;
571 return immed_double_const (lv, hv, mode);
/* Case 4: a floating-point constant operand with a floating-point
   result.  */
574 else if (GET_CODE (trueop) == CONST_DOUBLE
575 && GET_MODE_CLASS (mode) == MODE_FLOAT)
577 REAL_VALUE_TYPE d;
578 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
580 switch (code)
582 case SQRT:
583 /* We don't attempt to optimize this. */
584 return 0;
586 case ABS: d = REAL_VALUE_ABS (d); break;
587 case NEG: d = REAL_VALUE_NEGATE (d); break;
588 case FLOAT_TRUNCATE: d = real_value_truncate (mode, d); break;
589 case FLOAT_EXTEND: /* All this does is change the mode. */ break;
590 case FIX: d = REAL_VALUE_RNDZINT (d); break;
591 case UNSIGNED_FIX: d = REAL_VALUE_UNSIGNED_RNDZINT (d); break;
592 default:
593 abort ();
595 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* Case 5: converting a floating-point constant to a single-word
   integer mode (FIX / UNSIGNED_FIX).  */
598 else if (GET_CODE (trueop) == CONST_DOUBLE
599 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
600 && GET_MODE_CLASS (mode) == MODE_INT
601 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
603 HOST_WIDE_INT i;
604 REAL_VALUE_TYPE d;
605 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
606 switch (code)
608 case FIX: i = REAL_VALUE_FIX (d); break;
609 case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
610 default:
611 abort ();
613 return gen_int_mode (i, mode);
616 /* This was formerly used only for non-IEEE float.
617 eggert@twinsun.com says it is safe for IEEE also. */
618 else
620 enum rtx_code reversed;
621 /* There are some simplifications we can do even if the operands
622 aren't constant. */
623 switch (code)
625 case NOT:
626 /* (not (not X)) == X. */
627 if (GET_CODE (op) == NOT)
628 return XEXP (op, 0);
630 /* (not (eq X Y)) == (ne X Y), etc. */
/* NOTE(review): the reversed comparison below is built in OP_MODE,
   which is VOIDmode when the caller passed none -- confirm callers
   always supply the comparison's mode here.  */
631 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
632 && ((reversed = reversed_comparison_code (op, NULL_RTX))
633 != UNKNOWN))
634 return gen_rtx_fmt_ee (reversed,
635 op_mode, XEXP (op, 0), XEXP (op, 1));
636 break;
638 case NEG:
639 /* (neg (neg X)) == X. */
640 if (GET_CODE (op) == NEG)
641 return XEXP (op, 0);
642 break;
644 case SIGN_EXTEND:
645 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
646 becomes just the MINUS if its mode is MODE. This allows
647 folding switch statements on machines using casesi (such as
648 the VAX). */
649 if (GET_CODE (op) == TRUNCATE
650 && GET_MODE (XEXP (op, 0)) == mode
651 && GET_CODE (XEXP (op, 0)) == MINUS
652 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
653 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
654 return XEXP (op, 0);
656 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
657 if (! POINTERS_EXTEND_UNSIGNED
658 && mode == Pmode && GET_MODE (op) == ptr_mode
659 && (CONSTANT_P (op)
660 || (GET_CODE (op) == SUBREG
661 && GET_CODE (SUBREG_REG (op)) == REG
662 && REG_POINTER (SUBREG_REG (op))
663 && GET_MODE (SUBREG_REG (op)) == Pmode)))
664 return convert_memory_address (Pmode, op);
665 #endif
666 break;
668 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
669 case ZERO_EXTEND:
670 if (POINTERS_EXTEND_UNSIGNED > 0
671 && mode == Pmode && GET_MODE (op) == ptr_mode
672 && (CONSTANT_P (op)
673 || (GET_CODE (op) == SUBREG
674 && GET_CODE (SUBREG_REG (op)) == REG
675 && REG_POINTER (SUBREG_REG (op))
676 && GET_MODE (SUBREG_REG (op)) == Pmode)))
677 return convert_memory_address (Pmode, op);
678 break;
679 #endif
681 default:
682 break;
685 return 0;
689 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
690 and OP1. Return 0 if no simplification is possible.
692 Don't use this for relational operations such as EQ or LT.
693 Use simplify_relational_operation instead. */
695 simplify_binary_operation (code, mode, op0, op1)
696 enum rtx_code code;
697 enum machine_mode mode;
698 rtx op0, op1;
700 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
701 HOST_WIDE_INT val;
702 unsigned int width = GET_MODE_BITSIZE (mode);
703 rtx tem;
704 rtx trueop0 = avoid_constant_pool_reference (op0);
705 rtx trueop1 = avoid_constant_pool_reference (op1);
707 /* Relational operations don't work here. We must know the mode
708 of the operands in order to do the comparison correctly.
709 Assuming a full word can give incorrect results.
710 Consider comparing 128 with -128 in QImode. */
712 if (GET_RTX_CLASS (code) == '<')
713 abort ();
715 /* Make sure the constant is second. */
716 if (GET_RTX_CLASS (code) == 'c'
717 && swap_commutative_operands_p (trueop0, trueop1))
719 tem = op0, op0 = op1, op1 = tem;
720 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
723 if (GET_MODE_CLASS (mode) == MODE_FLOAT
724 && GET_CODE (trueop0) == CONST_DOUBLE
725 && GET_CODE (trueop1) == CONST_DOUBLE
726 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
728 REAL_VALUE_TYPE f0, f1, value;
730 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
731 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
732 f0 = real_value_truncate (mode, f0);
733 f1 = real_value_truncate (mode, f1);
735 if (code == DIV
736 && !MODE_HAS_INFINITIES (mode)
737 && REAL_VALUES_EQUAL (f1, dconst0))
738 return 0;
740 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
742 value = real_value_truncate (mode, value);
743 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
746 /* We can fold some multi-word operations. */
747 if (GET_MODE_CLASS (mode) == MODE_INT
748 && width == HOST_BITS_PER_WIDE_INT * 2
749 && (GET_CODE (trueop0) == CONST_DOUBLE
750 || GET_CODE (trueop0) == CONST_INT)
751 && (GET_CODE (trueop1) == CONST_DOUBLE
752 || GET_CODE (trueop1) == CONST_INT))
754 unsigned HOST_WIDE_INT l1, l2, lv;
755 HOST_WIDE_INT h1, h2, hv;
757 if (GET_CODE (trueop0) == CONST_DOUBLE)
758 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
759 else
760 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
762 if (GET_CODE (trueop1) == CONST_DOUBLE)
763 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
764 else
765 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
767 switch (code)
769 case MINUS:
770 /* A - B == A + (-B). */
771 neg_double (l2, h2, &lv, &hv);
772 l2 = lv, h2 = hv;
774 /* .. fall through ... */
776 case PLUS:
777 add_double (l1, h1, l2, h2, &lv, &hv);
778 break;
780 case MULT:
781 mul_double (l1, h1, l2, h2, &lv, &hv);
782 break;
784 case DIV: case MOD: case UDIV: case UMOD:
785 /* We'd need to include tree.h to do this and it doesn't seem worth
786 it. */
787 return 0;
789 case AND:
790 lv = l1 & l2, hv = h1 & h2;
791 break;
793 case IOR:
794 lv = l1 | l2, hv = h1 | h2;
795 break;
797 case XOR:
798 lv = l1 ^ l2, hv = h1 ^ h2;
799 break;
801 case SMIN:
802 if (h1 < h2
803 || (h1 == h2
804 && ((unsigned HOST_WIDE_INT) l1
805 < (unsigned HOST_WIDE_INT) l2)))
806 lv = l1, hv = h1;
807 else
808 lv = l2, hv = h2;
809 break;
811 case SMAX:
812 if (h1 > h2
813 || (h1 == h2
814 && ((unsigned HOST_WIDE_INT) l1
815 > (unsigned HOST_WIDE_INT) l2)))
816 lv = l1, hv = h1;
817 else
818 lv = l2, hv = h2;
819 break;
821 case UMIN:
822 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
823 || (h1 == h2
824 && ((unsigned HOST_WIDE_INT) l1
825 < (unsigned HOST_WIDE_INT) l2)))
826 lv = l1, hv = h1;
827 else
828 lv = l2, hv = h2;
829 break;
831 case UMAX:
832 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
833 || (h1 == h2
834 && ((unsigned HOST_WIDE_INT) l1
835 > (unsigned HOST_WIDE_INT) l2)))
836 lv = l1, hv = h1;
837 else
838 lv = l2, hv = h2;
839 break;
841 case LSHIFTRT: case ASHIFTRT:
842 case ASHIFT:
843 case ROTATE: case ROTATERT:
844 #ifdef SHIFT_COUNT_TRUNCATED
845 if (SHIFT_COUNT_TRUNCATED)
846 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
847 #endif
849 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
850 return 0;
852 if (code == LSHIFTRT || code == ASHIFTRT)
853 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
854 code == ASHIFTRT);
855 else if (code == ASHIFT)
856 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
857 else if (code == ROTATE)
858 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
859 else /* code == ROTATERT */
860 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
861 break;
863 default:
864 return 0;
867 return immed_double_const (lv, hv, mode);
870 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
871 || width > HOST_BITS_PER_WIDE_INT || width == 0)
873 /* Even if we can't compute a constant result,
874 there are some cases worth simplifying. */
876 switch (code)
878 case PLUS:
879 /* Maybe simplify x + 0 to x. The two expressions are equivalent
880 when x is NaN, infinite, or finite and non-zero. They aren't
881 when x is -0 and the rounding mode is not towards -infinity,
882 since (-0) + 0 is then 0. */
883 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
884 return op0;
886 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
887 transformations are safe even for IEEE. */
888 if (GET_CODE (op0) == NEG)
889 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
890 else if (GET_CODE (op1) == NEG)
891 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
893 /* (~a) + 1 -> -a */
894 if (INTEGRAL_MODE_P (mode)
895 && GET_CODE (op0) == NOT
896 && trueop1 == const1_rtx)
897 return gen_rtx_NEG (mode, XEXP (op0, 0));
899 /* Handle both-operands-constant cases. We can only add
900 CONST_INTs to constants since the sum of relocatable symbols
901 can't be handled by most assemblers. Don't add CONST_INT
902 to CONST_INT since overflow won't be computed properly if wider
903 than HOST_BITS_PER_WIDE_INT. */
905 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
906 && GET_CODE (op1) == CONST_INT)
907 return plus_constant (op0, INTVAL (op1));
908 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
909 && GET_CODE (op0) == CONST_INT)
910 return plus_constant (op1, INTVAL (op0));
912 /* See if this is something like X * C - X or vice versa or
913 if the multiplication is written as a shift. If so, we can
914 distribute and make a new multiply, shift, or maybe just
915 have X (if C is 2 in the example above). But don't make
916 real multiply if we didn't have one before. */
918 if (! FLOAT_MODE_P (mode))
920 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
921 rtx lhs = op0, rhs = op1;
922 int had_mult = 0;
924 if (GET_CODE (lhs) == NEG)
925 coeff0 = -1, lhs = XEXP (lhs, 0);
926 else if (GET_CODE (lhs) == MULT
927 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
929 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
930 had_mult = 1;
932 else if (GET_CODE (lhs) == ASHIFT
933 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
934 && INTVAL (XEXP (lhs, 1)) >= 0
935 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
937 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
938 lhs = XEXP (lhs, 0);
941 if (GET_CODE (rhs) == NEG)
942 coeff1 = -1, rhs = XEXP (rhs, 0);
943 else if (GET_CODE (rhs) == MULT
944 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
946 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
947 had_mult = 1;
949 else if (GET_CODE (rhs) == ASHIFT
950 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
951 && INTVAL (XEXP (rhs, 1)) >= 0
952 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
954 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
955 rhs = XEXP (rhs, 0);
958 if (rtx_equal_p (lhs, rhs))
960 tem = simplify_gen_binary (MULT, mode, lhs,
961 GEN_INT (coeff0 + coeff1));
962 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
966 /* If one of the operands is a PLUS or a MINUS, see if we can
967 simplify this by the associative law.
968 Don't use the associative law for floating point.
969 The inaccuracy makes it nonassociative,
970 and subtle programs can break if operations are associated. */
972 if (INTEGRAL_MODE_P (mode)
973 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
974 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
975 || (GET_CODE (op0) == CONST
976 && GET_CODE (XEXP (op0, 0)) == PLUS)
977 || (GET_CODE (op1) == CONST
978 && GET_CODE (XEXP (op1, 0)) == PLUS))
979 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
980 return tem;
981 break;
983 case COMPARE:
984 #ifdef HAVE_cc0
985 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
986 using cc0, in which case we want to leave it as a COMPARE
987 so we can distinguish it from a register-register-copy.
989 In IEEE floating point, x-0 is not the same as x. */
991 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
992 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
993 && trueop1 == CONST0_RTX (mode))
994 return op0;
995 #endif
997 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
998 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
999 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1000 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1002 rtx xop00 = XEXP (op0, 0);
1003 rtx xop10 = XEXP (op1, 0);
1005 #ifdef HAVE_cc0
1006 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1007 #else
1008 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1009 && GET_MODE (xop00) == GET_MODE (xop10)
1010 && REGNO (xop00) == REGNO (xop10)
1011 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1012 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1013 #endif
1014 return xop00;
1016 break;
1018 case MINUS:
1019 /* We can't assume x-x is 0 even with non-IEEE floating point,
1020 but since it is zero except in very strange circumstances, we
1021 will treat it as zero with -funsafe-math-optimizations. */
1022 if (rtx_equal_p (trueop0, trueop1)
1023 && ! side_effects_p (op0)
1024 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1025 return CONST0_RTX (mode);
1027 /* Change subtraction from zero into negation. (0 - x) is the
1028 same as -x when x is NaN, infinite, or finite and non-zero.
1029 But if the mode has signed zeros, and does not round towards
1030 -infinity, then 0 - 0 is 0, not -0. */
1031 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1032 return gen_rtx_NEG (mode, op1);
1034 /* (-1 - a) is ~a. */
1035 if (trueop0 == constm1_rtx)
1036 return gen_rtx_NOT (mode, op1);
1038 /* Subtracting 0 has no effect unless the mode has signed zeros
1039 and supports rounding towards -infinity. In such a case,
1040 0 - 0 is -0. */
1041 if (!(HONOR_SIGNED_ZEROS (mode)
1042 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1043 && trueop1 == CONST0_RTX (mode))
1044 return op0;
1046 /* See if this is something like X * C - X or vice versa or
1047 if the multiplication is written as a shift. If so, we can
1048 distribute and make a new multiply, shift, or maybe just
1049 have X (if C is 2 in the example above). But don't make
1050 real multiply if we didn't have one before. */
1052 if (! FLOAT_MODE_P (mode))
1054 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1055 rtx lhs = op0, rhs = op1;
1056 int had_mult = 0;
1058 if (GET_CODE (lhs) == NEG)
1059 coeff0 = -1, lhs = XEXP (lhs, 0);
1060 else if (GET_CODE (lhs) == MULT
1061 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1063 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1064 had_mult = 1;
1066 else if (GET_CODE (lhs) == ASHIFT
1067 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1068 && INTVAL (XEXP (lhs, 1)) >= 0
1069 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1071 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1072 lhs = XEXP (lhs, 0);
1075 if (GET_CODE (rhs) == NEG)
1076 coeff1 = - 1, rhs = XEXP (rhs, 0);
1077 else if (GET_CODE (rhs) == MULT
1078 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1080 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1081 had_mult = 1;
1083 else if (GET_CODE (rhs) == ASHIFT
1084 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1085 && INTVAL (XEXP (rhs, 1)) >= 0
1086 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1088 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1089 rhs = XEXP (rhs, 0);
1092 if (rtx_equal_p (lhs, rhs))
1094 tem = simplify_gen_binary (MULT, mode, lhs,
1095 GEN_INT (coeff0 - coeff1));
1096 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1100 /* (a - (-b)) -> (a + b). True even for IEEE. */
1101 if (GET_CODE (op1) == NEG)
1102 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1104 /* If one of the operands is a PLUS or a MINUS, see if we can
1105 simplify this by the associative law.
1106 Don't use the associative law for floating point.
1107 The inaccuracy makes it nonassociative,
1108 and subtle programs can break if operations are associated. */
1110 if (INTEGRAL_MODE_P (mode)
1111 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1112 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1113 || (GET_CODE (op0) == CONST
1114 && GET_CODE (XEXP (op0, 0)) == PLUS)
1115 || (GET_CODE (op1) == CONST
1116 && GET_CODE (XEXP (op1, 0)) == PLUS))
1117 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1118 return tem;
1120 /* Don't let a relocatable value get a negative coeff. */
1121 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1122 return simplify_gen_binary (PLUS, mode,
1123 op0,
1124 neg_const_int (mode, op1));
1126 /* (x - (x & y)) -> (x & ~y) */
1127 if (GET_CODE (op1) == AND)
1129 if (rtx_equal_p (op0, XEXP (op1, 0)))
1130 return simplify_gen_binary (AND, mode, op0,
1131 gen_rtx_NOT (mode, XEXP (op1, 1)));
1132 if (rtx_equal_p (op0, XEXP (op1, 1)))
1133 return simplify_gen_binary (AND, mode, op0,
1134 gen_rtx_NOT (mode, XEXP (op1, 0)));
1136 break;
1138 case MULT:
1139 if (trueop1 == constm1_rtx)
1141 tem = simplify_unary_operation (NEG, mode, op0, mode);
1143 return tem ? tem : gen_rtx_NEG (mode, op0);
1146 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1147 x is NaN, since x * 0 is then also NaN. Nor is it valid
1148 when the mode has signed zeros, since multiplying a negative
1149 number by 0 will give -0, not 0. */
1150 if (!HONOR_NANS (mode)
1151 && !HONOR_SIGNED_ZEROS (mode)
1152 && trueop1 == CONST0_RTX (mode)
1153 && ! side_effects_p (op0))
1154 return op1;
1156 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1157 However, ANSI says we can drop signals,
1158 so we can do this anyway. */
1159 if (trueop1 == CONST1_RTX (mode))
1160 return op0;
1162 /* Convert multiply by constant power of two into shift unless
1163 we are still generating RTL. This test is a kludge. */
1164 if (GET_CODE (trueop1) == CONST_INT
1165 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1166 /* If the mode is larger than the host word size, and the
1167 uppermost bit is set, then this isn't a power of two due
1168 to implicit sign extension. */
1169 && (width <= HOST_BITS_PER_WIDE_INT
1170 || val != HOST_BITS_PER_WIDE_INT - 1)
1171 && ! rtx_equal_function_value_matters)
1172 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1174 /* x*2 is x+x and x*(-1) is -x */
1175 if (GET_CODE (trueop1) == CONST_DOUBLE
1176 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1177 && GET_MODE (op0) == mode)
1179 REAL_VALUE_TYPE d;
1180 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1182 if (REAL_VALUES_EQUAL (d, dconst2))
1183 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1185 if (REAL_VALUES_EQUAL (d, dconstm1))
1186 return gen_rtx_NEG (mode, op0);
1188 break;
1190 case IOR:
1191 if (trueop1 == const0_rtx)
1192 return op0;
1193 if (GET_CODE (trueop1) == CONST_INT
1194 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1195 == GET_MODE_MASK (mode)))
1196 return op1;
1197 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1198 return op0;
1199 /* A | (~A) -> -1 */
1200 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1201 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1202 && ! side_effects_p (op0)
1203 && GET_MODE_CLASS (mode) != MODE_CC)
1204 return constm1_rtx;
1205 break;
1207 case XOR:
1208 if (trueop1 == const0_rtx)
1209 return op0;
1210 if (GET_CODE (trueop1) == CONST_INT
1211 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1212 == GET_MODE_MASK (mode)))
1213 return gen_rtx_NOT (mode, op0);
1214 if (trueop0 == trueop1 && ! side_effects_p (op0)
1215 && GET_MODE_CLASS (mode) != MODE_CC)
1216 return const0_rtx;
1217 break;
1219 case AND:
1220 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1221 return const0_rtx;
1222 if (GET_CODE (trueop1) == CONST_INT
1223 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1224 == GET_MODE_MASK (mode)))
1225 return op0;
1226 if (trueop0 == trueop1 && ! side_effects_p (op0)
1227 && GET_MODE_CLASS (mode) != MODE_CC)
1228 return op0;
1229 /* A & (~A) -> 0 */
1230 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1231 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1232 && ! side_effects_p (op0)
1233 && GET_MODE_CLASS (mode) != MODE_CC)
1234 return const0_rtx;
1235 break;
1237 case UDIV:
1238 /* Convert divide by power of two into shift (divide by 1 handled
1239 below). */
1240 if (GET_CODE (trueop1) == CONST_INT
1241 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1242 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1244 /* ... fall through ... */
1246 case DIV:
1247 if (trueop1 == CONST1_RTX (mode))
1249 /* On some platforms DIV uses narrower mode than its
1250 operands. */
1251 rtx x = gen_lowpart_common (mode, op0);
1252 if (x)
1253 return x;
1254 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1255 return gen_lowpart_SUBREG (mode, op0);
1256 else
1257 return op0;
1260 /* Maybe change 0 / x to 0. This transformation isn't safe for
1261 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1262 Nor is it safe for modes with signed zeros, since dividing
1263 0 by a negative number gives -0, not 0. */
1264 if (!HONOR_NANS (mode)
1265 && !HONOR_SIGNED_ZEROS (mode)
1266 && trueop0 == CONST0_RTX (mode)
1267 && ! side_effects_p (op1))
1268 return op0;
1270 /* Change division by a constant into multiplication. Only do
1271 this with -funsafe-math-optimizations. */
1272 else if (GET_CODE (trueop1) == CONST_DOUBLE
1273 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1274 && trueop1 != CONST0_RTX (mode)
1275 && flag_unsafe_math_optimizations)
1277 REAL_VALUE_TYPE d;
1278 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1280 if (! REAL_VALUES_EQUAL (d, dconst0))
1282 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1283 return gen_rtx_MULT (mode, op0,
1284 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1287 break;
1289 case UMOD:
1290 /* Handle modulus by power of two (mod with 1 handled below). */
1291 if (GET_CODE (trueop1) == CONST_INT
1292 && exact_log2 (INTVAL (trueop1)) > 0)
1293 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1295 /* ... fall through ... */
1297 case MOD:
1298 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1299 && ! side_effects_p (op0) && ! side_effects_p (op1))
1300 return const0_rtx;
1301 break;
1303 case ROTATERT:
1304 case ROTATE:
1305 /* Rotating ~0 always results in ~0. */
1306 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1307 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1308 && ! side_effects_p (op1))
1309 return op0;
1311 /* ... fall through ... */
1313 case ASHIFT:
1314 case ASHIFTRT:
1315 case LSHIFTRT:
1316 if (trueop1 == const0_rtx)
1317 return op0;
1318 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1319 return op0;
1320 break;
1322 case SMIN:
1323 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1324 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1325 && ! side_effects_p (op0))
1326 return op1;
1327 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1328 return op0;
1329 break;
1331 case SMAX:
1332 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1333 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1334 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1335 && ! side_effects_p (op0))
1336 return op1;
1337 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1338 return op0;
1339 break;
1341 case UMIN:
1342 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1343 return op1;
1344 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1345 return op0;
1346 break;
1348 case UMAX:
1349 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1350 return op1;
1351 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1352 return op0;
1353 break;
1355 case SS_PLUS:
1356 case US_PLUS:
1357 case SS_MINUS:
1358 case US_MINUS:
1359 /* ??? There are simplifications that can be done. */
1360 return 0;
1362 default:
1363 abort ();
1366 return 0;
1369 /* Get the integer argument values in two forms:
1370 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1372 arg0 = INTVAL (trueop0);
1373 arg1 = INTVAL (trueop1);
1375 if (width < HOST_BITS_PER_WIDE_INT)
1377 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1378 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1380 arg0s = arg0;
1381 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1382 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1384 arg1s = arg1;
1385 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1386 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1388 else
1390 arg0s = arg0;
1391 arg1s = arg1;
1394 /* Compute the value of the arithmetic. */
1396 switch (code)
1398 case PLUS:
1399 val = arg0s + arg1s;
1400 break;
1402 case MINUS:
1403 val = arg0s - arg1s;
1404 break;
1406 case MULT:
1407 val = arg0s * arg1s;
1408 break;
1410 case DIV:
1411 if (arg1s == 0
1412 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1413 && arg1s == -1))
1414 return 0;
1415 val = arg0s / arg1s;
1416 break;
1418 case MOD:
1419 if (arg1s == 0
1420 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1421 && arg1s == -1))
1422 return 0;
1423 val = arg0s % arg1s;
1424 break;
1426 case UDIV:
1427 if (arg1 == 0
1428 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1429 && arg1s == -1))
1430 return 0;
1431 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1432 break;
1434 case UMOD:
1435 if (arg1 == 0
1436 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1437 && arg1s == -1))
1438 return 0;
1439 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1440 break;
1442 case AND:
1443 val = arg0 & arg1;
1444 break;
1446 case IOR:
1447 val = arg0 | arg1;
1448 break;
1450 case XOR:
1451 val = arg0 ^ arg1;
1452 break;
1454 case LSHIFTRT:
1455 /* If shift count is undefined, don't fold it; let the machine do
1456 what it wants. But truncate it if the machine will do that. */
1457 if (arg1 < 0)
1458 return 0;
1460 #ifdef SHIFT_COUNT_TRUNCATED
1461 if (SHIFT_COUNT_TRUNCATED)
1462 arg1 %= width;
1463 #endif
1465 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1466 break;
1468 case ASHIFT:
1469 if (arg1 < 0)
1470 return 0;
1472 #ifdef SHIFT_COUNT_TRUNCATED
1473 if (SHIFT_COUNT_TRUNCATED)
1474 arg1 %= width;
1475 #endif
1477 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1478 break;
1480 case ASHIFTRT:
1481 if (arg1 < 0)
1482 return 0;
1484 #ifdef SHIFT_COUNT_TRUNCATED
1485 if (SHIFT_COUNT_TRUNCATED)
1486 arg1 %= width;
1487 #endif
1489 val = arg0s >> arg1;
1491 /* Bootstrap compiler may not have sign extended the right shift.
1492 Manually extend the sign to insure bootstrap cc matches gcc. */
1493 if (arg0s < 0 && arg1 > 0)
1494 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1496 break;
1498 case ROTATERT:
1499 if (arg1 < 0)
1500 return 0;
1502 arg1 %= width;
1503 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1504 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1505 break;
1507 case ROTATE:
1508 if (arg1 < 0)
1509 return 0;
1511 arg1 %= width;
1512 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1513 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1514 break;
1516 case COMPARE:
1517 /* Do nothing here. */
1518 return 0;
1520 case SMIN:
1521 val = arg0s <= arg1s ? arg0s : arg1s;
1522 break;
1524 case UMIN:
1525 val = ((unsigned HOST_WIDE_INT) arg0
1526 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1527 break;
1529 case SMAX:
1530 val = arg0s > arg1s ? arg0s : arg1s;
1531 break;
1533 case UMAX:
1534 val = ((unsigned HOST_WIDE_INT) arg0
1535 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1536 break;
1538 default:
1539 abort ();
1542 val = trunc_int_for_mode (val, mode);
1544 return GEN_INT (val);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */

/* One additive term collected while flattening a PLUS/MINUS tree.  */

struct simplify_plus_minus_op_data
{
  rtx op;	/* The term itself.  */
  int neg;	/* Nonzero if this term enters the sum negated.  */
};
1565 static int
1566 simplify_plus_minus_op_data_cmp (p1, p2)
1567 const void *p1;
1568 const void *p2;
1570 const struct simplify_plus_minus_op_data *d1 = p1;
1571 const struct simplify_plus_minus_op_data *d2 = p2;
1573 return (commutative_operand_precedence (d2->op)
1574 - commutative_operand_precedence (d1->op));
/* Simplify the PLUS or MINUS expression CODE of OP0 and OP1 in MODE by
   flattening nested PLUS/MINUS/NEG/NOT/CONST subexpressions into an
   array of (operand, sign) terms, cross-simplifying every pair of
   terms to a fixed point, and rebuilding a canonical sum.  Returns the
   simplified rtx, or NULL_RTX when no improvement was found (unless
   FORCE is nonzero, which accepts a result even if it is no simpler).  */

static rtx
simplify_plus_minus (code, mode, op0, op1, force)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
     int force;
{
  /* At most 8 terms are tracked; flattening gives up beyond that.  */
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, negate, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  /* A MINUS is represented as adding the negated second operand.  */
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      /* Splitting needs a second slot; refuse if the array
		 would overflow.  */
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      break;

	    case NEG:
	      /* Absorb the negation into the term's sign flag.  */
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      break;

	    case CONST:
	      /* Split (const (plus X Y)) into two constant terms.  */
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  input_consts++;
		  changed = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = constm1_rtx;
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		}
	      break;

	    case CONST_INT:
	      /* Fold a negated sign flag into the constant itself.  */
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  {
	    rtx lhs = ops[i].op, rhs = ops[j].op;
	    int lneg = ops[i].neg, rneg = ops[j].neg;

	    if (lhs != 0 && rhs != 0
		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    /* Opposite signs combine as a subtraction, with the
		       positive term on the left.  */
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs)
		    /* Don't allow -x + -1 -> ~x simplifications in the
		       first pass.  This allows us the chance to combine
		       the -1 with other constants.  */
		    && ! (first
			  && GET_CODE (tem) == NOT
			  && XEXP (tem, 0) == rhs))
		  {
		    /* Merge the pair into slot I and empty slot J.  */
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (GET_CODE (tem) == CONST_INT && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		  }
	      }
	  }

      first = 0;
    }
  while (changed);

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */

  negate = 0;
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
	ops[i].neg = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  /* Comparison facts established below, consumed by the final switch.  */
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* TRUEOP0/TRUEOP1 see through constant-pool references so constant
     folding works even when the operand is a pool MEM.  */
  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
#ifdef HAVE_cc0
      || op0 == cc0_rtx
#endif
      )
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
      code = swap_condition (code);
    }

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);

  /* With -funsafe-math-optimizations, assume no NaNs: ORDERED is always
     true and UNORDERED always false.  */
  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result.  */
  if (!HONOR_NANS (GET_MODE (trueop0)) && rtx_equal_p (trueop0, trueop1))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
	   && GET_CODE (trueop1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (trueop0) == CONST_DOUBLE
	       || GET_CODE (trueop0) == CONST_INT)
	   && (GET_CODE (trueop1) == CONST_DOUBLE
	       || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      /* Each constant is held as two HOST_WIDE_INT words, in both
	 signed (suffix s) and unsigned (suffix u) interpretations.  */
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }

  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  /* References to the frame plus a constant or labels cannot
	     be zero, but a SYMBOL_REF can due to #pragma weak.  */
	  if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
	       || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      /* On some machines, the ap reg can be 0 sometimes.  */
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const0_rtx;
	  break;

	case NE:
	  if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
	       || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (trueop1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (trueop1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	default:
	  break;
	}

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplifications is possible.  */

rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  HOST_WIDE_INT val = INTVAL (op0);

	  /* OP1 is the field width, OP2 the bit position; the position
	     is counted from the other end when BITS_BIG_ENDIAN.  */
	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;

    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && rtx_equal_p (XEXP (op0, 1), op2))
	return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && rtx_equal_p (XEXP (op0, 0), op2))
	return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  /* The condition is a comparison; try to fold it outright.  */
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;
	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
						XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const1_rtx)
	    return op1;
	  else if (temp)
	    op0 = temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  /* Arms are swapped: use the reversed comparison, if
		     one exists.  */
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      /* The IF_THEN_ELSE reduces to the comparison itself.  */
	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE).

   OP is the inner expression, INNERMODE its mode (or VOIDmode for
   mode-less constants), OUTERMODE the mode of the subreg being built,
   and BYTE the byte offset of the piece selected, as if OP were stored
   in memory.

   Return the simplified rtx, or 0 if no simplification is possible.  */

rtx
simplify_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  /* VOIDmode is allowed for OP itself so that mode-less constants
     (CONST_INT, VOIDmode CONST_DOUBLE) can be passed in.  */
  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  /* BYTE must be an OUTERMODE-aligned offset inside INNERMODE.  */
  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  /* A no-op subreg simplifies to the operand itself.  */
  if (outermode == innermode && !byte)
    return op;

  /* Attempt to simplify constant to non-SUBREG expression.  */
  if (CONSTANT_P (op))
    {
      int offset, part;
      unsigned HOST_WIDE_INT val = 0;

      /* ??? This code is partly redundant with code below, but can handle
	 the subregs of floats and similar corner cases.
	 Later we should move all simplification code here and rewrite
	 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
	 using SIMPLIFY_SUBREG.  */
      if (subreg_lowpart_offset (outermode, innermode) == byte)
	{
	  rtx new = gen_lowpart_if_possible (outermode, op);
	  if (new)
	    return new;
	}

      /* The same comment as above applies here.  */
      if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
	  && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
	  && GET_MODE_CLASS (outermode) == MODE_INT)
	{
	  rtx new = constant_subword (op,
				      (byte / UNITS_PER_WORD),
				      innermode);
	  if (new)
	    return new;
	}

      /* From here on, work with a bit offset into the constant.  */
      offset = byte * BITS_PER_UNIT;
      switch (GET_CODE (op))
	{
	case CONST_DOUBLE:
	  /* A CONST_DOUBLE with a mode is a floating point value;
	     only the VOIDmode (double-wide integer) form is handled
	     by the integer extraction below.  */
	  if (GET_MODE (op) != VOIDmode)
	    break;

	  /* We can't handle this case yet.  */
	  if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
	    return NULL_RTX;

	  /* Pick the high or low HOST_WIDE_INT half of the constant,
	     correcting for the host/target endianness mismatch.  */
	  part = offset >= HOST_BITS_PER_WIDE_INT;
	  if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
	       && BYTES_BIG_ENDIAN)
	      || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
		  && WORDS_BIG_ENDIAN))
	    part = !part;
	  val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
	  offset %= HOST_BITS_PER_WIDE_INT;

	  /* We've already picked the word we want from a double, so
	     pretend this is actually an integer.  */
	  innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

	  /* FALLTHROUGH */
	case CONST_INT:
	  if (GET_CODE (op) == CONST_INT)
	    val = INTVAL (op);

	  /* We don't handle synthesizing of non-integral constants yet.  */
	  if (GET_MODE_CLASS (outermode) != MODE_INT)
	    return NULL_RTX;

	  /* Convert the memory-style byte offset into a shift count;
	     on big-endian targets the low-order bits live at the
	     opposite end of the value.  */
	  if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
	    {
	      if (WORDS_BIG_ENDIAN)
		offset = (GET_MODE_BITSIZE (innermode)
			  - GET_MODE_BITSIZE (outermode) - offset);
	      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
		  && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
		offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
			  - 2 * (offset % BITS_PER_WORD));
	    }

	  if (offset >= HOST_BITS_PER_WIDE_INT)
	    /* The requested piece lies entirely in the sign-extension
	       bits above VAL: it is all ones or all zeros.  */
	    return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
	  else
	    {
	      val >>= offset;
	      if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
		val = trunc_int_for_mode (val, outermode);
	      return GEN_INT (val);
	    }
	default:
	  break;
	}
    }

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      /* Same correction when the inner SUBREG itself is paradoxical.  */
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
			     GET_MODE (SUBREG_REG (op)),
			     final_offset);
      if (new)
	return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && (! REG_FUNCTION_VALUE_P (op)
	  || ! rtx_equal_function_value_matters)
#ifdef CLASS_CANNOT_CHANGE_MODE
      && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
	    && (TEST_HARD_REG_BIT
		(reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
		 REGNO (op))))
#endif
      && REGNO (op) < FIRST_PSEUDO_REGISTER
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	     ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM)
    {
      int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
					   0);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit Sparc and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
	{
	  rtx x = gen_rtx_REG (outermode, final_regno);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that can not
	     grok partial register anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      /* BYTE below the unit size selects the real part, above it the
	 imaginary part; the offset within that part is BYTE modulo
	 the unit size.  */
      int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      /* We can at least simplify it by referring directly to the
	 relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }

  return NULL_RTX;
}
2483 /* Make a SUBREG operation or equivalent if it folds. */
2486 simplify_gen_subreg (outermode, op, innermode, byte)
2487 rtx op;
2488 unsigned int byte;
2489 enum machine_mode outermode, innermode;
2491 rtx new;
2492 /* Little bit of sanity checking. */
2493 if (innermode == VOIDmode || outermode == VOIDmode
2494 || innermode == BLKmode || outermode == BLKmode)
2495 abort ();
2497 if (GET_MODE (op) != innermode
2498 && GET_MODE (op) != VOIDmode)
2499 abort ();
2501 if (byte % GET_MODE_SIZE (outermode)
2502 || byte >= GET_MODE_SIZE (innermode))
2503 abort ();
2505 if (GET_CODE (op) == QUEUED)
2506 return NULL_RTX;
2508 new = simplify_subreg (outermode, op, innermode, byte);
2509 if (new)
2510 return new;
2512 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2513 return NULL_RTX;
2515 return gen_rtx_SUBREG (outermode, op, byte);
2517 /* Simplify X, an rtx expression.
2519 Return the simplified expression or NULL if no simplifications
2520 were possible.
2522 This is the preferred entry point into the simplification routines;
2523 however, we still allow passes to call the more specific routines.
   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.
2528 1. fold_rtx in cse.c. This code uses various CSE specific
2529 information to aid in RTL simplification.
2531 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2532 it uses combine specific information to aid in RTL
2533 simplification.
2535 3. The routines in this file.
2538 Long term we want to only have one body of simplification code; to
2539 get to that state I recommend the following steps:
   1. Pore over fold_rtx & simplify_rtx and move any simplifications
      which are not pass dependent state into these routines.
2544 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2545 use this routine whenever possible.
2547 3. Allow for pass dependent state to be provided to these
2548 routines and add simplifications based on the pass dependent
2549 state. Remove code from cse.c & combine.c that becomes
2550 redundant/dead.
   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
2558 simplify_rtx (x)
2559 rtx x;
2561 enum rtx_code code = GET_CODE (x);
2562 enum machine_mode mode = GET_MODE (x);
2564 switch (GET_RTX_CLASS (code))
2566 case '1':
2567 return simplify_unary_operation (code, mode,
2568 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2569 case 'c':
2570 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
2572 rtx tem;
2574 tem = XEXP (x, 0);
2575 XEXP (x, 0) = XEXP (x, 1);
2576 XEXP (x, 1) = tem;
2577 return simplify_binary_operation (code, mode,
2578 XEXP (x, 0), XEXP (x, 1));
2581 case '2':
2582 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2584 case '3':
2585 case 'b':
2586 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2587 XEXP (x, 0), XEXP (x, 1),
2588 XEXP (x, 2));
2590 case '<':
2591 return simplify_relational_operation (code,
2592 ((GET_MODE (XEXP (x, 0))
2593 != VOIDmode)
2594 ? GET_MODE (XEXP (x, 0))
2595 : GET_MODE (XEXP (x, 1))),
2596 XEXP (x, 0), XEXP (x, 1));
2597 case 'x':
2598 /* The only case we try to handle is a SUBREG. */
2599 if (code == SUBREG)
2600 return simplify_gen_subreg (mode, SUBREG_REG (x),
2601 GET_MODE (SUBREG_REG (x)),
2602 SUBREG_BYTE (x));
2603 return NULL;
2604 default:
2605 return NULL;