/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
/* Simplification and canonicalization of RTL.  */

/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.

   ?!? NONZERO_BASE_PLUS_P needs to move into
   a header file so that its definition can be shared with the
   simplification routines in simplify-rtx.c.  Until then, do not
   change this macro without also changing the copy in simplify-rtx.c.  */

/* Allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P, however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */

#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || ((X) == arg_pointer_rtx				\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
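
/* Illustrative sketch (exposition only, kept under #if 0 so it cannot
   affect the build): how a double-word constant is carried as a
   (low, high) pair, and how HWI_SIGN_EXTEND recovers the high word for
   a value that originated as a single signed word.  */
#if 0
static void
hwi_pair_example (void)
{
  /* -2 as a single HOST_WIDE_INT ...  */
  unsigned HOST_WIDE_INT low = (unsigned HOST_WIDE_INT) (HOST_WIDE_INT) -2;
  /* ... widens to the pair (low, high) with high == -1: the high word
     is all ones because the low word looks negative when viewed as
     a signed wide int.  */
  HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);
  /* For a small non-negative value the high word is 0.  */
  HOST_WIDE_INT high2 = HWI_SIGN_EXTEND ((unsigned HOST_WIDE_INT) 42);
}
#endif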

static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
						    const void *));
static rtx simplify_plus_minus PARAMS ((enum rtx_code,
					enum machine_mode, rtx,
					rtx, int));

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (mode, i)
     enum machine_mode mode;
     rtx i;
{
  return gen_int_mode (- INTVAL (i), mode);
}
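
/* Illustrative sketch (exposition only, excluded from the build): why
   the truncation matters.  In QImode, negating the most negative value
   wraps: gen_int_mode truncates back into the mode, so -(-128) yields
   -128 again rather than the unrepresentable +128.  */
#if 0
static void
neg_const_int_example (void)
{
  rtx m128 = gen_int_mode (-128, QImode);
  rtx negated = neg_const_int (QImode, m128);	/* INTVAL is -128 again.  */
}
#endif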

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
	return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
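
/* Illustrative sketch (exposition only, excluded from the build): a
   typical call.  Constant operands fold immediately; otherwise the
   canonical operand order is produced.  */
#if 0
static rtx
gen_binary_example (rtx reg)
{
  /* Folds to (const_int 7).  */
  rtx c = simplify_gen_binary (PLUS, SImode, GEN_INT (3), GEN_INT (4));
  /* Commutative canonicalization puts the constant second:
     (plus (reg) (const_int 3)).  */
  return simplify_gen_binary (PLUS, SImode, GEN_INT (3), reg);
}
#endif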

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (x)
     rtx x;
{
  rtx c, addr;
  enum machine_mode cmode;

  if (GET_CODE (x) != MEM)
    return x;
  addr = XEXP (x, 0);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (code, mode, cmp_mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     enum machine_mode cmp_mode;
     rtx op0, op1;
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* For the following tests, ensure const0_rtx is op1.  */
  if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (code == NE && op1 == const0_rtx
      && GET_RTX_CLASS (GET_CODE (op0)) == '<')
    return op0;
  else if (code == EQ && op1 == const0_rtx)
    {
      /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'.  */
      enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
      if (new != UNKNOWN)
	{
	  code = new;
	  mode = cmp_mode;
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	}
    }

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (x, old, new)
     rtx x;
     rtx old;
     rtx new;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op = (XEXP (x, 0) == old
		  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

	return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
	simplify_gen_binary (code, mode,
			     simplify_replace_rtx (XEXP (x, 0), old, new),
			     simplify_replace_rtx (XEXP (x, 1), old, new));
    case '<':
      {
	enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
				     ? GET_MODE (XEXP (x, 0))
				     : GET_MODE (XEXP (x, 1)));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	return
	  simplify_gen_relational (code, mode,
				   (op_mode != VOIDmode
				    ? op_mode
				    : GET_MODE (op0) != VOIDmode
				    ? GET_MODE (op0)
				    : GET_MODE (op1)),
				   op0, op1);
      }

    case '3':
    case 'b':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

	return
	  simplify_gen_ternary (code, mode,
				(op_mode != VOIDmode
				 ? op_mode
				 : GET_MODE (op0)),
				op0,
				simplify_replace_rtx (XEXP (x, 1), old, new),
				simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  rtx exp;
	  exp = simplify_gen_subreg (GET_MODE (x),
				     simplify_replace_rtx (SUBREG_REG (x),
							   old, new),
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  if (exp)
	    x = exp;
	}
      return x;

    case 'o':
      if (code == MEM)
	return replace_equiv_address_nv (x,
					 simplify_replace_rtx (XEXP (x, 0),
							       old, new));

      if (REG_P (x) && REG_P (old) && REGNO (x) == REGNO (old))
	return new;

      return x;

    default:
      return x;
    }
  return x;
}
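
/* Illustrative sketch (exposition only, excluded from the build):
   substituting a constant for a register and letting the result fold.
   (plus (reg) (const_int 4)) with (reg) replaced by (const_int 1)
   simplifies to (const_int 5).  */
#if 0
static rtx
replace_example (rtx reg)
{
  rtx expr = gen_rtx_PLUS (SImode, reg, GEN_INT (4));
  return simplify_replace_rtx (expr, reg, GEN_INT (1));
}
#endif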

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  if (op_mode == VOIDmode)
	    abort ();
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	  return 0;

	default:
	  abort ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
	  else
	    lv = exact_log2 (l1 & (-l1)) + 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    abort ();

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
	{
	case SQRT:
	  /* We don't attempt to optimize this.  */
	  return 0;

	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;

	default:
	  abort ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT i;
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
      switch (code)
	{
	case FIX:		i = REAL_VALUE_FIX (d);		  break;
	case UNSIGNED_FIX:	i = REAL_VALUE_UNSIGNED_FIX (d);  break;
	default:
	  abort ();
	}
      return gen_int_mode (i, mode);
    }

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return gen_rtx_fmt_ee (reversed,
				   op_mode, XEXP (op, 0), XEXP (op, 1));
	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);
	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	case ZERO_EXTEND:
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
	  break;
#endif

	default:
	  break;
	}

      return 0;
    }
}
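
/* Illustrative sketch (exposition only, excluded from the build):
   constant-folding a unary operation.  Zero-extending the QImode
   constant -1 (bit pattern 0xff) into SImode yields (const_int 255).  */
#if 0
static rtx
unary_fold_example (void)
{
  return simplify_unary_operation (ZERO_EXTEND, SImode,
				   GEN_INT (-1), QImode);
}
#endif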

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (code == DIV
	  && !MODE_HAS_INFINITIES (mode)
	  && REAL_VALUES_EQUAL (f1, dconst0))
	return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* .. fall through ...  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:  case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	     when x is NaN, infinite, or finite and non-zero.  They aren't
	     when x is -0 and the rounding mode is not towards -infinity,
	     since (-0) + 0 is then 0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	     transformations are safe even for IEEE.  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && trueop1 == const1_rtx)
	    return gen_rtx_NEG (mode, XEXP (op0, 0));

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;
	  break;

	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }

	  break;
	case MINUS:
	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (trueop0, trueop1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  (0 - x) is the
	     same as -x when x is NaN, infinite, or finite and non-zero.
	     But if the mode has signed zeros, and does not round towards
	     -infinity, then 0 - 0 is 0, not -0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	    return gen_rtx_NEG (mode, op1);

	  /* (-1 - a) is ~a.  */
	  if (trueop0 == constm1_rtx)
	    return gen_rtx_NOT (mode, op1);

	  /* Subtracting 0 has no effect unless the mode has signed zeros
	     and supports rounding towards -infinity.  In such a case,
	     0 - 0 is -0.  */
	  if (!(HONOR_SIGNED_ZEROS (mode)
		&& HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	      && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = - 1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* (a - (-b)) -> (a + b).  True even for IEEE.  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return simplify_gen_binary (PLUS, mode,
					op0,
					neg_const_int (mode, op1));

	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 1)));
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 0)));
	    }
	  break;

	case MULT:
	  if (trueop1 == constm1_rtx)
	    {
	      tem = simplify_unary_operation (NEG, mode, op0, mode);

	      return tem ? tem : gen_rtx_NEG (mode, op0);
	    }

	  /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	     x is NaN, since x * 0 is then also NaN.  Nor is it valid
	     when the mode has signed zeros, since multiplying a negative
	     number by 0 will give -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for
	     signalling NaNs.  */
	  if (!HONOR_SNANS (mode)
	      && trueop1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));

	  /* x*2 is x+x and x*(-1) is -x */
	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
	      && GET_MODE (op0) == mode)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (REAL_VALUES_EQUAL (d, dconst2))
		return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

	      if (REAL_VALUES_EQUAL (d, dconstm1))
		return gen_rtx_NEG (mode, op0);
	    }
	  break;

	case IOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  break;

	case XOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return gen_rtx_NOT (mode, op0);
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;

	case AND:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op0;
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;

	case UDIV:
	  /* Convert divide by power of two into shift (divide by 1 handled
	     below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
	    return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));

	  /* ... fall through ...  */

	case DIV:
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      /* On some platforms DIV uses narrower mode than its
		 operands.  */
	      rtx x = gen_lowpart_common (mode, op0);
	      if (x)
		return x;
	      else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		return gen_lowpart_SUBREG (mode, op0);
	      else
		return op0;
	    }

	  /* Maybe change 0 / x to 0.  This transformation isn't safe for
	     modes with NaNs, since 0 / 0 will then be NaN rather than 0.
	     Nor is it safe for modes with signed zeros, since dividing
	     0 by a negative number gives -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop0 == CONST0_RTX (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Change division by a constant into multiplication.  Only do
	     this with -funsafe-math-optimizations.  */
	  else if (GET_CODE (trueop1) == CONST_DOUBLE
		   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
		   && trueop1 != CONST0_RTX (mode)
		   && flag_unsafe_math_optimizations)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (! REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
		  return gen_rtx_MULT (mode, op0,
				       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
		}
	    }
	  break;

	case UMOD:
	  /* Handle modulus by power of two (mod with 1 handled below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && exact_log2 (INTVAL (trueop1)) > 0)
	    return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));

	  /* ... fall through ...  */

	case MOD:
	  if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
	      && ! side_effects_p (op0) && ! side_effects_p (op1))
	    return const0_rtx;
	  break;

	case ROTATERT:
	case ROTATE:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* ... fall through ...  */

	case ASHIFT:
	case ASHIFTRT:
	case LSHIFTRT:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (trueop0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;

	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMIN:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMAX:
	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	default:
	  abort ();
	}

      return 0;
    }

  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
	 what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
	 Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
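
/* Illustrative sketch (exposition only, excluded from the build): the
   zero-/sign-extended argument pair above is what makes narrow-mode
   arithmetic come out right.  In QImode, 0xff is -1 when viewed signed,
   so (mult 0xff 2) folds to (const_int -2), i.e. 0xfe truncated back
   into the mode.  */
#if 0
static rtx
binary_fold_example (void)
{
  return simplify_binary_operation (MULT, QImode,
				    GEN_INT (0xff), GEN_INT (2));
}
#endif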

/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (p1, p2)
     const void *p1;
     const void *p2;
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
	  - commutative_operand_precedence (d1->op));
}

static rtx
simplify_plus_minus (code, mode, op0, op1, force)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
     int force;
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, negate, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  input_consts++;
		  changed = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = constm1_rtx;
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		}
	      break;

	    case CONST_INT:
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  {
	    rtx lhs = ops[i].op, rhs = ops[j].op;
	    int lneg = ops[i].neg, rneg = ops[j].neg;

	    if (lhs != 0 && rhs != 0
		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs)
		    /* Don't allow -x + -1 -> ~x simplifications in the
		       first pass.  This allows us the chance to combine
		       the -1 with other constants.  */
		    && ! (first
			  && GET_CODE (tem) == NOT
			  && XEXP (tem, 0) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (GET_CODE (tem) == CONST_INT && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		  }
	      }
	  }

      first = 0;
    }
  while (changed);

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */

  negate = 0;
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
	ops[i].neg = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
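
/* Illustrative sketch (exposition only, excluded from the build): the
   brute-force method above flattens nested additions into the ops[]
   array, folds pairs, and rebuilds.  ((r + 4) - (r + 1)) flattens to
   {r, 4, -r, -1}; the r terms cancel and the constants combine, so
   the whole expression should reduce to (const_int 3).  */
#if 0
static rtx
plus_minus_example (rtx r)
{
  rtx a = gen_rtx_PLUS (SImode, r, GEN_INT (4));
  rtx b = gen_rtx_PLUS (SImode, r, GEN_INT (1));
  return simplify_gen_binary (MINUS, SImode, a, b);
}
#endif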
1845 /* Like simplify_binary_operation except used for relational operators.
1846 MODE is the mode of the operands, not that of the result. If MODE
1847 is VOIDmode, both operands must also be VOIDmode and we compare the
1848 operands in "infinite precision".
1850 If no simplification is possible, this function returns zero. Otherwise,
1851 it returns either const_true_rtx or const0_rtx. */
1854 simplify_relational_operation (code, mode, op0, op1)
1855 enum rtx_code code;
1856 enum machine_mode mode;
1857 rtx op0, op1;
1859 int equal, op0lt, op0ltu, op1lt, op1ltu;
1860 rtx tem;
1861 rtx trueop0;
1862 rtx trueop1;
1864 if (mode == VOIDmode
1865 && (GET_MODE (op0) != VOIDmode
1866 || GET_MODE (op1) != VOIDmode))
1867 abort ();
1869 /* If op0 is a compare, extract the comparison arguments from it. */
1870 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1871 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1873 trueop0 = avoid_constant_pool_reference (op0);
1874 trueop1 = avoid_constant_pool_reference (op1);
1876 /* We can't simplify MODE_CC values since we don't know what the
1877 actual comparison is. */
1878 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1879 #ifdef HAVE_cc0
1880 || op0 == cc0_rtx
1881 #endif
1883 return 0;
1885 /* Make sure the constant is second. */
1886 if (swap_commutative_operands_p (trueop0, trueop1))
1888 tem = op0, op0 = op1, op1 = tem;
1889 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
1890 code = swap_condition (code);
1893 /* For integer comparisons of A and B maybe we can simplify A - B and can
1894 then simplify a comparison of that with zero. If A and B are both either
1895 a register or a CONST_INT, this can't help; testing for these cases will
1896 prevent infinite recursion here and speed things up.
1898 If CODE is an unsigned comparison, then we can never do this optimization,
1899 because it gives an incorrect result if the subtraction wraps around zero.
1900 ANSI C defines unsigned operations such that they never overflow, and
1901 thus such cases can not be ignored. */
1903 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
1904 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
1905 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
1906 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
1907 && code != GTU && code != GEU && code != LTU && code != LEU)
1908 return simplify_relational_operation (signed_condition (code),
1909 mode, tem, const0_rtx);
1911 if (flag_unsafe_math_optimizations && code == ORDERED)
1912 return const_true_rtx;
1914 if (flag_unsafe_math_optimizations && code == UNORDERED)
1915 return const0_rtx;
1917 /* For modes without NaNs, if the two operands are equal, we know the
1918 result. */
1919 if (!HONOR_NANS (GET_MODE (trueop0)) && rtx_equal_p (trueop0, trueop1))
1920 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
1922 /* If the operands are floating-point constants, see if we can fold
1923 the result. */
1924 else if (GET_CODE (trueop0) == CONST_DOUBLE
1925 && GET_CODE (trueop1) == CONST_DOUBLE
1926 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
1928 REAL_VALUE_TYPE d0, d1;
1930 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
1931 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
1933 /* Comparisons are unordered iff at least one of the values is NaN. */
1934 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
1935 switch (code)
1937 case UNEQ:
1938 case UNLT:
1939 case UNGT:
1940 case UNLE:
1941 case UNGE:
1942 case NE:
1943 case UNORDERED:
1944 return const_true_rtx;
1945 case EQ:
1946 case LT:
1947 case GT:
1948 case LE:
1949 case GE:
1950 case LTGT:
1951 case ORDERED:
1952 return const0_rtx;
1953 default:
1954 return 0;
1957 equal = REAL_VALUES_EQUAL (d0, d1);
1958 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
1959 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
1962 /* Otherwise, see if the operands are both integers. */
1963 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
1964 && (GET_CODE (trueop0) == CONST_DOUBLE
1965 || GET_CODE (trueop0) == CONST_INT)
1966 && (GET_CODE (trueop1) == CONST_DOUBLE
1967 || GET_CODE (trueop1) == CONST_INT))
1969 int width = GET_MODE_BITSIZE (mode);
1970 HOST_WIDE_INT l0s, h0s, l1s, h1s;
1971 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
1973 /* Get the two words comprising each integer constant. */
1974 if (GET_CODE (trueop0) == CONST_DOUBLE)
1976 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
1977 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
1979 else
1981 l0u = l0s = INTVAL (trueop0);
1982 h0u = h0s = HWI_SIGN_EXTEND (l0s);
1985 if (GET_CODE (trueop1) == CONST_DOUBLE)
1987 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
1988 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
1990 else
1992 l1u = l1s = INTVAL (trueop1);
1993 h1u = h1s = HWI_SIGN_EXTEND (l1s);
1996 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1997 we have to sign or zero-extend the values. */
1998 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2000 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2001 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2003 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2004 l0s |= ((HOST_WIDE_INT) (-1) << width);
2006 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2007 l1s |= ((HOST_WIDE_INT) (-1) << width);
2009 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2010 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2012 equal = (h0u == h1u && l0u == l1u);
2013 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2014 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2015 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2016 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));

  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
        {
        case EQ:
          /* References to the frame plus a constant or labels cannot
             be zero, but a SYMBOL_REF can due to #pragma weak.  */
          if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
               || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
              /* On some machines, the ap reg can be 0 sometimes.  */
              && op0 != arg_pointer_rtx
#endif
              )
            return const0_rtx;
          break;

        case NE:
          if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
               || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
              && op0 != arg_pointer_rtx
#endif
              )
            return const_true_rtx;
          break;

        case GEU:
          /* Unsigned values are never negative.  */
          if (trueop1 == const0_rtx)
            return const_true_rtx;
          break;
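
          /* For illustration: (geu (reg:SI 100) (const_int 0)) folds to
             const_true_rtx here even though nothing is known about the
             register, since no unsigned value is less than zero.  */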

        case LTU:
          if (trueop1 == const0_rtx)
            return const0_rtx;
          break;

        case LEU:
          /* Unsigned values are never greater than the largest
             unsigned value.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const_true_rtx;
          break;

        case GTU:
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const0_rtx;
          break;

        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const0_rtx;
            }
          break;
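
          /* For illustration: (lt (abs:DF (reg:DF 100)) (const_double 0.0))
             folds to const0_rtx here, since abs(x) is never less than
             zero; the fold is suppressed when signaling NaNs must be
             honored, because the comparison could then trap.  */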

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const1_rtx;
            }
          break;

        default:
          break;
        }

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
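
  /* For illustration: after comparing (const_int 4) with (const_int 4),
     EQUAL is set and the four "less than" flags are clear, so EQ, LE,
     GE, LEU and GEU fold to const_true_rtx below, while NE, LT, GT,
     LTU and GTU fold to const0_rtx.  */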

  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}

/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && GET_CODE (op2) == CONST_INT
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant.  */
          HOST_WIDE_INT val = INTVAL (op0);

          if (BITS_BIG_ENDIAN)
            val >>= (GET_MODE_BITSIZE (op0_mode)
                     - INTVAL (op2) - INTVAL (op1));
          else
            val >>= INTVAL (op2);

          if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
            {
              /* First zero-extend.  */
              val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
              /* If desired, propagate the sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
                val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
            }

          /* Clear the bits that don't belong in our mode,
             unless they and our sign bit are all one.
             So we get either a reasonable negative value or a reasonable
             unsigned value for this mode.  */
          if (width < HOST_BITS_PER_WIDE_INT
              && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
                  != ((HOST_WIDE_INT) (-1) << (width - 1))))
            val &= ((HOST_WIDE_INT) 1 << width) - 1;

          return GEN_INT (val);
        }
      break;
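
      /* Worked example, assuming BITS_BIG_ENDIAN == 0: extracting a
         4-bit field at position 4 from (const_int 0x123) shifts the
         value right by 4 and masks it with 0xf, so both ZERO_EXTRACT
         and SIGN_EXTRACT fold to (const_int 2), bit 3 of the extracted
         field being clear.  */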

    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
        return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
          && !HONOR_NANS (mode)
          && rtx_equal_p (XEXP (op0, 0), op1)
          && rtx_equal_p (XEXP (op0, 1), op2))
        return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
               && !HONOR_NANS (mode)
               && rtx_equal_p (XEXP (op0, 1), op1)
               && rtx_equal_p (XEXP (op0, 0), op2))
        return op2;
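
      /* For illustration: (if_then_else (eq (reg:SI 100) (reg:SI 101))
         (reg:SI 101) (reg:SI 100)) simplifies to (reg:SI 100) by the EQ
         arm above; whichever way the comparison goes, the value is the
         same.  */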
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;
          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
                                                XEXP (op0, 0), XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp == const0_rtx)
            return op2;
          else if (temp == const1_rtx)
            return op1;
          else if (temp)
            op0 = temp;

          /* Look for happy constants in op1 and op2.  */
          if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
            }
        }
      break;
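
      /* For illustration, assuming STORE_FLAG_VALUE == 1:
         (if_then_else (lt (reg:SI 100) (reg:SI 101)) (const_int 1)
         (const_int 0)) collapses to (lt (reg:SI 100) (reg:SI 101)),
         and with the two constant arms swapped it becomes the reversed
         comparison, here (ge ...), when the reversal is known to be
         valid.  */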

    default:
      abort ();
    }

  return 0;
}

/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplification is possible.  */

rtx
simplify_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  /* A little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;

  /* Simplify subregs of vector constants.  */
  if (GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
      const unsigned int offset = byte / elt_size;
      rtx elt;

      if (GET_MODE_INNER (innermode) == outermode)
        {
          elt = CONST_VECTOR_ELT (op, offset);

          /* ??? We probably don't need this copy_rtx because constants
             can be shared.  */
          return copy_rtx (elt);
        }
      else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
               && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
        {
          return (gen_rtx_CONST_VECTOR
                  (outermode,
                   gen_rtvec_v (GET_MODE_NUNITS (outermode),
                                &CONST_VECTOR_ELT (op, offset))));
        }
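
      /* For illustration: taking the low V2SImode part of a V4SImode
         CONST_VECTOR re-wraps two of the four elements, starting at
         element BYTE / 4, in a new, narrower CONST_VECTOR.  */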
      else if (GET_MODE_CLASS (outermode) == MODE_INT
               && (GET_MODE_SIZE (outermode) % elt_size == 0))
        {
          /* This happens when the target register size is smaller than
             the vector mode, and we synthesize operations with vectors
             of elements that are smaller than the register size.  */
          HOST_WIDE_INT sum = 0, high = 0;
          unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
          unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
          unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
          int shift = BITS_PER_UNIT * elt_size;

          for (; n_elts--; i += step)
            {
              elt = CONST_VECTOR_ELT (op, i);
              if (GET_CODE (elt) == CONST_DOUBLE
                  && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
                {
                  elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
                                            elt);
                  if (! elt)
                    return NULL_RTX;
                }
              if (GET_CODE (elt) != CONST_INT)
                return NULL_RTX;
              high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
              sum = (sum << shift) + INTVAL (elt);
            }
          if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
            return GEN_INT (trunc_int_for_mode (sum, outermode));
          else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
            return immed_double_const (high, sum, outermode);
          else
            return NULL_RTX;
        }
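
      /* Worked example, assuming BYTES_BIG_ENDIAN == 0 and a 32-bit
         HOST_WIDE_INT: the SImode word at byte 0 of the V4QImode vector
         {1, 2, 3, 4} is accumulated from element 3 down to element 0,
         yielding (const_int 0x04030201).  */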
      else if (GET_MODE_CLASS (outermode) == MODE_INT
               && (elt_size % GET_MODE_SIZE (outermode) == 0))
        {
          enum machine_mode new_mode
            = int_mode_for_mode (GET_MODE_INNER (innermode));
          int subbyte = byte % elt_size;

          op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
          if (! op)
            return NULL_RTX;
          return simplify_subreg (outermode, op, new_mode, subbyte);
        }
      else if (GET_MODE_CLASS (outermode) == MODE_INT)
        /* This shouldn't happen, but let's not do anything stupid.  */
        return NULL_RTX;
    }

  /* Attempt to simplify the constant to a non-SUBREG expression.  */
  if (CONSTANT_P (op))
    {
      int offset, part;
      unsigned HOST_WIDE_INT val = 0;

      if (GET_MODE_CLASS (outermode) == MODE_VECTOR_INT
          || GET_MODE_CLASS (outermode) == MODE_VECTOR_FLOAT)
        {
          /* Construct a CONST_VECTOR from individual subregs.  */
          enum machine_mode submode = GET_MODE_INNER (outermode);
          int subsize = GET_MODE_UNIT_SIZE (outermode);
          int i, elts = GET_MODE_NUNITS (outermode);
          rtvec v = rtvec_alloc (elts);
          rtx elt;

          for (i = 0; i < elts; i++, byte += subsize)
            {
              /* This might fail, e.g. if taking a subreg from a SYMBOL_REF.  */
              /* ??? It would be nice if we could actually make such subregs
                 on targets that allow such relocations.  */
              elt = simplify_subreg (submode, op, innermode, byte);
              if (! elt)
                return NULL_RTX;
              RTVEC_ELT (v, i) = elt;
            }

          return gen_rtx_CONST_VECTOR (outermode, v);
        }

      /* ??? This code is partly redundant with code below, but can handle
         the subregs of floats and similar corner cases.
         Later we should move all simplification code here and rewrite
         GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
         using SIMPLIFY_SUBREG.  */
      if (subreg_lowpart_offset (outermode, innermode) == byte
          && GET_CODE (op) != CONST_VECTOR)
        {
          rtx new = gen_lowpart_if_possible (outermode, op);
          if (new)
            return new;
        }

      /* Similar comments as above apply here.  */
      if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
          && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
          && GET_MODE_CLASS (outermode) == MODE_INT)
        {
          rtx new = constant_subword (op,
                                      (byte / UNITS_PER_WORD),
                                      innermode);
          if (new)
            return new;
        }

      if (GET_MODE_CLASS (outermode) != MODE_INT
          && GET_MODE_CLASS (outermode) != MODE_CC)
        {
          enum machine_mode new_mode = int_mode_for_mode (outermode);

          if (new_mode != innermode || byte != 0)
            {
              op = simplify_subreg (new_mode, op, innermode, byte);
              if (! op)
                return NULL_RTX;
              return simplify_subreg (outermode, op, new_mode, 0);
            }
        }

      offset = byte * BITS_PER_UNIT;
      switch (GET_CODE (op))
        {
        case CONST_DOUBLE:
          if (GET_MODE (op) != VOIDmode)
            break;

          /* We can't handle this case yet.  */
          if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
            return NULL_RTX;

          part = offset >= HOST_BITS_PER_WIDE_INT;
          if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
               && BYTES_BIG_ENDIAN)
              || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
                  && WORDS_BIG_ENDIAN))
            part = !part;
          val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
          offset %= HOST_BITS_PER_WIDE_INT;

          /* We've already picked the word we want from a double, so
             pretend this is actually an integer.  */
          innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

          /* FALLTHROUGH */
        case CONST_INT:
          if (GET_CODE (op) == CONST_INT)
            val = INTVAL (op);

          /* We don't handle synthesizing of non-integral constants yet.  */
          if (GET_MODE_CLASS (outermode) != MODE_INT)
            return NULL_RTX;

          if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
            {
              if (WORDS_BIG_ENDIAN)
                offset = (GET_MODE_BITSIZE (innermode)
                          - GET_MODE_BITSIZE (outermode) - offset);
              if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
                  && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
                offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
                          - 2 * (offset % BITS_PER_WORD));
            }

          if (offset >= HOST_BITS_PER_WIDE_INT)
            return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
          else
            {
              val >>= offset;
              if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
                val = trunc_int_for_mode (val, outermode);
              return GEN_INT (val);
            }

        default:
          break;
        }
    }
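
  /* For illustration, with BYTES_BIG_ENDIAN and WORDS_BIG_ENDIAN both 0:
     (subreg:QI (const_int 0x1234) 0) is handled by the CONST_INT case
     above and folds to (const_int 0x34), while byte offset 1 yields
     (const_int 0x12).  */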

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to op's starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents the offset, as if the value were stored
         in memory.  The irritating exception is the paradoxical subreg,
         where we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case the resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
                             GET_MODE (SUBREG_REG (op)),
                             final_offset);
      if (new)
        return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }
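
  /* For illustration: (subreg:QI (subreg:HI (reg:SI 100) 0) 0) recurses
     on the inner register with the combined offset 0, so at worst the
     double subreg collapses to the single (subreg:QI (reg:SI 100) 0).  */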

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && (! REG_FUNCTION_VALUE_P (op)
          || ! rtx_equal_function_value_matters)
#ifdef CLASS_CANNOT_CHANGE_MODE
      && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
            && (TEST_HARD_REG_BIT
                (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
                 REGNO (op))))
#endif
      && REGNO (op) < FIRST_PSEUDO_REGISTER
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
              ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM)
    {
      int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
                                           0);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
        {
          rtx x = gen_rtx_REG (outermode, final_regno);

          /* Propagate the original regno.  We don't have any way to specify
             the offset inside the original regno, so do so only for the
             lowpart.  The information is used only by alias analysis, which
             cannot grok partial registers anyway.  */
          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      /* We can at least simplify it by referring directly to the
         relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  rtx new;

  /* A little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
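
/* Usage sketch, not from the original source: a caller wanting the low
   SImode part of a DImode operand OP might write

        low = simplify_gen_subreg (SImode, op, DImode,
                                   subreg_lowpart_offset (SImode, DImode));

   where LOW is either a simplified rtx or a plain (subreg:SI ...); the
   result is NULL_RTX only for QUEUED rtl or when OP is itself a SUBREG
   or a VOIDmode constant that could not be decomposed.  */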

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */

rtx
simplify_rtx (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        {
          rtx tem;

          tem = XEXP (x, 0);
          XEXP (x, 0) = XEXP (x, 1);
          XEXP (x, 1) = tem;
          return simplify_binary_operation (code, mode,
                                            XEXP (x, 0), XEXP (x, 1));
        }
      /* Fall through.  */

    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case '<':
      return simplify_relational_operation (code,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0), XEXP (x, 1));
    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      return NULL;

    default:
      return NULL;
    }
}